Port renaming changes from AOMedia

Cherry-Picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*

Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
diff --git a/.gitignore b/.gitignore
index f6bf5a9..56e3b66 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,9 +38,9 @@
 /examples/twopass_encoder
 /examples/vp8_multi_resolution_encoder
 /examples/vp[8x]cx_set_ref
-/examples/vp9_spatial_scalable_encoder
-/examples/vpx_temporal_scalable_patterns
-/examples/vpx_temporal_svc_encoder
+/examples/av1_spatial_scalable_encoder
+/examples/aom_temporal_scalable_patterns
+/examples/aom_temporal_svc_encoder
 /ivfdec
 /ivfdec.dox
 /ivfenc
@@ -49,18 +49,18 @@
 /libaom.ver
 /samples.dox
 /test_intra_pred_speed
-/test_libvpx
+/test_libaom
 /vp8_api1_migration.dox
 /vp[89x]_rtcd.h
-/vp10_rtcd.h
-/vpx.pc
-/vpx_config.c
-/vpx_config.h
-/vpx_dsp_rtcd.h
-/vpx_scale_rtcd.h
-/vpx_version.h
-/vpxdec
-/vpxdec.dox
-/vpxenc
-/vpxenc.dox
+/av1_rtcd.h
+/aom.pc
+/aom_config.c
+/aom_config.h
+/aom_dsp_rtcd.h
+/aom_scale_rtcd.h
+/aom_version.h
+/aomdec
+/aomdec.dox
+/aomenc
+/aomenc.dox
 TAGS
diff --git a/AUTHORS b/AUTHORS
index f89b677..49d8d13 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -56,13 +56,16 @@
 Jan Gerber <j@mailb.org>
 Jan Kratochvil <jan.kratochvil@redhat.com>
 Janne Salonen <jsalonen@google.com>
+Jean-Marc Valin <jmvalin@jmvalin.ca>
 Jeff Faust <jfaust@google.com>
 Jeff Muizelaar <jmuizelaar@mozilla.com>
 Jeff Petkau <jpet@chromium.org>
 Jia Jia <jia.jia@linaro.org>
+Jian Zhou <zhoujian@google.com>
 Jim Bankoski <jimbankoski@google.com>
 Jingning Han <jingning@google.com>
 Joey Parrish <joeyparrish@google.com>
+Johann Koenig <johannkoenig@chromium.org>
 Johann Koenig <johannkoenig@google.com>
 John Koleszar <jkoleszar@google.com>
 Johnny Klonaris <google@jawknee.com>
@@ -89,6 +92,7 @@
 Mikhal Shemer <mikhal@google.com>
 Minghai Shang <minghai@google.com>
 Morton Jonuschat <yabawock@gmail.com>
+Nathan E. Egge <negge@dgql.org>
 Nico Weber <thakis@chromium.org>
 Parag Salasakar <img.mips1@gmail.com>
 Pascal Massimino <pascal.massimino@gmail.com>
@@ -97,6 +101,7 @@
 Pavol Rusnak <stick@gk2.sk>
 Paweł Hajdan <phajdan@google.com>
 Pengchong Jin <pengchong@google.com>
+Peter de Rivaz <peter.derivaz@argondesign.com>
 Peter de Rivaz <peter.derivaz@gmail.com>
 Philip Jägenstedt <philipj@opera.com>
 Priit Laes <plaes@plaes.org>
@@ -107,13 +112,16 @@
 Ronald S. Bultje <rsbultje@gmail.com>
 Rui Ueyama <ruiu@google.com>
 Sami Pietilä <samipietila@google.com>
+Sasi Inguva <isasi@google.com>
 Scott Graham <scottmg@chromium.org>
 Scott LaVarnway <slavarnway@google.com>
 Sean McGovern <gseanmcg@gmail.com>
+Sergey Kolomenkin <kolomenkin@gmail.com>
 Sergey Ulanov <sergeyu@chromium.org>
 Shimon Doodkin <helpmepro1@gmail.com>
 Shunyao Li <shunyaoli@google.com>
 Stefan Holmer <holmer@google.com>
+Steinar Midtskogen <stemidts@cisco.com>
 Suman Sunkara <sunkaras@google.com>
 Taekhyun Kim <takim@nvidia.com>
 Takanori MATSUURA <t.matsuu@gmail.com>
@@ -121,9 +129,14 @@
 Tao Bai <michaelbai@chromium.org>
 Tero Rintaluoma <teror@google.com>
 Thijs Vermeir <thijsvermeir@gmail.com>
+Thomas Daede <tdaede@mozilla.com>
+Thomas Davies <thdavies@cisco.com>
+Thomas <thdavies@cisco.com>
 Tim Kopp <tkopp@google.com>
 Timothy B. Terriberry <tterribe@xiph.org>
 Tom Finegan <tomfinegan@google.com>
+Tristan Matthews <le.businessman@gmail.com>
+Tristan Matthews <tmatth@videolan.org>
 Vignesh Venkatasubramanian <vigneshv@google.com>
 Yaowu Xu <yaowu@google.com>
 Yongzhe Wang <yongzhe@google.com>
diff --git a/CHANGELOG b/CHANGELOG
index 7db420e..03392bb 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,53 +1,53 @@
 Next Release
   - Incompatible changes:
-    The VP9 encoder's default keyframe interval changed to 128 from 9999.
+    The AV1 encoder's default keyframe interval changed to 128 from 9999.
 
 2015-11-09 v1.5.0 "Javan Whistling Duck"
-  This release improves upon the VP9 encoder and speeds up the encoding and
+  This release improves upon the AV1 encoder and speeds up the encoding and
   decoding processes.
 
   - Upgrading:
     This release is ABI incompatible with 1.4.0. It drops deprecated VP8
-    controls and adds a variety of VP9 controls for testing.
+    controls and adds a variety of AV1 controls for testing.
 
-    The vpxenc utility now prefers VP9 by default.
+    The aomenc utility now prefers AV1 by default.
 
   - Enhancements:
-    Faster VP9 encoding and decoding
-    Smaller library size by combining functions used by VP8 and VP9
+    Faster AV1 encoding and decoding
+    Smaller library size by combining functions used by VP8 and AV1
 
   - Bug Fixes:
     A variety of fuzzing issues
 
 2015-04-03 v1.4.0 "Indian Runner Duck"
-  This release includes significant improvements to the VP9 codec.
+  This release includes significant improvements to the AV1 codec.
 
   - Upgrading:
     This release is ABI incompatible with 1.3.0. It drops the compatibility
     layer, requiring VPX_IMG_FMT_* instead of IMG_FMT_*, and adds several codec
-    controls for VP9.
+    controls for AV1.
 
   - Enhancements:
-    Faster VP9 encoding and decoding
-    Multithreaded VP9 decoding (tile and frame-based)
-    Multithreaded VP9 encoding - on by default
-    YUV 4:2:2 and 4:4:4 support in VP9
-    10 and 12bit support in VP9
+    Faster AV1 encoding and decoding
+    Multithreaded AV1 decoding (tile and frame-based)
+    Multithreaded AV1 encoding - on by default
+    YUV 4:2:2 and 4:4:4 support in AV1
+    10 and 12bit support in AV1
     64bit ARM support by replacing ARM assembly with intrinsics
 
   - Bug Fixes:
-    Fixes a VP9 bitstream issue in Profile 1. This only affected non-YUV 4:2:0
+    Fixes an AV1 bitstream issue in Profile 1. This only affected non-YUV 4:2:0
     files.
 
   - Known Issues:
     Frame Parallel decoding fails for segmented and non-420 files.
 
 2013-11-15 v1.3.0 "Forest"
-  This release introduces the VP9 codec in a backward-compatible way.
+  This release introduces the AV1 codec in a backward-compatible way.
   All existing users of VP8 can continue to use the library without
-  modification. However, some VP8 options do not map to VP9 in the same manner.
+  modification. However, some VP8 options do not map to AV1 in the same manner.
 
-  The VP9 encoder in this release is not feature complete. Users interested in
+  The AV1 encoder in this release is not feature complete. Users interested in
   the encoder are advised to use the git master branch and discuss issues on
   libvpx mailing lists.
 
@@ -68,11 +68,11 @@
       configure: support mingw-w64
       configure: support hardfloat armv7 CHOSTS
       configure: add support for android x86
-      Add estimated completion time to vpxenc
-      Don't exit on decode errors in vpxenc
-      vpxenc: support scaling prior to encoding
-      vpxdec: support scaling output
-      vpxenc: improve progress indicators with --skip
+      Add estimated completion time to aomenc
+      Don't exit on decode errors in aomenc
+      aomenc: support scaling prior to encoding
+      aomdec: support scaling output
+      aomenc: improve progress indicators with --skip
       msvs: Don't link to winmm.lib
       Add a new script for producing vcxproj files
       Produce Visual Studio 10 and 11 project files
@@ -82,7 +82,7 @@
       Add encoding option --static-thresh
 
   - Speed:
-      Miscellaneous speed optimizations for VP8 and VP9.
+      Miscellaneous speed optimizations for VP8 and AV1.
 
   - Quality:
       In general, quality is consistent with the Eider release.
@@ -104,7 +104,7 @@
 
   - Enhancements:
       VP8 optimizations for MIPS dspr2
-      vpxenc: add -quiet option
+      aomenc: add -quiet option
 
   - Speed:
       Encoder and decoder speed is consistent with the Eider release.
@@ -159,17 +159,17 @@
         OS/2 support
         SunCC support
 
-      Changing resolution with vpx_codec_enc_config_set() is now
+      Changing resolution with aom_codec_enc_config_set() is now
       supported. Previously, reinitializing the codec was required to
       change the input resolution.
 
-      The vpxenc application has initial support for producing multiple
+      The aomenc application has initial support for producing multiple
       encodes from the same input in one call. Resizing is not yet
       supported, but varying other codec parameters is. Use -- to
       delineate output streams. Options persist from one stream to the
       next.
 
-      Also, the vpxenc application will now use a keyframe interval of
+      Also, the aomenc application will now use a keyframe interval of
       5 seconds by default. Use the --kf-max-dist option to override.
 
   - Speed:
@@ -206,7 +206,7 @@
       enhancement (MFQE) in sections of the frame where there is motion.
       (#392)
 
-      Fixed corruption issues when vpx_codec_enc_config_set() was called
+      Fixed corruption issues when aom_codec_enc_config_set() was called
       with spatial resampling enabled.
 
       Fixed a decoder error introduced in Duclair where the segmentation
@@ -300,12 +300,12 @@
     notes in this document for that release.
 
   - Enhancements:
-          Stereo 3D format support for vpxenc
+          Stereo 3D format support for aomenc
           Runtime detection of available processor cores.
           Allow specifying --end-usage by enum name
-          vpxdec: test for frame corruption
-          vpxenc: add quantizer histogram display
-          vpxenc: add rate histogram display
+          aomdec: test for frame corruption
+          aomenc: add quantizer histogram display
+          aomenc: add rate histogram display
           Set VPX_FRAME_IS_DROPPABLE
           update configure for ios sdk 4.3
           Avoid text relocations in ARM vp8 decoder
@@ -370,7 +370,7 @@
           Fix semaphore emulation, spin-wait intrinsics on Windows
           Fix build with xcode4 and simplify GLOBAL.
           Mark ARM asm objects as allowing a non-executable stack.
-          Fix vpxenc encoding incorrect webm file header on big endian
+          Fix aomenc encoding incorrect webm file header on big endian
 
 
 2011-03-07 v0.9.6 "Bali"
@@ -382,7 +382,7 @@
     document for that release.
 
   - Enhancements:
-      vpxenc --psnr shows a summary when encode completes
+      aomenc --psnr shows a summary when encode completes
       --tune=ssim option to enable activity masking
       improved postproc visualizations for development
       updated support for Apple iOS to SDK 4.2
@@ -455,9 +455,9 @@
 
   - Upgrading:
     This release incorporates backwards-incompatible changes to the
-    ivfenc and ivfdec tools. These tools are now called vpxenc and vpxdec.
+    ivfenc and ivfdec tools. These tools are now called aomenc and aomdec.
 
-    vpxdec
+    aomdec
       * the -q (quiet) option has been removed, and replaced with
         -v (verbose). the output is quiet by default. Use -v to see
         the version number of the binary.
@@ -470,13 +470,13 @@
         options must be specified.
 
           $ ivfdec -o OUTPUT INPUT
-          $ vpxdec --i420 -o OUTPUT INPUT
+          $ aomdec --i420 -o OUTPUT INPUT
 
       * If an output file is not specified, the default is to write
         Y4M to stdout. This makes piping more natural.
 
           $ ivfdec -y -o - INPUT | ...
-          $ vpxdec INPUT | ...
+          $ aomdec INPUT | ...
 
       * The output file has additional flexibility for formatting the
         filename. It supports escape characters for constructing a
@@ -484,33 +484,33 @@
         replaces the -p option. To get the equivalent:
 
           $ ivfdec -p frame INPUT
-          $ vpxdec --i420 -o frame-%wx%h-%4.i420 INPUT
+          $ aomdec --i420 -o frame-%wx%h-%4.i420 INPUT
 
-    vpxenc
+    aomenc
       * The output file must be specified with -o, rather than as the
         last argument.
 
           $ ivfenc <options> INPUT OUTPUT
-          $ vpxenc <options> -o OUTPUT INPUT
+          $ aomenc <options> -o OUTPUT INPUT
 
       * The output defaults to webm. To get IVF output, use the --ivf
         option.
 
           $ ivfenc <options> INPUT OUTPUT.ivf
-          $ vpxenc <options> -o OUTPUT.ivf --ivf INPUT
+          $ aomenc <options> -o OUTPUT.ivf --ivf INPUT
 
 
   - Enhancements:
-      ivfenc and ivfdec have been renamed to vpxenc, vpxdec.
-      vpxdec supports .webm input
-      vpxdec writes .y4m by default
-      vpxenc writes .webm output by default
-      vpxenc --psnr now shows the average/overall PSNR at the end
+      ivfenc and ivfdec have been renamed to aomenc, aomdec.
+      aomdec supports .webm input
+      aomdec writes .y4m by default
+      aomenc writes .webm output by default
+      aomenc --psnr now shows the average/overall PSNR at the end
       ARM platforms now support runtime cpu detection
-      vpxdec visualizations added for motion vectors, block modes, references
-      vpxdec now silent by default
-      vpxdec --progress shows frame-by-frame timing information
-      vpxenc supports the distinction between --fps and --timebase
+      aomdec visualizations added for motion vectors, block modes, references
+      aomdec now silent by default
+      aomdec --progress shows frame-by-frame timing information
+      aomenc supports the distinction between --fps and --timebase
       NASM is now a supported assembler
       configure: enable PIC for shared libs by default
       configure: add --enable-small
diff --git a/README b/README
index fc71b77..bdeac06 100644
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
 README - 23 March 2015
 
-Welcome to the WebM VP8/VP9 Codec SDK!
+Welcome to the WebM VP8/AV1 Codec SDK!
 
 COMPILING THE APPLICATIONS/LIBRARIES:
   The build system used is similar to autotools. Building generally consists of
@@ -119,7 +119,7 @@
   This defaults to config.log. This should give a good indication of what went
   wrong. If not, contact us for support.
 
-VP8/VP9 TEST VECTORS:
+VP8/AV1 TEST VECTORS:
   The test vectors can be downloaded and verified using the build system after
   running configure. To specify an alternate directory the
   LIBVPX_TEST_DATA_PATH environment variable can be used.
diff --git a/aom/aom.h b/aom/aom.h
new file mode 100644
index 0000000..31df675
--- /dev/null
+++ b/aom/aom.h
@@ -0,0 +1,159 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*!\defgroup aom AOM
+ * \ingroup codecs
+ * AOM is aom's newest video compression algorithm that uses motion
+ * compensated prediction, Discrete Cosine Transform (DCT) coding of the
+ * prediction error signal and context dependent entropy coding techniques
+ * based on arithmetic principles. It features:
+ *  - YUV 4:2:0 image format
+ *  - Macro-block based coding (16x16 luma plus two 8x8 chroma)
+ *  - 1/4 (1/8) pixel accuracy motion compensated prediction
+ *  - 4x4 DCT transform
+ *  - 128 level linear quantizer
+ *  - In loop deblocking filter
+ *  - Context-based entropy coding
+ *
+ * @{
+ */
+/*!\file
+ * \brief Provides controls common to both the AOM encoder and decoder.
+ */
+#ifndef AOM_AOM_H_
+#define AOM_AOM_H_
+
+#include "./aom_codec.h"
+#include "./aom_image.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!\brief Control functions
+ *
+ * The set of macros define the control functions of AOM interface
+ */
+enum aom_com_control_id {
+  /*!\brief pass in an external frame into decoder to be used as reference frame
+   */
+  AOM_SET_REFERENCE = 1,
+  AOM_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
+  AOM_SET_POSTPROC = 3,   /**< set the decoder's post processing settings  */
+  AOM_SET_DBG_COLOR_REF_FRAME =
+      4, /**< set the reference frames to color for each macroblock */
+  AOM_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
+  AOM_SET_DBG_COLOR_B_MODES = 6,  /**< set which blocks modes to color */
+  AOM_SET_DBG_DISPLAY_MV = 7,     /**< set which motion vector modes to draw */
+
+  /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
+   * for its control ids. These should be migrated to something like the
+   * AOM_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
+   */
+  AV1_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
+  AOM_COMMON_CTRL_ID_MAX,
+
+  AV1_GET_NEW_FRAME_IMAGE = 192, /**< get a pointer to the new frame */
+
+  AOM_DECODER_CTRL_ID_START = 256
+};
+
+/*!\brief post process flags
+ *
+ * The set of macros define AOM decoder post processing flags
+ */
+enum aom_postproc_level {
+  AOM_NOFILTERING = 0,
+  AOM_DEBLOCK = 1 << 0,
+  AOM_DEMACROBLOCK = 1 << 1,
+  AOM_ADDNOISE = 1 << 2,
+  AOM_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
+  AOM_DEBUG_TXT_MBLK_MODES =
+      1 << 4, /**< print macro block modes over each macro block */
+  AOM_DEBUG_TXT_DC_DIFF = 1 << 5,   /**< print dc diff for each macro block */
+  AOM_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
+  AOM_MFQE = 1 << 10
+};
+
+/*!\brief post process flags
+ *
+ * This define a structure that describe the post processing settings. For
+ * the best objective measure (using the PSNR metric) set post_proc_flag
+ * to AOM_DEBLOCK and deblocking_level to 1.
+ */
+
+typedef struct aom_postproc_cfg {
+  /*!\brief the types of post processing to be done, should be combination of
+   * "aom_postproc_level" */
+  int post_proc_flag;
+  int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
+  int noise_level; /**< the strength of additive noise, valid range [0, 16] */
+} aom_postproc_cfg_t;
+
+/*!\brief reference frame type
+ *
+ * The set of macros define the type of AOM reference frames
+ */
+typedef enum aom_ref_frame_type {
+  AOM_LAST_FRAME = 1,
+  AOM_GOLD_FRAME = 2,
+  AOM_ALTR_FRAME = 4
+} aom_ref_frame_type_t;
+
+/*!\brief reference frame data struct
+ *
+ * Define the data struct to access aom reference frames.
+ */
+typedef struct aom_ref_frame {
+  aom_ref_frame_type_t frame_type; /**< which reference frame */
+  aom_image_t img;                 /**< reference frame data in image format */
+} aom_ref_frame_t;
+
+/*!\brief AV1 specific reference frame data struct
+ *
+ * Define the data struct to access av1 reference frames.
+ */
+typedef struct av1_ref_frame {
+  int idx;         /**< frame index to get (input) */
+  aom_image_t img; /**< img structure to populate (output) */
+} av1_ref_frame_t;
+
+/*!\cond */
+/*!\brief aom decoder control function parameter type
+ *
+ * defines the data type for each of AOM decoder control function requires
+ */
+AOM_CTRL_USE_TYPE(AOM_SET_REFERENCE, aom_ref_frame_t *)
+#define AOM_CTRL_AOM_SET_REFERENCE
+AOM_CTRL_USE_TYPE(AOM_COPY_REFERENCE, aom_ref_frame_t *)
+#define AOM_CTRL_AOM_COPY_REFERENCE
+AOM_CTRL_USE_TYPE(AOM_SET_POSTPROC, aom_postproc_cfg_t *)
+#define AOM_CTRL_AOM_SET_POSTPROC
+AOM_CTRL_USE_TYPE(AOM_SET_DBG_COLOR_REF_FRAME, int)
+#define AOM_CTRL_AOM_SET_DBG_COLOR_REF_FRAME
+AOM_CTRL_USE_TYPE(AOM_SET_DBG_COLOR_MB_MODES, int)
+#define AOM_CTRL_AOM_SET_DBG_COLOR_MB_MODES
+AOM_CTRL_USE_TYPE(AOM_SET_DBG_COLOR_B_MODES, int)
+#define AOM_CTRL_AOM_SET_DBG_COLOR_B_MODES
+AOM_CTRL_USE_TYPE(AOM_SET_DBG_DISPLAY_MV, int)
+#define AOM_CTRL_AOM_SET_DBG_DISPLAY_MV
+AOM_CTRL_USE_TYPE(AV1_GET_REFERENCE, av1_ref_frame_t *)
+#define AOM_CTRL_AV1_GET_REFERENCE
+AOM_CTRL_USE_TYPE(AV1_GET_NEW_FRAME_IMAGE, aom_image_t *)
+#define AOM_CTRL_AV1_GET_NEW_FRAME_IMAGE
+
+/*!\endcond */
+/*! @} - end defgroup aom */
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // AOM_AOM_H_
diff --git a/aom/vpx_codec.h b/aom/aom_codec.h
similarity index 68%
rename from aom/vpx_codec.h
rename to aom/aom_codec.h
index 107469f..b41a799 100644
--- a/aom/vpx_codec.h
+++ b/aom/aom_codec.h
@@ -22,28 +22,28 @@
  * video codec algorithm.
  *
  * An application instantiates a specific codec instance by using
- * vpx_codec_init() and a pointer to the algorithm's interface structure:
+ * aom_codec_init() and a pointer to the algorithm's interface structure:
  *     <pre>
  *     my_app.c:
- *       extern vpx_codec_iface_t my_codec;
+ *       extern aom_codec_iface_t my_codec;
  *       {
- *           vpx_codec_ctx_t algo;
- *           res = vpx_codec_init(&algo, &my_codec);
+ *           aom_codec_ctx_t algo;
+ *           res = aom_codec_init(&algo, &my_codec);
  *       }
  *     </pre>
  *
  * Once initialized, the instance is manged using other functions from
- * the vpx_codec_* family.
+ * the aom_codec_* family.
  */
-#ifndef VPX_VPX_CODEC_H_
-#define VPX_VPX_CODEC_H_
+#ifndef AOM_AOM_CODEC_H_
+#define AOM_AOM_CODEC_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include "./vpx_integer.h"
-#include "./vpx_image.h"
+#include "./aom_integer.h"
+#include "./aom_image.h"
 
 /*!\brief Decorator indicating a function is deprecated */
 #ifndef DEPRECATED
@@ -83,31 +83,31 @@
  * types, removing or reassigning enums, adding/removing/rearranging
  * fields to structures
  */
-#define VPX_CODEC_ABI_VERSION (3 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/
+#define AOM_CODEC_ABI_VERSION (3 + AOM_IMAGE_ABI_VERSION) /**<\hideinitializer*/
 
 /*!\brief Algorithm return codes */
 typedef enum {
   /*!\brief Operation completed without error */
-  VPX_CODEC_OK,
+  AOM_CODEC_OK,
 
   /*!\brief Unspecified error */
-  VPX_CODEC_ERROR,
+  AOM_CODEC_ERROR,
 
   /*!\brief Memory operation failed */
-  VPX_CODEC_MEM_ERROR,
+  AOM_CODEC_MEM_ERROR,
 
   /*!\brief ABI version mismatch */
-  VPX_CODEC_ABI_MISMATCH,
+  AOM_CODEC_ABI_MISMATCH,
 
   /*!\brief Algorithm does not have required capability */
-  VPX_CODEC_INCAPABLE,
+  AOM_CODEC_INCAPABLE,
 
   /*!\brief The given bitstream is not supported.
    *
    * The bitstream was unable to be parsed at the highest level. The decoder
    * is unable to proceed. This error \ref SHOULD be treated as fatal to the
    * stream. */
-  VPX_CODEC_UNSUP_BITSTREAM,
+  AOM_CODEC_UNSUP_BITSTREAM,
 
   /*!\brief Encoded bitstream uses an unsupported feature
    *
@@ -116,7 +116,7 @@
    * pictures from being properly decoded. This error \ref MAY be treated as
    * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
    */
-  VPX_CODEC_UNSUP_FEATURE,
+  AOM_CODEC_UNSUP_FEATURE,
 
   /*!\brief The coded data for this stream is corrupt or incomplete
    *
@@ -126,60 +126,60 @@
    * stream or \ref MAY be treated as fatal to the current GOP. If decoding
    * is continued for the current GOP, artifacts may be present.
    */
-  VPX_CODEC_CORRUPT_FRAME,
+  AOM_CODEC_CORRUPT_FRAME,
 
   /*!\brief An application-supplied parameter is not valid.
    *
    */
-  VPX_CODEC_INVALID_PARAM,
+  AOM_CODEC_INVALID_PARAM,
 
   /*!\brief An iterator reached the end of list.
    *
    */
-  VPX_CODEC_LIST_END
+  AOM_CODEC_LIST_END
 
-} vpx_codec_err_t;
+} aom_codec_err_t;
 
 /*! \brief Codec capabilities bitfield
  *
  *  Each codec advertises the capabilities it supports as part of its
- *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ *  ::aom_codec_iface_t interface structure. Capabilities are extra interfaces
  *  or functionality, and are not required to be supported.
  *
- *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ *  The available flags are specified by AOM_CODEC_CAP_* defines.
  */
-typedef long vpx_codec_caps_t;
-#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
-#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
+typedef long aom_codec_caps_t;
+#define AOM_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
+#define AOM_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
 
 /*! \brief Initialization-time Feature Enabling
  *
  *  Certain codec features must be known at initialization time, to allow for
  *  proper memory allocation.
  *
- *  The available flags are specified by VPX_CODEC_USE_* defines.
+ *  The available flags are specified by AOM_CODEC_USE_* defines.
  */
-typedef long vpx_codec_flags_t;
+typedef long aom_codec_flags_t;
 
 /*!\brief Codec interface structure.
  *
  * Contains function pointers and other data private to the codec
  * implementation. This structure is opaque to the application.
  */
-typedef const struct vpx_codec_iface vpx_codec_iface_t;
+typedef const struct aom_codec_iface aom_codec_iface_t;
 
 /*!\brief Codec private data structure.
  *
  * Contains data private to the codec implementation. This structure is opaque
  * to the application.
  */
-typedef struct vpx_codec_priv vpx_codec_priv_t;
+typedef struct aom_codec_priv aom_codec_priv_t;
 
 /*!\brief Iterator
  *
  * Opaque storage used for iterating over lists.
  */
-typedef const void *vpx_codec_iter_t;
+typedef const void *aom_codec_iter_t;
 
 /*!\brief Codec context structure
  *
@@ -189,31 +189,31 @@
  * may reference the 'name' member to get a printable description of the
  * algorithm.
  */
-typedef struct vpx_codec_ctx {
+typedef struct aom_codec_ctx {
   const char *name;             /**< Printable interface name */
-  vpx_codec_iface_t *iface;     /**< Interface pointers */
-  vpx_codec_err_t err;          /**< Last returned error */
+  aom_codec_iface_t *iface;     /**< Interface pointers */
+  aom_codec_err_t err;          /**< Last returned error */
   const char *err_detail;       /**< Detailed info, if available */
-  vpx_codec_flags_t init_flags; /**< Flags passed at init time */
+  aom_codec_flags_t init_flags; /**< Flags passed at init time */
   union {
     /**< Decoder Configuration Pointer */
-    const struct vpx_codec_dec_cfg *dec;
+    const struct aom_codec_dec_cfg *dec;
     /**< Encoder Configuration Pointer */
-    const struct vpx_codec_enc_cfg *enc;
+    const struct aom_codec_enc_cfg *enc;
     const void *raw;
   } config;               /**< Configuration pointer aliasing union */
-  vpx_codec_priv_t *priv; /**< Algorithm private storage */
-} vpx_codec_ctx_t;
+  aom_codec_priv_t *priv; /**< Algorithm private storage */
+} aom_codec_ctx_t;
 
 /*!\brief Bit depth for codec
  * *
  * This enumeration determines the bit depth of the codec.
  */
-typedef enum vpx_bit_depth {
-  VPX_BITS_8 = 8,   /**<  8 bits */
-  VPX_BITS_10 = 10, /**< 10 bits */
-  VPX_BITS_12 = 12, /**< 12 bits */
-} vpx_bit_depth_t;
+typedef enum aom_bit_depth {
+  AOM_BITS_8 = 8,   /**<  8 bits */
+  AOM_BITS_10 = 10, /**< 10 bits */
+  AOM_BITS_12 = 12, /**< 12 bits */
+} aom_bit_depth_t;
 
 /*!\brief Superblock size selection.
  *
@@ -221,19 +221,19 @@
  * either be fixed at 64x64 or 128x128 pixels, or it can be dynamically
  * selected by the encoder for each frame.
  */
-typedef enum vpx_superblock_size {
-  VPX_SUPERBLOCK_SIZE_64X64,   /**< Always use 64x64 superblocks. */
-  VPX_SUPERBLOCK_SIZE_128X128, /**< Always use 128x128 superblocks. */
-  VPX_SUPERBLOCK_SIZE_DYNAMIC  /**< Select superblock size dynamically. */
-} vpx_superblock_size_t;
+typedef enum aom_superblock_size {
+  AOM_SUPERBLOCK_SIZE_64X64,   /**< Always use 64x64 superblocks. */
+  AOM_SUPERBLOCK_SIZE_128X128, /**< Always use 128x128 superblocks. */
+  AOM_SUPERBLOCK_SIZE_DYNAMIC  /**< Select superblock size dynamically. */
+} aom_superblock_size_t;
 
 /*
  * Library Version Number Interface
  *
  * For example, see the following sample return values:
- *     vpx_codec_version()           (1<<16 | 2<<8 | 3)
- *     vpx_codec_version_str()       "v1.2.3-rc1-16-gec6a1ba"
- *     vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
+ *     aom_codec_version()           (1<<16 | 2<<8 | 3)
+ *     aom_codec_version_str()       "v1.2.3-rc1-16-gec6a1ba"
+ *     aom_codec_version_extra_str() "rc1-16-gec6a1ba"
  */
 
 /*!\brief Return the version information (as an integer)
@@ -246,22 +246,22 @@
  * in the future.
  *
  */
-int vpx_codec_version(void);
-#define VPX_VERSION_MAJOR(v) \
+int aom_codec_version(void);
+#define AOM_VERSION_MAJOR(v) \
   ((v >> 16) & 0xff) /**< extract major from packed version */
-#define VPX_VERSION_MINOR(v) \
+#define AOM_VERSION_MINOR(v) \
   ((v >> 8) & 0xff) /**< extract minor from packed version */
-#define VPX_VERSION_PATCH(v) \
+#define AOM_VERSION_PATCH(v) \
   ((v >> 0) & 0xff) /**< extract patch from packed version */
 
 /*!\brief Return the version major number */
-#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff)
+#define aom_codec_version_major() ((aom_codec_version() >> 16) & 0xff)
 
 /*!\brief Return the version minor number */
-#define vpx_codec_version_minor() ((vpx_codec_version() >> 8) & 0xff)
+#define aom_codec_version_minor() ((aom_codec_version() >> 8) & 0xff)
 
 /*!\brief Return the version patch number */
-#define vpx_codec_version_patch() ((vpx_codec_version() >> 0) & 0xff)
+#define aom_codec_version_patch() ((aom_codec_version() >> 0) & 0xff)
 
 /*!\brief Return the version information (as a string)
  *
@@ -272,24 +272,24 @@
  * release candidates, prerelease versions, etc.
  *
  */
-const char *vpx_codec_version_str(void);
+const char *aom_codec_version_str(void);
 
 /*!\brief Return the version information (as a string)
  *
  * Returns a printable "extra string". This is the component of the string
  * returned
- * by vpx_codec_version_str() following the three digit version number.
+ * by aom_codec_version_str() following the three digit version number.
  *
  */
-const char *vpx_codec_version_extra_str(void);
+const char *aom_codec_version_extra_str(void);
 
 /*!\brief Return the build configuration
  *
  * Returns a printable string containing an encoded version of the build
- * configuration. This may be useful to vpx support.
+ * configuration. This may be useful to aom support.
  *
  */
-const char *vpx_codec_build_config(void);
+const char *aom_codec_build_config(void);
 
 /*!\brief Return the name for a given interface
  *
@@ -298,7 +298,7 @@
  * \param[in]    iface     Interface pointer
  *
  */
-const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
+const char *aom_codec_iface_name(aom_codec_iface_t *iface);
 
 /*!\brief Convert error number to printable string
  *
@@ -310,7 +310,7 @@
  * \param[in]    err     Error number.
  *
  */
-const char *vpx_codec_err_to_string(vpx_codec_err_t err);
+const char *aom_codec_err_to_string(aom_codec_err_t err);
 
 /*!\brief Retrieve error synopsis for codec context
  *
@@ -322,7 +322,7 @@
  * \param[in]    ctx     Pointer to this instance's context.
  *
  */
-const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
+const char *aom_codec_error(aom_codec_ctx_t *ctx);
 
 /*!\brief Retrieve detailed error information for codec context
  *
@@ -334,7 +334,7 @@
  * \retval NULL
  *     No detailed information is available.
  */
-const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
+const char *aom_codec_error_detail(aom_codec_ctx_t *ctx);
 
 /* REQUIRED FUNCTIONS
  *
@@ -348,12 +348,12 @@
  *
  * \param[in] ctx   Pointer to this instance's context
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The codec algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory allocation failed.
  */
-vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+aom_codec_err_t aom_codec_destroy(aom_codec_ctx_t *ctx);
 
 /*!\brief Get the capabilities of an algorithm.
  *
@@ -362,7 +362,7 @@
  * \param[in] iface   Pointer to the algorithm interface
  *
  */
-vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+aom_codec_caps_t aom_codec_get_caps(aom_codec_iface_t *iface);
 
 /*!\brief Control algorithm
  *
@@ -372,46 +372,46 @@
  *
  * This wrapper function dispatches the request to the helper function
  * associated with the given ctrl_id. It tries to call this function
- * transparently, but will return #VPX_CODEC_ERROR if the request could not
+ * transparently, but will return #AOM_CODEC_ERROR if the request could not
  * be dispatched.
  *
  * Note that this function should not be used directly. Call the
- * #vpx_codec_control wrapper macro instead.
+ * #aom_codec_control wrapper macro instead.
  *
  * \param[in]     ctx              Pointer to this instance's context
  * \param[in]     ctrl_id          Algorithm specific control identifier
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The control request was processed.
- * \retval #VPX_CODEC_ERROR
+ * \retval #AOM_CODEC_ERROR
  *     The control request was not processed.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     The data was not valid.
  */
-vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...);
-#if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS
-#define vpx_codec_control(ctx, id, data) vpx_codec_control_(ctx, id, data)
-#define VPX_CTRL_USE_TYPE(id, typ)
-#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)
-#define VPX_CTRL_VOID(id, typ)
+aom_codec_err_t aom_codec_control_(aom_codec_ctx_t *ctx, int ctrl_id, ...);
+#if defined(AOM_DISABLE_CTRL_TYPECHECKS) && AOM_DISABLE_CTRL_TYPECHECKS
+#define aom_codec_control(ctx, id, data) aom_codec_control_(ctx, id, data)
+#define AOM_CTRL_USE_TYPE(id, typ)
+#define AOM_CTRL_USE_TYPE_DEPRECATED(id, typ)
+#define AOM_CTRL_VOID(id, typ)
 
 #else
-/*!\brief vpx_codec_control wrapper macro
+/*!\brief aom_codec_control wrapper macro
  *
  * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_().
+ * to aom_codec_control_().
  *
  * \internal
  * It works by dispatching the call to the control function through a wrapper
  * function named with the id parameter.
  */
-#define vpx_codec_control(ctx, id, data) \
-  vpx_codec_control_##id(ctx, id, data) /**<\hideinitializer*/
+#define aom_codec_control(ctx, id, data) \
+  aom_codec_control_##id(ctx, id, data) /**<\hideinitializer*/
 
-/*!\brief vpx_codec_control type definition macro
+/*!\brief aom_codec_control type definition macro
  *
  * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It defines the type of the argument for a given
+ * to aom_codec_control_(). It defines the type of the argument for a given
  * control identifier.
  *
  * \internal
@@ -419,18 +419,18 @@
  * the correctly typed arguments as a wrapper to the type-unsafe internal
  * function.
  */
-#define VPX_CTRL_USE_TYPE(id, typ)                                           \
-  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int, typ) \
+#define AOM_CTRL_USE_TYPE(id, typ)                                           \
+  static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *, int, typ) \
       UNUSED;                                                                \
                                                                              \
-  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx,        \
+  static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *ctx,        \
                                                 int ctrl_id, typ data) {     \
-    return vpx_codec_control_(ctx, ctrl_id, data);                           \
+    return aom_codec_control_(ctx, ctrl_id, data);                           \
   } /**<\hideinitializer*/
 
-/*!\brief vpx_codec_control deprecated type definition macro
+/*!\brief aom_codec_control deprecated type definition macro
  *
- * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
+ * Like #AOM_CTRL_USE_TYPE, but indicates that the specified control is
  * deprecated and should not be used. Consult the documentation for your
  * codec for more information.
  *
@@ -438,32 +438,32 @@
  * It defines a static function with the correctly typed arguments as a
  * wrapper to the type-unsafe internal function.
  */
-#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)                        \
-  DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \
-      vpx_codec_ctx_t *, int, typ) DEPRECATED UNUSED;                \
+#define AOM_CTRL_USE_TYPE_DEPRECATED(id, typ)                        \
+  DECLSPEC_DEPRECATED static aom_codec_err_t aom_codec_control_##id( \
+      aom_codec_ctx_t *, int, typ) DEPRECATED UNUSED;                \
                                                                      \
-  DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \
-      vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {                 \
-    return vpx_codec_control_(ctx, ctrl_id, data);                   \
+  DECLSPEC_DEPRECATED static aom_codec_err_t aom_codec_control_##id( \
+      aom_codec_ctx_t *ctx, int ctrl_id, typ data) {                 \
+    return aom_codec_control_(ctx, ctrl_id, data);                   \
   } /**<\hideinitializer*/
 
-/*!\brief vpx_codec_control void type definition macro
+/*!\brief aom_codec_control void type definition macro
  *
  * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It indicates that a given control identifier takes
+ * to aom_codec_control_(). It indicates that a given control identifier takes
  * no argument.
  *
  * \internal
  * It defines a static function without a data argument as a wrapper to the
  * type-unsafe internal function.
  */
-#define VPX_CTRL_VOID(id)                                               \
-  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int) \
+#define AOM_CTRL_VOID(id)                                               \
+  static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *, int) \
       UNUSED;                                                           \
                                                                         \
-  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx,   \
+  static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *ctx,   \
                                                 int ctrl_id) {          \
-    return vpx_codec_control_(ctx, ctrl_id);                            \
+    return aom_codec_control_(ctx, ctrl_id);                            \
   } /**<\hideinitializer*/
 
 #endif
@@ -472,4 +472,4 @@
 #ifdef __cplusplus
 }
 #endif
-#endif  // VPX_VPX_CODEC_H_
+#endif  // AOM_AOM_CODEC_H_
diff --git a/aom/aom_codec.mk b/aom/aom_codec.mk
new file mode 100644
index 0000000..9a31306
--- /dev/null
+++ b/aom/aom_codec.mk
@@ -0,0 +1,41 @@
+##
+##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+
+API_EXPORTS += exports
+
+API_SRCS-$(CONFIG_AV1_ENCODER) += aom.h
+API_SRCS-$(CONFIG_AV1_ENCODER) += aomcx.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += aom.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += aomcx.h
+
+API_SRCS-$(CONFIG_AV1_DECODER) += aom.h
+API_SRCS-$(CONFIG_AV1_DECODER) += aomdx.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += aom.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += aomdx.h
+
+API_DOC_SRCS-yes += aom_codec.h
+API_DOC_SRCS-yes += aom_decoder.h
+API_DOC_SRCS-yes += aom_encoder.h
+API_DOC_SRCS-yes += aom_frame_buffer.h
+API_DOC_SRCS-yes += aom_image.h
+
+API_SRCS-yes += src/aom_decoder.c
+API_SRCS-yes += aom_decoder.h
+API_SRCS-yes += src/aom_encoder.c
+API_SRCS-yes += aom_encoder.h
+API_SRCS-yes += internal/aom_codec_internal.h
+API_SRCS-yes += src/aom_codec.c
+API_SRCS-yes += src/aom_image.c
+API_SRCS-yes += aom_codec.h
+API_SRCS-yes += aom_codec.mk
+API_SRCS-yes += aom_frame_buffer.h
+API_SRCS-yes += aom_image.h
+API_SRCS-yes += aom_integer.h
diff --git a/aom/vpx_decoder.h b/aom/aom_decoder.h
similarity index 72%
rename from aom/vpx_decoder.h
rename to aom/aom_decoder.h
index 2fb3be1..affb6fc 100644
--- a/aom/vpx_decoder.h
+++ b/aom/aom_decoder.h
@@ -7,8 +7,8 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPX_VPX_DECODER_H_
-#define VPX_VPX_DECODER_H_
+#ifndef AOM_AOM_DECODER_H_
+#define AOM_AOM_DECODER_H_
 
 /*!\defgroup decoder Decoder Algorithm Interface
  * \ingroup codec
@@ -29,8 +29,8 @@
 extern "C" {
 #endif
 
-#include "./vpx_codec.h"
-#include "./vpx_frame_buffer.h"
+#include "./aom_codec.h"
+#include "./aom_frame_buffer.h"
 
 /*!\brief Current ABI version number
  *
@@ -40,45 +40,45 @@
  * types, removing or reassigning enums, adding/removing/rearranging
  * fields to structures
  */
-#define VPX_DECODER_ABI_VERSION \
-  (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
+#define AOM_DECODER_ABI_VERSION \
+  (3 + AOM_CODEC_ABI_VERSION) /**<\hideinitializer*/
 
 /*! \brief Decoder capabilities bitfield
  *
  *  Each decoder advertises the capabilities it supports as part of its
- *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ *  ::aom_codec_iface_t interface structure. Capabilities are extra interfaces
  *  or functionality, and are not required to be supported by a decoder.
  *
- *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ *  The available flags are specified by AOM_CODEC_CAP_* defines.
  */
-#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
-#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
-#define VPX_CODEC_CAP_POSTPROC 0x40000  /**< Can postprocess decoded frame */
+#define AOM_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
+#define AOM_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
+#define AOM_CODEC_CAP_POSTPROC 0x40000  /**< Can postprocess decoded frame */
 /*!\brief Can conceal errors due to packet loss */
-#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000
+#define AOM_CODEC_CAP_ERROR_CONCEALMENT 0x80000
 /*!\brief Can receive encoded frames one fragment at a time */
-#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000
+#define AOM_CODEC_CAP_INPUT_FRAGMENTS 0x100000
 
 /*! \brief Initialization-time Feature Enabling
  *
  *  Certain codec features must be known at initialization time, to allow for
  *  proper memory allocation.
  *
- *  The available flags are specified by VPX_CODEC_USE_* defines.
+ *  The available flags are specified by AOM_CODEC_USE_* defines.
  */
 /*!\brief Can support frame-based multi-threading */
-#define VPX_CODEC_CAP_FRAME_THREADING 0x200000
+#define AOM_CODEC_CAP_FRAME_THREADING 0x200000
 /*!brief Can support external frame buffers */
-#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000
+#define AOM_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000
 
-#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
+#define AOM_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
 /*!\brief Conceal errors in decoded frames */
-#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000
+#define AOM_CODEC_USE_ERROR_CONCEALMENT 0x20000
 /*!\brief The input frame should be passed to the decoder one fragment at a
  * time */
-#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000
+#define AOM_CODEC_USE_INPUT_FRAGMENTS 0x40000
 /*!\brief Enable frame-based multi-threading */
-#define VPX_CODEC_USE_FRAME_THREADING 0x80000
+#define AOM_CODEC_USE_FRAME_THREADING 0x80000
 
 /*!\brief Stream properties
  *
@@ -86,12 +86,12 @@
  * stream. Algorithms may extend this structure with data specific
  * to their bitstream by setting the sz member appropriately.
  */
-typedef struct vpx_codec_stream_info {
+typedef struct aom_codec_stream_info {
   unsigned int sz;    /**< Size of this structure */
   unsigned int w;     /**< Width (or 0 for unknown/default) */
   unsigned int h;     /**< Height (or 0 for unknown/default) */
   unsigned int is_kf; /**< Current frame is a keyframe */
-} vpx_codec_stream_info_t;
+} aom_codec_stream_info_t;
 
 /* REQUIRED FUNCTIONS
  *
@@ -104,16 +104,16 @@
  * This structure is used to pass init time configuration options to the
  * decoder.
  */
-typedef struct vpx_codec_dec_cfg {
+typedef struct aom_codec_dec_cfg {
   unsigned int threads; /**< Maximum number of threads to use, default 1 */
   unsigned int w;       /**< Width */
   unsigned int h;       /**< Height */
-} vpx_codec_dec_cfg_t;  /**< alias for struct vpx_codec_dec_cfg */
+} aom_codec_dec_cfg_t;  /**< alias for struct aom_codec_dec_cfg */
 
 /*!\brief Initialize a decoder instance
  *
  * Initializes a decoder context using the given interface. Applications
- * should call the vpx_codec_dec_init convenience macro instead of this
+ * should call the aom_codec_dec_init convenience macro instead of this
  * function directly, to ensure that the ABI version number parameter
  * is properly initialized.
  *
@@ -124,25 +124,25 @@
  * \param[in]    ctx     Pointer to this instance's context.
  * \param[in]    iface   Pointer to the algorithm interface to use.
  * \param[in]    cfg     Configuration to use, if known. May be NULL.
- * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    flags   Bitfield of AOM_CODEC_USE_* flags
  * \param[in]    ver     ABI version number. Must be set to
- *                       VPX_DECODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
+ *                       AOM_DECODER_ABI_VERSION
+ * \retval #AOM_CODEC_OK
  *     The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory allocation failed.
  */
-vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
-                                       vpx_codec_iface_t *iface,
-                                       const vpx_codec_dec_cfg_t *cfg,
-                                       vpx_codec_flags_t flags, int ver);
+aom_codec_err_t aom_codec_dec_init_ver(aom_codec_ctx_t *ctx,
+                                       aom_codec_iface_t *iface,
+                                       const aom_codec_dec_cfg_t *cfg,
+                                       aom_codec_flags_t flags, int ver);
 
-/*!\brief Convenience macro for vpx_codec_dec_init_ver()
+/*!\brief Convenience macro for aom_codec_dec_init_ver()
  *
  * Ensures the ABI version parameter is properly set.
  */
-#define vpx_codec_dec_init(ctx, iface, cfg, flags) \
-  vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
+#define aom_codec_dec_init(ctx, iface, cfg, flags) \
+  aom_codec_dec_init_ver(ctx, iface, cfg, flags, AOM_DECODER_ABI_VERSION)
 
 /*!\brief Parse stream info from a buffer
  *
@@ -158,13 +158,13 @@
  *                         clobbered by the algorithm. This parameter \ref MAY
  *                         be NULL.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
+aom_codec_err_t aom_codec_peek_stream_info(aom_codec_iface_t *iface,
                                            const uint8_t *data,
                                            unsigned int data_sz,
-                                           vpx_codec_stream_info_t *si);
+                                           aom_codec_stream_info_t *si);
 
 /*!\brief Return information about the current stream.
  *
@@ -176,11 +176,11 @@
  *                         clobbered by the algorithm. This parameter \ref MAY
  *                         be NULL.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
-                                          vpx_codec_stream_info_t *si);
+aom_codec_err_t aom_codec_get_stream_info(aom_codec_ctx_t *ctx,
+                                          aom_codec_stream_info_t *si);
 
 /*!\brief Decode data
  *
@@ -189,7 +189,7 @@
  * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
  * time stamp) order. Frames produced will always be in PTS (presentation
  * time stamp) order.
- * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
+ * If the decoder is configured with AOM_CODEC_USE_INPUT_FRAGMENTS enabled,
  * data and data_sz can contain a fragment of the encoded frame. Fragment
  * \#n must contain at least partition \#n, but can also contain subsequent
  * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
@@ -199,7 +199,7 @@
  *
  * \param[in] ctx          Pointer to this instance's context
  * \param[in] data         Pointer to this block of new coded data. If
- *                         NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
+ *                         NULL, an AOM_CODEC_CB_PUT_FRAME event is posted
  *                         for the previously decoded frame.
  * \param[in] data_sz      Size of the coded data, in bytes.
  * \param[in] user_priv    Application specific data to associate with
@@ -207,12 +207,12 @@
  * \param[in] deadline     Soft deadline the decoder should attempt to meet,
  *                         in us. Set to zero for unlimited.
  *
- * \return Returns #VPX_CODEC_OK if the coded data was processed completely
+ * \return Returns #AOM_CODEC_OK if the coded data was processed completely
  *         and future pictures can be decoded without error. Otherwise,
- *         see the descriptions of the other error codes in ::vpx_codec_err_t
+ *         see the descriptions of the other error codes in ::aom_codec_err_t
  *         for recoverability capabilities.
  */
-vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
+aom_codec_err_t aom_codec_decode(aom_codec_ctx_t *ctx, const uint8_t *data,
                                  unsigned int data_sz, void *user_priv,
                                  long deadline);
 
@@ -223,8 +223,8 @@
  * complete when this function returns NULL.
  *
  * The list of available frames becomes valid upon completion of the
- * vpx_codec_decode call, and remains valid until the next call to
- * vpx_codec_decode.
+ * aom_codec_decode call, and remains valid until the next call to
+ * aom_codec_decode.
  *
  * \param[in]     ctx      Pointer to this instance's context
  * \param[in,out] iter     Iterator storage, initialized to NULL
@@ -232,15 +232,15 @@
  * \return Returns a pointer to an image, if one is ready for display. Frames
  *         produced will always be in PTS (presentation time stamp) order.
  */
-vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter);
+aom_image_t *aom_codec_get_frame(aom_codec_ctx_t *ctx, aom_codec_iter_t *iter);
 
 /*!\defgroup cap_put_frame Frame-Based Decoding Functions
  *
  * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these
+ * that advertise the AOM_CODEC_CAP_PUT_FRAME capability. Calling these
  * functions
  * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
+ * code being returned, usually AOM_CODEC_ERROR
  * @{
  */
 
@@ -249,8 +249,8 @@
  * This callback is invoked by the decoder to notify the application of
  * the availability of decoded image data.
  */
-typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
-                                            const vpx_image_t *img);
+typedef void (*aom_codec_put_frame_cb_fn_t)(void *user_priv,
+                                            const aom_image_t *img);
 
 /*!\brief Register for notification of frame completion.
  *
@@ -261,14 +261,14 @@
  * \param[in] cb           Pointer to the callback function
  * \param[in] user_priv    User's private data
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
+ * \retval #AOM_CODEC_ERROR
  *     Decoder context not initialized, or algorithm not capable of
  *     posting slice completion.
  */
-vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_put_frame_cb_fn_t cb,
+aom_codec_err_t aom_codec_register_put_frame_cb(aom_codec_ctx_t *ctx,
+                                                aom_codec_put_frame_cb_fn_t cb,
                                                 void *user_priv);
 
 /*!@} - end defgroup cap_put_frame */
@@ -276,10 +276,10 @@
 /*!\defgroup cap_put_slice Slice-Based Decoding Functions
  *
  * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these
+ * that advertise the AOM_CODEC_CAP_PUT_SLICE capability. Calling these
  * functions
  * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
+ * code being returned, usually AOM_CODEC_ERROR
  * @{
  */
 
@@ -288,10 +288,10 @@
  * This callback is invoked by the decoder to notify the application of
  * the availability of partially decoded image data. The
  */
-typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
-                                            const vpx_image_t *img,
-                                            const vpx_image_rect_t *valid,
-                                            const vpx_image_rect_t *update);
+typedef void (*aom_codec_put_slice_cb_fn_t)(void *user_priv,
+                                            const aom_image_t *img,
+                                            const aom_image_rect_t *valid,
+                                            const aom_image_rect_t *update);
 
 /*!\brief Register for notification of slice completion.
  *
@@ -302,14 +302,14 @@
  * \param[in] cb           Pointer to the callback function
  * \param[in] user_priv    User's private data
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
+ * \retval #AOM_CODEC_ERROR
  *     Decoder context not initialized, or algorithm not capable of
  *     posting slice completion.
  */
-vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_put_slice_cb_fn_t cb,
+aom_codec_err_t aom_codec_register_put_slice_cb(aom_codec_ctx_t *ctx,
+                                                aom_codec_put_slice_cb_fn_t cb,
                                                 void *user_priv);
 
 /*!@} - end defgroup cap_put_slice*/
@@ -317,12 +317,12 @@
 /*!\defgroup cap_external_frame_buffer External Frame Buffer Functions
  *
  * The following section is required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability.
+ * that advertise the AOM_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability.
  * Calling this function for codecs that don't advertise this capability
- * will result in an error code being returned, usually VPX_CODEC_ERROR.
+ * will result in an error code being returned, usually AOM_CODEC_ERROR.
  *
  * \note
- * Currently this only works with VP9.
+ * Currently this only works with AV1.
  * @{
  */
 
@@ -339,22 +339,22 @@
  * \param[in] cb_release   Pointer to the release callback function
  * \param[in] cb_priv      Callback's private data
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     External frame buffers will be used by libaom.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     One or more of the callbacks were NULL.
- * \retval #VPX_CODEC_ERROR
+ * \retval #AOM_CODEC_ERROR
  *     Decoder context not initialized, or algorithm not capable of
  *     using external frame buffers.
  *
  * \note
- * When decoding VP9, the application may be required to pass in at least
- * #VPX_MAXIMUM_WORK_BUFFERS external frame
+ * When decoding AV1, the application may be required to pass in at least
+ * #AOM_MAXIMUM_WORK_BUFFERS external frame
  * buffers.
  */
-vpx_codec_err_t vpx_codec_set_frame_buffer_functions(
-    vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
-    vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
+aom_codec_err_t aom_codec_set_frame_buffer_functions(
+    aom_codec_ctx_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
+    aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
 
 /*!@} - end defgroup cap_external_frame_buffer */
 
@@ -362,4 +362,4 @@
 #ifdef __cplusplus
 }
 #endif
-#endif  // VPX_VPX_DECODER_H_
+#endif  // AOM_AOM_DECODER_H_
diff --git a/aom/vpx_encoder.h b/aom/aom_encoder.h
similarity index 78%
rename from aom/vpx_encoder.h
rename to aom/aom_encoder.h
index 62c3ce0..f0c3d2d 100644
--- a/aom/vpx_encoder.h
+++ b/aom/aom_encoder.h
@@ -7,8 +7,8 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPX_VPX_ENCODER_H_
-#define VPX_VPX_ENCODER_H_
+#ifndef AOM_AOM_ENCODER_H_
+#define AOM_AOM_ENCODER_H_
 
 /*!\defgroup encoder Encoder Algorithm Interface
  * \ingroup codec
@@ -29,7 +29,7 @@
 extern "C" {
 #endif
 
-#include "./vpx_codec.h"
+#include "./aom_codec.h"
 
 /*!\brief Current ABI version number
  *
@@ -39,58 +39,58 @@
  * types, removing or reassigning enums, adding/removing/rearranging
  * fields to structures
  */
-#define VPX_ENCODER_ABI_VERSION \
-  (5 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
+#define AOM_ENCODER_ABI_VERSION \
+  (5 + AOM_CODEC_ABI_VERSION) /**<\hideinitializer*/
 
 /*! \brief Encoder capabilities bitfield
  *
  *  Each encoder advertises the capabilities it supports as part of its
- *  ::vpx_codec_iface_t interface structure. Capabilities are extra
+ *  ::aom_codec_iface_t interface structure. Capabilities are extra
  *  interfaces or functionality, and are not required to be supported
  *  by an encoder.
  *
- *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ *  The available flags are specified by AOM_CODEC_CAP_* defines.
  */
-#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */
+#define AOM_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */
 
 /*! Can output one partition at a time. Each partition is returned in its
- *  own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
+ *  own AOM_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
  *  every partition but the last. In this mode all frames are always
  *  returned partition by partition.
  */
-#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000
+#define AOM_CODEC_CAP_OUTPUT_PARTITION 0x20000
 
 /*! Can support input images at greater than 8 bitdepth.
  */
-#define VPX_CODEC_CAP_HIGHBITDEPTH 0x40000
+#define AOM_CODEC_CAP_HIGHBITDEPTH 0x40000
 
 /*! \brief Initialization-time Feature Enabling
  *
  *  Certain codec features must be known at initialization time, to allow
  *  for proper memory allocation.
  *
- *  The available flags are specified by VPX_CODEC_USE_* defines.
+ *  The available flags are specified by AOM_CODEC_USE_* defines.
  */
-#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */
+#define AOM_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */
 /*!\brief Make the encoder output one  partition at a time. */
-#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000
-#define VPX_CODEC_USE_HIGHBITDEPTH 0x40000 /**< Use high bitdepth */
+#define AOM_CODEC_USE_OUTPUT_PARTITION 0x20000
+#define AOM_CODEC_USE_HIGHBITDEPTH 0x40000 /**< Use high bitdepth */
 
 /*!\brief Generic fixed size buffer structure
  *
  * This structure is able to hold a reference to any fixed size buffer.
  */
-typedef struct vpx_fixed_buf {
+typedef struct aom_fixed_buf {
   void *buf;       /**< Pointer to the data */
   size_t sz;       /**< Length of the buffer, in chars */
-} vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
+} aom_fixed_buf_t; /**< alias for struct aom_fixed_buf */
 
 /*!\brief Time Stamp Type
  *
  * An integer, which when multiplied by the stream's time base, provides
  * the absolute time of a sample.
  */
-typedef int64_t vpx_codec_pts_t;
+typedef int64_t aom_codec_pts_t;
 
 /*!\brief Compressed Frame Flags
  *
@@ -99,43 +99,43 @@
  * can be used by an algorithm to provide additional detail, for example to
  * support frame types that are codec specific (MPEG-1 D-frames for example)
  */
-typedef uint32_t vpx_codec_frame_flags_t;
-#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */
+typedef uint32_t aom_codec_frame_flags_t;
+#define AOM_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */
 /*!\brief frame can be dropped without affecting the stream (no future frame
  * depends on this one) */
-#define VPX_FRAME_IS_DROPPABLE 0x2
+#define AOM_FRAME_IS_DROPPABLE 0x2
 /*!\brief frame should be decoded but will not be shown */
-#define VPX_FRAME_IS_INVISIBLE 0x4
+#define AOM_FRAME_IS_INVISIBLE 0x4
 /*!\brief this is a fragment of the encoded frame */
-#define VPX_FRAME_IS_FRAGMENT 0x8
+#define AOM_FRAME_IS_FRAGMENT 0x8
 
 /*!\brief Error Resilient flags
  *
  * These flags define which error resilient features to enable in the
  * encoder. The flags are specified through the
- * vpx_codec_enc_cfg::g_error_resilient variable.
+ * aom_codec_enc_cfg::g_error_resilient variable.
  */
-typedef uint32_t vpx_codec_er_flags_t;
+typedef uint32_t aom_codec_er_flags_t;
 /*!\brief Improve resiliency against losses of whole frames */
-#define VPX_ERROR_RESILIENT_DEFAULT 0x1
+#define AOM_ERROR_RESILIENT_DEFAULT 0x1
 /*!\brief The frame partitions are independently decodable by the bool decoder,
  * meaning that partitions can be decoded even though earlier partitions have
  * been lost. Note that intra prediction is still done over the partition
  * boundary. */
-#define VPX_ERROR_RESILIENT_PARTITIONS 0x2
+#define AOM_ERROR_RESILIENT_PARTITIONS 0x2
 
 /*!\brief Encoder output packet variants
  *
  * This enumeration lists the different kinds of data packets that can be
- * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
+ * returned by calls to aom_codec_get_cx_data(). Algorithms \ref MAY
  * extend this list to provide additional functionality.
  */
-enum vpx_codec_cx_pkt_kind {
-  VPX_CODEC_CX_FRAME_PKT,    /**< Compressed video frame */
-  VPX_CODEC_STATS_PKT,       /**< Two-pass statistics for this frame */
-  VPX_CODEC_FPMB_STATS_PKT,  /**< first pass mb statistics for this frame */
-  VPX_CODEC_PSNR_PKT,        /**< PSNR statistics for this frame */
-  VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions  */
+enum aom_codec_cx_pkt_kind {
+  AOM_CODEC_CX_FRAME_PKT,    /**< Compressed video frame */
+  AOM_CODEC_STATS_PKT,       /**< Two-pass statistics for this frame */
+  AOM_CODEC_FPMB_STATS_PKT,  /**< first pass mb statistics for this frame */
+  AOM_CODEC_PSNR_PKT,        /**< PSNR statistics for this frame */
+  AOM_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions  */
 };
 
 /*!\brief Encoder output packet
@@ -143,87 +143,87 @@
  * This structure contains the different kinds of output data the encoder
  * may produce while compressing a frame.
  */
-typedef struct vpx_codec_cx_pkt {
-  enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
+typedef struct aom_codec_cx_pkt {
+  enum aom_codec_cx_pkt_kind kind; /**< packet variant */
   union {
     struct {
       void *buf; /**< compressed data buffer */
       size_t sz; /**< length of compressed data */
       /*!\brief time stamp to show frame (in timebase units) */
-      vpx_codec_pts_t pts;
+      aom_codec_pts_t pts;
       /*!\brief duration to show frame (in timebase units) */
       unsigned long duration;
-      vpx_codec_frame_flags_t flags; /**< flags for this frame */
+      aom_codec_frame_flags_t flags; /**< flags for this frame */
       /*!\brief the partition id defines the decoding order of the partitions.
        * Only applicable when "output partition" mode is enabled. First
        * partition has id 0.*/
       int partition_id;
     } frame;                            /**< data for compressed frame packet */
-    vpx_fixed_buf_t twopass_stats;      /**< data for two-pass packet */
-    vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */
-    struct vpx_psnr_pkt {
+    aom_fixed_buf_t twopass_stats;      /**< data for two-pass packet */
+    aom_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */
+    struct aom_psnr_pkt {
       unsigned int samples[4]; /**< Number of samples, total/y/u/v */
       uint64_t sse[4];         /**< sum squared error, total/y/u/v */
       double psnr[4];          /**< PSNR, total/y/u/v */
     } psnr;                    /**< data for PSNR packet */
-    vpx_fixed_buf_t raw;       /**< data for arbitrary packets */
+    aom_fixed_buf_t raw;       /**< data for arbitrary packets */
 
     /* This packet size is fixed to allow codecs to extend this
      * interface without having to manage storage for raw packets,
      * i.e., if it's smaller than 128 bytes, you can store in the
      * packet list directly.
      */
-    char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
+    char pad[128 - sizeof(enum aom_codec_cx_pkt_kind)]; /**< fixed sz */
   } data;                                               /**< packet data */
-} vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
+} aom_codec_cx_pkt_t; /**< alias for struct aom_codec_cx_pkt */
 
 /*!\brief Rational Number
  *
  * This structure holds a fractional value.
  */
-typedef struct vpx_rational {
+typedef struct aom_rational {
   int num;        /**< fraction numerator */
   int den;        /**< fraction denominator */
-} vpx_rational_t; /**< alias for struct vpx_rational */
+} aom_rational_t; /**< alias for struct aom_rational */
 
 /*!\brief Multi-pass Encoding Pass */
-enum vpx_enc_pass {
-  VPX_RC_ONE_PASS,   /**< Single pass mode */
-  VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
-  VPX_RC_LAST_PASS   /**< Final pass of multi-pass mode */
+enum aom_enc_pass {
+  AOM_RC_ONE_PASS,   /**< Single pass mode */
+  AOM_RC_FIRST_PASS, /**< First pass of multi-pass mode */
+  AOM_RC_LAST_PASS   /**< Final pass of multi-pass mode */
 };
 
 /*!\brief Rate control mode */
-enum vpx_rc_mode {
-  VPX_VBR, /**< Variable Bit Rate (VBR) mode */
-  VPX_CBR, /**< Constant Bit Rate (CBR) mode */
-  VPX_CQ,  /**< Constrained Quality (CQ)  mode */
-  VPX_Q,   /**< Constant Quality (Q) mode */
+enum aom_rc_mode {
+  AOM_VBR, /**< Variable Bit Rate (VBR) mode */
+  AOM_CBR, /**< Constant Bit Rate (CBR) mode */
+  AOM_CQ,  /**< Constrained Quality (CQ)  mode */
+  AOM_Q,   /**< Constant Quality (Q) mode */
 };
 
 /*!\brief Keyframe placement mode.
  *
  * This enumeration determines whether keyframes are placed automatically by
  * the encoder or whether this behavior is disabled. Older releases of this
- * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
+ * SDK were implemented such that AOM_KF_FIXED meant keyframes were disabled.
  * This name is confusing for this behavior, so the new symbols to be used
- * are VPX_KF_AUTO and VPX_KF_DISABLED.
+ * are AOM_KF_AUTO and AOM_KF_DISABLED.
  */
-enum vpx_kf_mode {
-  VPX_KF_FIXED,       /**< deprecated, implies VPX_KF_DISABLED */
-  VPX_KF_AUTO,        /**< Encoder determines optimal placement automatically */
-  VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
+enum aom_kf_mode {
+  AOM_KF_FIXED,       /**< deprecated, implies AOM_KF_DISABLED */
+  AOM_KF_AUTO,        /**< Encoder determines optimal placement automatically */
+  AOM_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
 };
 
 /*!\brief Encoded Frame Flags
  *
- * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
+ * This type indicates a bitfield to be passed to aom_codec_encode(), defining
  * per-frame boolean values. By convention, bits common to all codecs will be
- * named VPX_EFLAG_*, and bits specific to an algorithm will be named
+ * named AOM_EFLAG_*, and bits specific to an algorithm will be named
  * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
  */
-typedef long vpx_enc_frame_flags_t;
-#define VPX_EFLAG_FORCE_KF (1 << 0) /**< Force this frame to be a keyframe */
+typedef long aom_enc_frame_flags_t;
+#define AOM_EFLAG_FORCE_KF (1 << 0) /**< Force this frame to be a keyframe */
 
 /*!\brief Encoder configuration structure
  *
@@ -231,7 +231,7 @@
  * across all codecs. This doesn't imply that all codecs support all features,
  * however.
  */
-typedef struct vpx_codec_enc_cfg {
+typedef struct aom_codec_enc_cfg {
   /*
    * generic settings (g)
    */
@@ -285,9 +285,9 @@
    *
    * This value identifies the bit_depth of the codec,
    * Only certain bit-depths are supported as identified in the
-   * vpx_bit_depth_t enum.
+   * aom_bit_depth_t enum.
    */
-  vpx_bit_depth_t g_bit_depth;
+  aom_bit_depth_t g_bit_depth;
 
   /*!\brief Bit-depth of the input frames
    *
@@ -309,7 +309,7 @@
    * \ref RECOMMENDED method is to set the timebase to that of the parent
    * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
    */
-  struct vpx_rational g_timebase;
+  struct aom_rational g_timebase;
 
   /*!\brief Enable error resilient modes.
    *
@@ -317,14 +317,14 @@
    * it should enable to take measures for streaming over lossy or noisy
    * links.
    */
-  vpx_codec_er_flags_t g_error_resilient;
+  aom_codec_er_flags_t g_error_resilient;
 
   /*!\brief Multi-pass Encoding Mode
    *
    * This value should be set to the current phase for multi-pass encoding.
-   * For single pass, set to #VPX_RC_ONE_PASS.
+   * For single pass, set to #AOM_RC_ONE_PASS.
    */
-  enum vpx_enc_pass g_pass;
+  enum aom_enc_pass g_pass;
 
   /*!\brief Allow lagged encoding
    *
@@ -352,7 +352,7 @@
    * trade-off is often acceptable, but for many applications is not. It can
    * be disabled in these cases.
    *
-   * Note that not all codecs support this feature. All vpx VPx codecs do.
+   * Note that not all codecs support this feature. All aom AVx codecs do.
    * For other codecs, consult the documentation for that algorithm.
    *
    * This threshold is described as a percentage of the target data buffer.
@@ -409,21 +409,21 @@
    * bandwidth link, as from a local disk, where higher variations in
    * bitrate are acceptable.
    */
-  enum vpx_rc_mode rc_end_usage;
+  enum aom_rc_mode rc_end_usage;
 
   /*!\brief Two-pass stats buffer.
    *
    * A buffer containing all of the stats packets produced in the first
    * pass, concatenated.
    */
-  vpx_fixed_buf_t rc_twopass_stats_in;
+  aom_fixed_buf_t rc_twopass_stats_in;
 
   /*!\brief first pass mb stats buffer.
    *
    * A buffer containing all of the first pass mb stats packets produced
    * in the first pass, concatenated.
    */
-  vpx_fixed_buf_t rc_firstpass_mb_stats_in;
+  aom_fixed_buf_t rc_firstpass_mb_stats_in;
 
   /*!\brief Target data rate
    *
@@ -441,7 +441,7 @@
    * encoded image. The range of valid values for the quantizer is codec
    * specific. Consult the documentation for the codec to determine the
    * values to use. To determine the range programmatically, call
-   * vpx_codec_enc_config_default() with a usage value of 0.
+   * aom_codec_enc_config_default() with a usage value of 0.
    */
   unsigned int rc_min_quantizer;
 
@@ -451,7 +451,7 @@
    * encoded image. The range of valid values for the quantizer is codec
    * specific. Consult the documentation for the codec to determine the
    * values to use. To determine the range programmatically, call
-   * vpx_codec_enc_config_default() with a usage value of 0.
+   * aom_codec_enc_config_default() with a usage value of 0.
    */
   unsigned int rc_max_quantizer;
 
@@ -554,7 +554,7 @@
    * fixed interval, or determine the optimal placement automatically
    * (as governed by the #kf_min_dist and #kf_max_dist parameters)
    */
-  enum vpx_kf_mode kf_mode;
+  enum aom_kf_mode kf_mode;
 
   /*!\brief Keyframe minimum interval
    *
@@ -573,12 +573,12 @@
    * equal to kf_max_dist for a fixed interval.
    */
   unsigned int kf_max_dist;
-} vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
+} aom_codec_enc_cfg_t; /**< alias for struct aom_codec_enc_cfg */
 
 /*!\brief Initialize an encoder instance
  *
  * Initializes a encoder context using the given interface. Applications
- * should call the vpx_codec_enc_init convenience macro instead of this
+ * should call the aom_codec_enc_init convenience macro instead of this
  * function directly, to ensure that the ABI version number parameter
  * is properly initialized.
  *
@@ -589,30 +589,30 @@
  * \param[in]    ctx     Pointer to this instance's context.
  * \param[in]    iface   Pointer to the algorithm interface to use.
  * \param[in]    cfg     Configuration to use, if known. May be NULL.
- * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    flags   Bitfield of AOM_CODEC_USE_* flags
  * \param[in]    ver     ABI version number. Must be set to
- *                       VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
+ *                       AOM_ENCODER_ABI_VERSION
+ * \retval #AOM_CODEC_OK
  *     The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory allocation failed.
  */
-vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
-                                       vpx_codec_iface_t *iface,
-                                       const vpx_codec_enc_cfg_t *cfg,
-                                       vpx_codec_flags_t flags, int ver);
+aom_codec_err_t aom_codec_enc_init_ver(aom_codec_ctx_t *ctx,
+                                       aom_codec_iface_t *iface,
+                                       const aom_codec_enc_cfg_t *cfg,
+                                       aom_codec_flags_t flags, int ver);
 
-/*!\brief Convenience macro for vpx_codec_enc_init_ver()
+/*!\brief Convenience macro for aom_codec_enc_init_ver()
  *
  * Ensures the ABI version parameter is properly set.
  */
-#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
-  vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
+#define aom_codec_enc_init(ctx, iface, cfg, flags) \
+  aom_codec_enc_init_ver(ctx, iface, cfg, flags, AOM_ENCODER_ABI_VERSION)
 
 /*!\brief Initialize multi-encoder instance
  *
  * Initializes multi-encoder context using the given interface.
- * Applications should call the vpx_codec_enc_init_multi convenience macro
+ * Applications should call the aom_codec_enc_init_multi convenience macro
  * instead of this function directly, to ensure that the ABI version number
  * parameter is properly initialized.
  *
@@ -620,26 +620,26 @@
  * \param[in]    iface   Pointer to the algorithm interface to use.
  * \param[in]    cfg     Configuration to use, if known. May be NULL.
  * \param[in]    num_enc Total number of encoders.
- * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    flags   Bitfield of AOM_CODEC_USE_* flags
  * \param[in]    dsf     Pointer to down-sampling factors.
  * \param[in]    ver     ABI version number. Must be set to
- *                       VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
+ *                       AOM_ENCODER_ABI_VERSION
+ * \retval #AOM_CODEC_OK
  *     The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory allocation failed.
  */
-vpx_codec_err_t vpx_codec_enc_init_multi_ver(
-    vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg,
-    int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver);
+aom_codec_err_t aom_codec_enc_init_multi_ver(
+    aom_codec_ctx_t *ctx, aom_codec_iface_t *iface, aom_codec_enc_cfg_t *cfg,
+    int num_enc, aom_codec_flags_t flags, aom_rational_t *dsf, int ver);
 
-/*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+/*!\brief Convenience macro for aom_codec_enc_init_multi_ver()
  *
  * Ensures the ABI version parameter is properly set.
  */
-#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
-  vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf,   \
-                               VPX_ENCODER_ABI_VERSION)
+#define aom_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
+  aom_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf,   \
+                               AOM_ENCODER_ABI_VERSION)
 
 /*!\brief Get a default configuration
  *
@@ -651,17 +651,17 @@
  *
  * \param[in]    iface     Pointer to the algorithm interface to use.
  * \param[out]   cfg       Configuration buffer to populate.
- * \param[in]    reserved  Must set to 0 for VP8 and VP9.
+ * \param[in]    reserved  Must be set to 0 for VP8 and AV1.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
+ * \retval #AOM_CODEC_INCAPABLE
  *     Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     A parameter was NULL, or the usage value was not recognized.
  */
-vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
-                                             vpx_codec_enc_cfg_t *cfg,
+aom_codec_err_t aom_codec_enc_config_default(aom_codec_iface_t *iface,
+                                             aom_codec_enc_cfg_t *cfg,
                                              unsigned int reserved);
 
 /*!\brief Set or change configuration
@@ -671,15 +671,15 @@
  * \param[in]    ctx     Pointer to this instance's context
  * \param[in]    cfg     Configuration buffer to use
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
+ * \retval #AOM_CODEC_INCAPABLE
  *     Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     A parameter was NULL, or the usage value was not recognized.
  */
-vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
-                                         const vpx_codec_enc_cfg_t *cfg);
+aom_codec_err_t aom_codec_enc_config_set(aom_codec_ctx_t *ctx,
+                                         const aom_codec_enc_cfg_t *cfg);
 
 /*!\brief Get global stream headers
  *
@@ -692,14 +692,14 @@
  * \retval Non-NULL
  *     Pointer to buffer containing global header packet
  */
-vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
+aom_fixed_buf_t *aom_codec_get_global_headers(aom_codec_ctx_t *ctx);
 
-/*!\brief deadline parameter analogous to VPx REALTIME mode. */
-#define VPX_DL_REALTIME (1)
-/*!\brief deadline parameter analogous to  VPx GOOD QUALITY mode. */
-#define VPX_DL_GOOD_QUALITY (1000000)
-/*!\brief deadline parameter analogous to VPx BEST QUALITY mode. */
-#define VPX_DL_BEST_QUALITY (0)
+/*!\brief deadline parameter analogous to AVx REALTIME mode. */
+#define AOM_DL_REALTIME (1)
+/*!\brief deadline parameter analogous to  AVx GOOD QUALITY mode. */
+#define AOM_DL_GOOD_QUALITY (1000000)
+/*!\brief deadline parameter analogous to AVx BEST QUALITY mode. */
+#define AOM_DL_BEST_QUALITY (0)
 /*!\brief Encode a frame
  *
  * Encodes a video frame at the given "presentation time." The presentation
@@ -711,16 +711,16 @@
  * implicit that limiting the available time to encode will degrade the
  * output quality. The encoder can be given an unlimited time to produce the
  * best possible frame by specifying a deadline of '0'. This deadline
- * supercedes the VPx notion of "best quality, good quality, realtime".
+ * supersedes the AVx notion of "best quality, good quality, realtime".
  * Applications that wish to map these former settings to the new deadline
- * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
- * and #VPX_DL_BEST_QUALITY.
+ * based system can use the symbols #AOM_DL_REALTIME, #AOM_DL_GOOD_QUALITY,
+ * and #AOM_DL_BEST_QUALITY.
  *
  * When the last frame has been passed to the encoder, this function should
  * continue to be called, with the img parameter set to NULL. This will
  * signal the end-of-stream condition to the encoder and allow it to encode
- * any held buffers. Encoding is complete when vpx_codec_encode() is called
- * and vpx_codec_get_cx_data() returns no data.
+ * any held buffers. Encoding is complete when aom_codec_encode() is called
+ * and aom_codec_get_cx_data() returns no data.
  *
  * \param[in]    ctx       Pointer to this instance's context
  * \param[in]    img       Image data to encode, NULL to flush.
@@ -729,23 +729,23 @@
  * \param[in]    flags     Flags to use for encoding this frame.
  * \param[in]    deadline  Time to spend encoding, in microseconds. (0=infinite)
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
+ * \retval #AOM_CODEC_INCAPABLE
  *     Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     A parameter was NULL, the image format is unsupported, etc.
  */
-vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
-                                 vpx_codec_pts_t pts, unsigned long duration,
-                                 vpx_enc_frame_flags_t flags,
+aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img,
+                                 aom_codec_pts_t pts, unsigned long duration,
+                                 aom_enc_frame_flags_t flags,
                                  unsigned long deadline);
 
 /*!\brief Set compressed data output buffer
  *
  * Sets the buffer that the codec should output the compressed data
  * into. This call effectively sets the buffer pointer returned in the
- * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
+ * next AOM_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
  * appended into this buffer. The buffer is preserved across frames,
  * so applications must periodically call this function after flushing
  * the accumulated compressed data to disk or to the network to reset
@@ -772,20 +772,20 @@
  * buffer.
  *
  * Applications \ref MUSTNOT call this function during iteration of
- * vpx_codec_get_cx_data().
+ * aom_codec_get_cx_data().
  *
  * \param[in]    ctx         Pointer to this instance's context
  * \param[in]    buf         Buffer to store compressed data into
  * \param[in]    pad_before  Bytes to skip before writing compressed data
  * \param[in]    pad_after   Bytes to skip after writing compressed data
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The buffer was set successfully.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     A parameter was NULL, the image format is unsupported, etc.
  */
-vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
-                                          const vpx_fixed_buf_t *buf,
+aom_codec_err_t aom_codec_set_cx_data_buf(aom_codec_ctx_t *ctx,
+                                          const aom_fixed_buf_t *buf,
                                           unsigned int pad_before,
                                           unsigned int pad_after);
 
@@ -793,17 +793,17 @@
  *
  * Iterates over a list of data packets to be passed from the encoder to the
  * application. The different kinds of packets available are enumerated in
- * #vpx_codec_cx_pkt_kind.
+ * #aom_codec_cx_pkt_kind.
  *
- * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
+ * #AOM_CODEC_CX_FRAME_PKT packets should be passed to the application's
  * muxer. Multiple compressed frames may be in the list.
- * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ * #AOM_CODEC_STATS_PKT packets should be appended to a global buffer.
  *
  * The application \ref MUST silently ignore any packet kinds that it does
  * not recognize or support.
  *
  * The data buffers returned from this function are only guaranteed to be
- * valid until the application makes another call to any vpx_codec_* function.
+ * valid until the application makes another call to any aom_codec_* function.
  *
  * \param[in]     ctx      Pointer to this instance's context
  * \param[in,out] iter     Iterator storage, initialized to NULL
@@ -812,8 +812,8 @@
  *         two-pass statistics, etc.) or NULL to signal end-of-list.
  *
  */
-const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_iter_t *iter);
+const aom_codec_cx_pkt_t *aom_codec_get_cx_data(aom_codec_ctx_t *ctx,
+                                                aom_codec_iter_t *iter);
 
 /*!\brief Get Preview Frame
  *
@@ -827,10 +827,10 @@
  *         available.
  *
  */
-const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+const aom_image_t *aom_codec_get_preview_frame(aom_codec_ctx_t *ctx);
 
 /*!@} - end defgroup encoder*/
 #ifdef __cplusplus
 }
 #endif
-#endif  // VPX_VPX_ENCODER_H_
+#endif  // AOM_AOM_ENCODER_H_
diff --git a/aom/vpx_frame_buffer.h b/aom/aom_frame_buffer.h
similarity index 76%
rename from aom/vpx_frame_buffer.h
rename to aom/aom_frame_buffer.h
index 86945f7..15e06d8 100644
--- a/aom/vpx_frame_buffer.h
+++ b/aom/aom_frame_buffer.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_VPX_FRAME_BUFFER_H_
-#define VPX_VPX_FRAME_BUFFER_H_
+#ifndef AOM_AOM_FRAME_BUFFER_H_
+#define AOM_AOM_FRAME_BUFFER_H_
 
 /*!\file
  * \brief Describes the decoder external frame buffer interface.
@@ -19,28 +19,28 @@
 extern "C" {
 #endif
 
-#include "./vpx_integer.h"
+#include "./aom_integer.h"
 
 /*!\brief The maximum number of work buffers used by libaom.
  *  Support maximum 4 threads to decode video in parallel.
  *  Each thread will use one work buffer.
  * TODO(hkuang): Add support to set number of worker threads dynamically.
  */
-#define VPX_MAXIMUM_WORK_BUFFERS 8
+#define AOM_MAXIMUM_WORK_BUFFERS 8
 
-/*!\brief The maximum number of reference buffers that a VP9 encoder may use.
+/*!\brief The maximum number of reference buffers that an AV1 encoder may use.
  */
-#define VPX_MAXIMUM_REF_BUFFERS 8
+#define AOM_MAXIMUM_REF_BUFFERS 8
 
 /*!\brief External frame buffer
  *
  * This structure holds allocated frame buffers used by the decoder.
  */
-typedef struct vpx_codec_frame_buffer {
+typedef struct aom_codec_frame_buffer {
   uint8_t *data; /**< Pointer to the data buffer */
   size_t size;   /**< Size of data in bytes */
   void *priv;    /**< Frame's private data */
-} vpx_codec_frame_buffer_t;
+} aom_codec_frame_buffer_t;
 
 /*!\brief get frame buffer callback prototype
  *
@@ -51,17 +51,17 @@
  * to the allocated size. The application does not need to align the allocated
  * data. The callback is triggered when the decoder needs a frame buffer to
  * decode a compressed image into. This function may be called more than once
- * for every call to vpx_codec_decode. The application may set fb->priv to
+ * for every call to aom_codec_decode. The application may set fb->priv to
  * some data which will be passed back in the ximage and the release function
  * call. |fb| is guaranteed to not be NULL. On success the callback must
  * return 0. Any failure the callback must return a value less than 0.
  *
  * \param[in] priv         Callback's private data
  * \param[in] new_size     Size in bytes needed by the buffer
- * \param[in,out] fb       Pointer to vpx_codec_frame_buffer_t
+ * \param[in,out] fb       Pointer to aom_codec_frame_buffer_t
  */
-typedef int (*vpx_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size,
-                                            vpx_codec_frame_buffer_t *fb);
+typedef int (*aom_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size,
+                                            aom_codec_frame_buffer_t *fb);
 
 /*!\brief release frame buffer callback prototype
  *
@@ -71,13 +71,13 @@
  * a value less than 0.
  *
  * \param[in] priv         Callback's private data
- * \param[in] fb           Pointer to vpx_codec_frame_buffer_t
+ * \param[in] fb           Pointer to aom_codec_frame_buffer_t
  */
-typedef int (*vpx_release_frame_buffer_cb_fn_t)(void *priv,
-                                                vpx_codec_frame_buffer_t *fb);
+typedef int (*aom_release_frame_buffer_cb_fn_t)(void *priv,
+                                                aom_codec_frame_buffer_t *fb);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_VPX_FRAME_BUFFER_H_
+#endif  // AOM_AOM_FRAME_BUFFER_H_
diff --git a/aom/vpx_image.h b/aom/aom_image.h
similarity index 61%
rename from aom/vpx_image.h
rename to aom/aom_image.h
index d6d3166..1c0b2d5 100644
--- a/aom/vpx_image.h
+++ b/aom/aom_image.h
@@ -9,11 +9,11 @@
  */
 
 /*!\file
- * \brief Describes the vpx image descriptor and associated operations
+ * \brief Describes the aom image descriptor and associated operations
  *
  */
-#ifndef VPX_VPX_IMAGE_H_
-#define VPX_VPX_IMAGE_H_
+#ifndef AOM_AOM_IMAGE_H_
+#define AOM_AOM_IMAGE_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,68 +27,68 @@
  * types, removing or reassigning enums, adding/removing/rearranging
  * fields to structures
  */
-#define VPX_IMAGE_ABI_VERSION (4) /**<\hideinitializer*/
+#define AOM_IMAGE_ABI_VERSION (4) /**<\hideinitializer*/
 
-#define VPX_IMG_FMT_PLANAR 0x100       /**< Image is a planar format. */
-#define VPX_IMG_FMT_UV_FLIP 0x200      /**< V plane precedes U in memory. */
-#define VPX_IMG_FMT_HAS_ALPHA 0x400    /**< Image has an alpha channel. */
-#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */
+#define AOM_IMG_FMT_PLANAR 0x100       /**< Image is a planar format. */
+#define AOM_IMG_FMT_UV_FLIP 0x200      /**< V plane precedes U in memory. */
+#define AOM_IMG_FMT_HAS_ALPHA 0x400    /**< Image has an alpha channel. */
+#define AOM_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */
 
 /*!\brief List of supported image formats */
-typedef enum vpx_img_fmt {
-  VPX_IMG_FMT_NONE,
-  VPX_IMG_FMT_RGB24,     /**< 24 bit per pixel packed RGB */
-  VPX_IMG_FMT_RGB32,     /**< 32 bit per pixel packed 0RGB */
-  VPX_IMG_FMT_RGB565,    /**< 16 bit per pixel, 565 */
-  VPX_IMG_FMT_RGB555,    /**< 16 bit per pixel, 555 */
-  VPX_IMG_FMT_UYVY,      /**< UYVY packed YUV */
-  VPX_IMG_FMT_YUY2,      /**< YUYV packed YUV */
-  VPX_IMG_FMT_YVYU,      /**< YVYU packed YUV */
-  VPX_IMG_FMT_BGR24,     /**< 24 bit per pixel packed BGR */
-  VPX_IMG_FMT_RGB32_LE,  /**< 32 bit packed BGR0 */
-  VPX_IMG_FMT_ARGB,      /**< 32 bit packed ARGB, alpha=255 */
-  VPX_IMG_FMT_ARGB_LE,   /**< 32 bit packed BGRA, alpha=255 */
-  VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
-  VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
-  VPX_IMG_FMT_YV12 =
-      VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
-  VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
-  VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP |
-                        3, /** < planar 4:2:0 format with vpx color space */
-  VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
-  VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
-  VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
-  VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7,
-  VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6,
-  VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH,
-  VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH,
-  VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH,
-  VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
-} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
+typedef enum aom_img_fmt {
+  AOM_IMG_FMT_NONE,
+  AOM_IMG_FMT_RGB24,     /**< 24 bit per pixel packed RGB */
+  AOM_IMG_FMT_RGB32,     /**< 32 bit per pixel packed 0RGB */
+  AOM_IMG_FMT_RGB565,    /**< 16 bit per pixel, 565 */
+  AOM_IMG_FMT_RGB555,    /**< 16 bit per pixel, 555 */
+  AOM_IMG_FMT_UYVY,      /**< UYVY packed YUV */
+  AOM_IMG_FMT_YUY2,      /**< YUYV packed YUV */
+  AOM_IMG_FMT_YVYU,      /**< YVYU packed YUV */
+  AOM_IMG_FMT_BGR24,     /**< 24 bit per pixel packed BGR */
+  AOM_IMG_FMT_RGB32_LE,  /**< 32 bit packed BGR0 */
+  AOM_IMG_FMT_ARGB,      /**< 32 bit packed ARGB, alpha=255 */
+  AOM_IMG_FMT_ARGB_LE,   /**< 32 bit packed BGRA, alpha=255 */
+  AOM_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
+  AOM_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
+  AOM_IMG_FMT_YV12 =
+      AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
+  AOM_IMG_FMT_I420 = AOM_IMG_FMT_PLANAR | 2,
+  AOM_IMG_FMT_AOMYV12 = AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP |
+                        3, /** < planar 4:2:0 format with aom color space */
+  AOM_IMG_FMT_AOMI420 = AOM_IMG_FMT_PLANAR | 4,
+  AOM_IMG_FMT_I422 = AOM_IMG_FMT_PLANAR | 5,
+  AOM_IMG_FMT_I444 = AOM_IMG_FMT_PLANAR | 6,
+  AOM_IMG_FMT_I440 = AOM_IMG_FMT_PLANAR | 7,
+  AOM_IMG_FMT_444A = AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_HAS_ALPHA | 6,
+  AOM_IMG_FMT_I42016 = AOM_IMG_FMT_I420 | AOM_IMG_FMT_HIGHBITDEPTH,
+  AOM_IMG_FMT_I42216 = AOM_IMG_FMT_I422 | AOM_IMG_FMT_HIGHBITDEPTH,
+  AOM_IMG_FMT_I44416 = AOM_IMG_FMT_I444 | AOM_IMG_FMT_HIGHBITDEPTH,
+  AOM_IMG_FMT_I44016 = AOM_IMG_FMT_I440 | AOM_IMG_FMT_HIGHBITDEPTH
+} aom_img_fmt_t; /**< alias for enum aom_img_fmt */
 
 /*!\brief List of supported color spaces */
-typedef enum vpx_color_space {
-  VPX_CS_UNKNOWN = 0,   /**< Unknown */
-  VPX_CS_BT_601 = 1,    /**< BT.601 */
-  VPX_CS_BT_709 = 2,    /**< BT.709 */
-  VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */
-  VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */
-  VPX_CS_BT_2020 = 5,   /**< BT.2020 */
-  VPX_CS_RESERVED = 6,  /**< Reserved */
-  VPX_CS_SRGB = 7       /**< sRGB */
-} vpx_color_space_t;    /**< alias for enum vpx_color_space */
+typedef enum aom_color_space {
+  AOM_CS_UNKNOWN = 0,   /**< Unknown */
+  AOM_CS_BT_601 = 1,    /**< BT.601 */
+  AOM_CS_BT_709 = 2,    /**< BT.709 */
+  AOM_CS_SMPTE_170 = 3, /**< SMPTE.170 */
+  AOM_CS_SMPTE_240 = 4, /**< SMPTE.240 */
+  AOM_CS_BT_2020 = 5,   /**< BT.2020 */
+  AOM_CS_RESERVED = 6,  /**< Reserved */
+  AOM_CS_SRGB = 7       /**< sRGB */
+} aom_color_space_t;    /**< alias for enum aom_color_space */
 
 /*!\brief List of supported color range */
-typedef enum vpx_color_range {
-  VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */
-  VPX_CR_FULL_RANGE = 1    /**< YUV/RGB [0..255] */
-} vpx_color_range_t;       /**< alias for enum vpx_color_range */
+typedef enum aom_color_range {
+  AOM_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */
+  AOM_CR_FULL_RANGE = 1    /**< YUV/RGB [0..255] */
+} aom_color_range_t;       /**< alias for enum aom_color_range */
 
 /**\brief Image Descriptor */
-typedef struct vpx_image {
-  vpx_img_fmt_t fmt;       /**< Image Format */
-  vpx_color_space_t cs;    /**< Color Space */
-  vpx_color_range_t range; /**< Color Range */
+typedef struct aom_image {
+  aom_img_fmt_t fmt;       /**< Image Format */
+  aom_color_space_t cs;    /**< Color Space */
+  aom_color_range_t range; /**< Color Range */
 
   /* Image storage dimensions */
   unsigned int w;         /**< Stored image width */
@@ -108,11 +108,11 @@
   unsigned int y_chroma_shift; /**< subsampling order, Y */
 
 /* Image data pointers. */
-#define VPX_PLANE_PACKED 0  /**< To be used for all packed formats */
-#define VPX_PLANE_Y 0       /**< Y (Luminance) plane */
-#define VPX_PLANE_U 1       /**< U (Chroma) plane */
-#define VPX_PLANE_V 2       /**< V (Chroma) plane */
-#define VPX_PLANE_ALPHA 3   /**< A (Transparency) plane */
+#define AOM_PLANE_PACKED 0  /**< To be used for all packed formats */
+#define AOM_PLANE_Y 0       /**< Y (Luminance) plane */
+#define AOM_PLANE_U 1       /**< U (Chroma) plane */
+#define AOM_PLANE_V 2       /**< V (Chroma) plane */
+#define AOM_PLANE_ALPHA 3   /**< A (Transparency) plane */
   unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
   int stride[4];            /**< stride between rows for each plane */
 
@@ -129,15 +129,15 @@
   int self_allocd;         /**< private */
 
   void *fb_priv; /**< Frame buffer data associated with the image. */
-} vpx_image_t;   /**< alias for struct vpx_image */
+} aom_image_t;   /**< alias for struct aom_image */
 
 /**\brief Representation of a rectangle on a surface */
-typedef struct vpx_image_rect {
+typedef struct aom_image_rect {
   unsigned int x;   /**< leftmost column */
   unsigned int y;   /**< topmost row */
   unsigned int w;   /**< width */
   unsigned int h;   /**< height */
-} vpx_image_rect_t; /**< alias for struct vpx_image_rect */
+} aom_image_rect_t; /**< alias for struct aom_image_rect */
 
 /*!\brief Open a descriptor, allocating storage for the underlying image
  *
@@ -157,7 +157,7 @@
  *         parameter is non-null, the value of the img parameter will be
  *         returned.
  */
-vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
+aom_image_t *aom_img_alloc(aom_image_t *img, aom_img_fmt_t fmt,
                            unsigned int d_w, unsigned int d_h,
                            unsigned int align);
 
@@ -180,7 +180,7 @@
  *         parameter is non-null, the value of the img parameter will be
  *         returned.
  */
-vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
+aom_image_t *aom_img_wrap(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w,
                           unsigned int d_h, unsigned int align,
                           unsigned char *img_data);
 
@@ -197,7 +197,7 @@
  *
  * \return 0 if the requested rectangle is valid, nonzero otherwise.
  */
-int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
+int aom_img_set_rect(aom_image_t *img, unsigned int x, unsigned int y,
                      unsigned int w, unsigned int h);
 
 /*!\brief Flip the image vertically (top for bottom)
@@ -207,7 +207,7 @@
  *
  * \param[in]    img       Image descriptor
  */
-void vpx_img_flip(vpx_image_t *img);
+void aom_img_flip(aom_image_t *img);
 
 /*!\brief Close an image descriptor
  *
@@ -215,10 +215,10 @@
  *
  * \param[in]    img       Image descriptor
  */
-void vpx_img_free(vpx_image_t *img);
+void aom_img_free(aom_image_t *img);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_VPX_IMAGE_H_
+#endif  // AOM_AOM_IMAGE_H_
diff --git a/aom/vpx_integer.h b/aom/aom_integer.h
similarity index 82%
rename from aom/vpx_integer.h
rename to aom/aom_integer.h
index 09bad92..0148e665 100644
--- a/aom/vpx_integer.h
+++ b/aom/aom_integer.h
@@ -8,22 +8,22 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_VPX_INTEGER_H_
-#define VPX_VPX_INTEGER_H_
+#ifndef AOM_AOM_INTEGER_H_
+#define AOM_AOM_INTEGER_H_
 
 /* get ptrdiff_t, size_t, wchar_t, NULL */
 #include <stddef.h>
 
 #if defined(_MSC_VER)
-#define VPX_FORCE_INLINE __forceinline
-#define VPX_INLINE __inline
+#define AOM_FORCE_INLINE __forceinline
+#define AOM_INLINE __inline
 #else
-#define VPX_FORCE_INLINE __inline__ __attribute__(always_inline)
+#define AOM_FORCE_INLINE __inline__ __attribute__(always_inline)
 // TODO(jbb): Allow a way to force inline off for older compilers.
-#define VPX_INLINE inline
+#define AOM_INLINE inline
 #endif
 
-#if defined(VPX_EMULATE_INTTYPES)
+#if defined(AOM_EMULATE_INTTYPES)
 typedef signed char int8_t;
 typedef signed short int16_t;
 typedef signed int int32_t;
@@ -60,4 +60,4 @@
 #include <inttypes.h>
 #endif
 
-#endif  // VPX_VPX_INTEGER_H_
+#endif  // AOM_AOM_INTEGER_H_
diff --git a/aom/vp8cx.h b/aom/aomcx.h
similarity index 61%
rename from aom/vp8cx.h
rename to aom/aomcx.h
index 087604d..903068f 100644
--- a/aom/vp8cx.h
+++ b/aom/aomcx.h
@@ -7,33 +7,33 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPX_VP8CX_H_
-#define VPX_VP8CX_H_
+#ifndef AOM_AOMCX_H_
+#define AOM_AOMCX_H_
 
-/*!\defgroup vp8_encoder WebM VP8/VP9 Encoder
+/*!\defgroup vp8_encoder WebM VP8/AV1 Encoder
  * \ingroup vp8
  *
  * @{
  */
-#include "./vp8.h"
-#include "./vpx_encoder.h"
+#include "./aom.h"
+#include "./aom_encoder.h"
 
 /*!\file
- * \brief Provides definitions for using VP8 or VP9 encoder algorithm within the
- *        vpx Codec Interface.
+ * \brief Provides definitions for using VP8 or AV1 encoder algorithm within the
+ *        aom Codec Interface.
  */
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-/*!\name Algorithm interface for VP10
+/*!\name Algorithm interface for AV1
  *
- * This interface provides the capability to encode raw VP10 streams.
+ * This interface provides the capability to encode raw AV1 streams.
  * @{
  */
-extern vpx_codec_iface_t vpx_codec_vp10_cx_algo;
-extern vpx_codec_iface_t *vpx_codec_vp10_cx(void);
+extern aom_codec_iface_t aom_codec_av1_cx_algo;
+extern aom_codec_iface_t *aom_codec_av1_cx(void);
 /*!@} - end algorithm interface member group*/
 
 /*
@@ -46,7 +46,7 @@
  * predictor. When not set, the encoder will choose whether to use the
  * last frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_LAST (1 << 16)
+#define AOM_EFLAG_NO_REF_LAST (1 << 16)
 
 /*!\brief Don't reference the golden frame
  *
@@ -54,7 +54,7 @@
  * predictor. When not set, the encoder will choose whether to use the
  * golden frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_GF (1 << 17)
+#define AOM_EFLAG_NO_REF_GF (1 << 17)
 
 /*!\brief Don't reference the alternate reference frame
  *
@@ -62,81 +62,81 @@
  * predictor. When not set, the encoder will choose whether to use the
  * alt ref frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_ARF (1 << 21)
+#define AOM_EFLAG_NO_REF_ARF (1 << 21)
 
 /*!\brief Don't update the last frame
  *
  * When this flag is set, the encoder will not update the last frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_LAST (1 << 18)
+#define AOM_EFLAG_NO_UPD_LAST (1 << 18)
 
 /*!\brief Don't update the golden frame
  *
  * When this flag is set, the encoder will not update the golden frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_GF (1 << 22)
+#define AOM_EFLAG_NO_UPD_GF (1 << 22)
 
 /*!\brief Don't update the alternate reference frame
  *
  * When this flag is set, the encoder will not update the alt ref frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_ARF (1 << 23)
+#define AOM_EFLAG_NO_UPD_ARF (1 << 23)
 
 /*!\brief Force golden frame update
  *
  * When this flag is set, the encoder copy the contents of the current frame
  * to the golden frame buffer.
  */
-#define VP8_EFLAG_FORCE_GF (1 << 19)
+#define AOM_EFLAG_FORCE_GF (1 << 19)
 
 /*!\brief Force alternate reference frame update
  *
  * When this flag is set, the encoder copy the contents of the current frame
  * to the alternate reference frame buffer.
  */
-#define VP8_EFLAG_FORCE_ARF (1 << 24)
+#define AOM_EFLAG_FORCE_ARF (1 << 24)
 
 /*!\brief Disable entropy update
  *
  * When this flag is set, the encoder will not update its internal entropy
  * model based on the entropy of this frame.
  */
-#define VP8_EFLAG_NO_UPD_ENTROPY (1 << 20)
+#define AOM_EFLAG_NO_UPD_ENTROPY (1 << 20)
 
-/*!\brief VPx encoder control functions
+/*!\brief AVx encoder control functions
  *
- * This set of macros define the control functions available for VPx
+ * This set of macros define the control functions available for AVx
  * encoder interface.
  *
- * \sa #vpx_codec_control
+ * \sa #aom_codec_control
  */
-enum vp8e_enc_control_id {
+enum aome_enc_control_id {
   /*!\brief Codec control function to set which reference frame encoder can use.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_USE_REFERENCE = 7,
+  AOME_USE_REFERENCE = 7,
 
   /*!\brief Codec control function to pass an ROI map to encoder.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_ROI_MAP = 8,
+  AOME_SET_ROI_MAP = 8,
 
   /*!\brief Codec control function to pass an Active map to encoder.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_ACTIVEMAP,
+  AOME_SET_ACTIVEMAP,
 
   /*!\brief Codec control function to set encoder scaling mode.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_SCALEMODE = 11,
+  AOME_SET_SCALEMODE = 11,
 
   /*!\brief Codec control function to set encoder internal speed settings.
    *
@@ -145,25 +145,25 @@
    * speed at the expense of quality.
    *
    * \note Valid range for VP8: -16..16
-   * \note Valid range for VP9: -8..8
+   * \note Valid range for AV1: -8..8
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_CPUUSED = 13,
+  AOME_SET_CPUUSED = 13,
 
   /*!\brief Codec control function to enable automatic set and use alf frames.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_ENABLEAUTOALTREF,
+  AOME_SET_ENABLEAUTOALTREF,
 
 #if CONFIG_EXT_REFS
   /*!\brief Codec control function to enable automatic set and use
    * bwd-pred frames.
    *
-   * Supported in codecs: VP10
+   * Supported in codecs: AV1
    */
-  VP8E_SET_ENABLEAUTOBWDREF,
+  AOME_SET_ENABLEAUTOBWDREF,
 #endif  // CONFIG_EXT_REFS
 
   /*!\brief control function to set noise sensitivity
@@ -173,73 +173,73 @@
    *
    * Supported in codecs: VP8
    */
-  VP8E_SET_NOISE_SENSITIVITY,
+  AOME_SET_NOISE_SENSITIVITY,
 
   /*!\brief Codec control function to set sharpness.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_SHARPNESS,
+  AOME_SET_SHARPNESS,
 
   /*!\brief Codec control function to set the threshold for MBs treated static.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_STATIC_THRESHOLD,
+  AOME_SET_STATIC_THRESHOLD,
 
   /*!\brief Codec control function to set the number of token partitions.
    *
    * Supported in codecs: VP8
    */
-  VP8E_SET_TOKEN_PARTITIONS,
+  AOME_SET_TOKEN_PARTITIONS,
 
   /*!\brief Codec control function to get last quantizer chosen by the encoder.
    *
    * Return value uses internal quantizer scale defined by the codec.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_GET_LAST_QUANTIZER,
+  AOME_GET_LAST_QUANTIZER,
 
   /*!\brief Codec control function to get last quantizer chosen by the encoder.
    *
    * Return value uses the 0..63 scale as used by the rc_*_quantizer config
    * parameters.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_GET_LAST_QUANTIZER_64,
+  AOME_GET_LAST_QUANTIZER_64,
 
   /*!\brief Codec control function to set the max no of frames to create arf.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_ARNR_MAXFRAMES,
+  AOME_SET_ARNR_MAXFRAMES,
 
   /*!\brief Codec control function to set the filter strength for the arf.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_ARNR_STRENGTH,
+  AOME_SET_ARNR_STRENGTH,
 
   /*!\deprecated control function to set the filter type to use for the arf. */
-  VP8E_SET_ARNR_TYPE,
+  AOME_SET_ARNR_TYPE,
 
   /*!\brief Codec control function to set visual tuning.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_TUNING,
+  AOME_SET_TUNING,
 
   /*!\brief Codec control function to set constrained quality level.
    *
-   * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
-   *            set to #VPX_CQ.
+   * \attention For this value to be used aom_codec_enc_cfg_t::g_usage must be
+   *            set to #AOM_CQ.
    * \note Valid range: 0..63
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_CQ_LEVEL,
+  AOME_SET_CQ_LEVEL,
 
   /*!\brief Codec control function to set Max data rate for Intra frames.
    *
@@ -252,15 +252,15 @@
    * For example, to allocate no more than 4.5 frames worth of bitrate
    * to a keyframe, set this to 450.
    *
-   * Supported in codecs: VP8, VP9
+   * Supported in codecs: VP8, AV1
    */
-  VP8E_SET_MAX_INTRA_BITRATE_PCT,
+  AOME_SET_MAX_INTRA_BITRATE_PCT,
 
   /*!\brief Codec control function to set reference and update frame flags.
    *
    *  Supported in codecs: VP8
    */
-  VP8E_SET_FRAME_FLAGS,
+  AOME_SET_FRAME_FLAGS,
 
   /*!\brief Codec control function to set max data rate for Inter frames.
    *
@@ -273,9 +273,9 @@
    * For example, to allow no more than 4.5 frames worth of bitrate
    * to an inter frame, set this to 450.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_MAX_INTER_BITRATE_PCT,
+  AV1E_SET_MAX_INTER_BITRATE_PCT,
 
   /*!\brief Boost percentage for Golden Frame in CBR mode.
    *
@@ -288,9 +288,9 @@
    * For example, to allow 100% more bits, i.e, 2X, in a golden frame
    * than average frame, set this to 100.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_GF_CBR_BOOST_PCT,
+  AV1E_SET_GF_CBR_BOOST_PCT,
 
   /*!\brief Codec control function to set encoder screen content mode.
    *
@@ -298,11 +298,11 @@
    *
    * Supported in codecs: VP8
    */
-  VP8E_SET_SCREEN_CONTENT_MODE,
+  AOME_SET_SCREEN_CONTENT_MODE,
 
   /*!\brief Codec control function to set lossless encoding mode.
    *
-   * VP9 can operate in lossless encoding mode, in which the bitstream
+   * AV1 can operate in lossless encoding mode, in which the bitstream
    * produced will be able to decode and reconstruct a perfect copy of
    * input source. This control function provides a mean to switch encoder
    * into lossless coding mode(1) or normal coding mode(0) that may be lossy.
@@ -311,9 +311,9 @@
    *
    *  By default, encoder operates in normal coding mode (maybe lossy).
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_LOSSLESS,
+  AV1E_SET_LOSSLESS,
 #if CONFIG_AOM_QM
   /*!\brief Codec control function to encode with quantisation matrices.
    *
@@ -327,7 +327,7 @@
    * Supported in codecs: AOM
    */
 
-  VP9E_SET_ENABLE_QM,
+  AV1E_SET_ENABLE_QM,
 
   /*!\brief Codec control function to set the min quant matrix flatness.
    *
@@ -341,7 +341,7 @@
    *
    * Supported in codecs: AOM
    */
-  VP9E_SET_QM_MIN,
+  AV1E_SET_QM_MIN,
 
   /*!\brief Codec control function to set the max quant matrix flatness.
    *
@@ -354,12 +354,12 @@
    *
    * Supported in codecs: AOM
    */
-  VP9E_SET_QM_MAX,
+  AV1E_SET_QM_MAX,
 #endif
 
   /*!\brief Codec control function to set number of tile columns.
    *
-   * In encoding and decoding, VP9 allows an input image frame be partitioned
+   * In encoding and decoding, AV1 allows an input image frame be partitioned
    * into separated vertical tile columns, which can be encoded or decoded
    * independently. This enables easy implementation of parallel encoding and
    * decoding. This control requests the encoder to use column tiles in
@@ -376,13 +376,13 @@
    *
    * By default, the value is 0, i.e. one single column tile for entire image.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_TILE_COLUMNS,
+  AV1E_SET_TILE_COLUMNS,
 
   /*!\brief Codec control function to set number of tile rows.
    *
-   * In encoding and decoding, VP9 allows an input image frame be partitioned
+   * In encoding and decoding, AV1 allows an input image frame be partitioned
    * into separated horizontal tile rows. Tile rows are encoded or decoded
    * sequentially. Even though encoding/decoding of later tile rows depends on
    * earlier ones, this allows the encoder to output data packets for tile rows
@@ -396,13 +396,13 @@
    *
    * By default, the value is 0, i.e. one single row tile for entire image.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_TILE_ROWS,
+  AV1E_SET_TILE_ROWS,
 
   /*!\brief Codec control function to enable frame parallel decoding feature.
    *
-   * VP9 has a bitstream feature to reduce decoding dependency between frames
+   * AV1 has a bitstream feature to reduce decoding dependency between frames
    * by turning off backward update of probability context used in encoding
    * and decoding. This allows staged parallel processing of more than one
    * video frames in the decoder. This control function provides a mean to
@@ -410,26 +410,26 @@
    *
    * By default, this feature is off.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_FRAME_PARALLEL_DECODING,
+  AV1E_SET_FRAME_PARALLEL_DECODING,
 
   /*!\brief Codec control function to set adaptive quantization mode.
    *
-   * VP9 has a segment based feature that allows encoder to adaptively change
+   * AV1 has a segment based feature that allows encoder to adaptively change
    * quantization parameter for each segment within a frame to improve the
    * subjective quality. This control makes encoder operate in one of the
    * several AQ_modes supported.
    *
    * By default, encoder operates with AQ_Mode 0(adaptive quantization off).
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_AQ_MODE,
+  AV1E_SET_AQ_MODE,
 
   /*!\brief Codec control function to enable/disable periodic Q boost.
    *
-   * One VP9 encoder speed feature is to enable quality boost by lowering
+   * One AV1 encoder speed feature is to enable quality boost by lowering
    * frame level Q periodically. This control function provides a mean to
    * turn on/off this feature.
    *               0 = off
@@ -438,26 +438,26 @@
    * By default, the encoder is allowed to use this feature for appropriate
    * encoding modes.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_FRAME_PERIODIC_BOOST,
+  AV1E_SET_FRAME_PERIODIC_BOOST,
 
   /*!\brief Codec control function to set noise sensitivity.
    *
    *  0: off, 1: On(YOnly)
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_NOISE_SENSITIVITY,
+  AV1E_SET_NOISE_SENSITIVITY,
 
   /*!\brief Codec control function to set content type.
    * \note Valid parameter range:
-   *              VPX_CONTENT_DEFAULT = Regular video content (Default)
-   *              VPX_CONTENT_SCREEN  = Screen capture content
+   *              AOM_CONTENT_DEFAULT = Regular video content (Default)
+   *              AOM_CONTENT_SCREEN  = Screen capture content
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_TUNE_CONTENT,
+  AV1E_SET_TUNE_CONTENT,
 
   /*!\brief Codec control function to set color space info.
    * \note Valid ranges: 0..7, default is "UNKNOWN".
@@ -470,127 +470,127 @@
    *                     6 = RESERVED
    *                     7 = SRGB
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_COLOR_SPACE,
+  AV1E_SET_COLOR_SPACE,
 
   /*!\brief Codec control function to set minimum interval between GF/ARF frames
    *
    * By default the value is set as 4.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_MIN_GF_INTERVAL,
+  AV1E_SET_MIN_GF_INTERVAL,
 
   /*!\brief Codec control function to set minimum interval between GF/ARF frames
    *
    * By default the value is set as 16.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_MAX_GF_INTERVAL,
+  AV1E_SET_MAX_GF_INTERVAL,
 
   /*!\brief Codec control function to get an Active map back from the encoder.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_GET_ACTIVEMAP,
+  AV1E_GET_ACTIVEMAP,
 
   /*!\brief Codec control function to set color range bit.
    * \note Valid ranges: 0..1, default is 0
    *                     0 = Limited range (16..235 or HBD equivalent)
    *                     1 = Full range (0..255 or HBD equivalent)
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_COLOR_RANGE,
+  AV1E_SET_COLOR_RANGE,
 
   /*!\brief Codec control function to set intended rendering image size.
    *
    * By default, this is identical to the image size in pixels.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_RENDER_SIZE,
+  AV1E_SET_RENDER_SIZE,
 
   /*!\brief Codec control function to set target level.
    *
    * 255: off (default); 0: only keep level stats; 10: target for level 1.0;
    * 11: target for level 1.1; ... 62: target for level 6.2
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_SET_TARGET_LEVEL,
+  AV1E_SET_TARGET_LEVEL,
 
   /*!\brief Codec control function to get bitstream level.
    *
-   * Supported in codecs: VP9
+   * Supported in codecs: AV1
    */
-  VP9E_GET_LEVEL,
+  AV1E_GET_LEVEL,
 
   /*!\brief Codec control function to set intended superblock size.
    *
    * By default, the superblock size is determined separately for each
    * frame by the encoder.
    *
-   * Supported in codecs: VP10
+   * Supported in codecs: AV1
    */
-  VP10E_SET_SUPERBLOCK_SIZE,
+  AV1E_SET_SUPERBLOCK_SIZE,
 };
 
-/*!\brief vpx 1-D scaling mode
+/*!\brief aom 1-D scaling mode
  *
- * This set of constants define 1-D vpx scaling modes
+ * This set of constants define 1-D aom scaling modes
  */
-typedef enum vpx_scaling_mode_1d {
-  VP8E_NORMAL = 0,
-  VP8E_FOURFIVE = 1,
-  VP8E_THREEFIVE = 2,
-  VP8E_ONETWO = 3
-} VPX_SCALING_MODE;
+typedef enum aom_scaling_mode_1d {
+  AOME_NORMAL = 0,
+  AOME_FOURFIVE = 1,
+  AOME_THREEFIVE = 2,
+  AOME_ONETWO = 3
+} AOM_SCALING_MODE;
 
-/*!\brief  vpx region of interest map
+/*!\brief  aom region of interest map
  *
  * These defines the data structures for the region of interest map
  *
  */
 
-typedef struct vpx_roi_map {
+typedef struct aom_roi_map {
   /*! An id between 0 and 3 for each 16x16 region within a frame. */
   unsigned char *roi_map;
   unsigned int rows; /**< Number of rows. */
   unsigned int cols; /**< Number of columns. */
-  // TODO(paulwilkins): broken for VP9 which has 8 segments
+  // TODO(paulwilkins): broken for AV1 which has 8 segments
   // q and loop filter deltas for each segment
   // (see MAX_MB_SEGMENTS)
   int delta_q[4];  /**< Quantizer deltas. */
   int delta_lf[4]; /**< Loop filter deltas. */
   /*! Static breakout threshold for each segment. */
   unsigned int static_threshold[4];
-} vpx_roi_map_t;
+} aom_roi_map_t;
 
-/*!\brief  vpx active region map
+/*!\brief  aom active region map
  *
  * These defines the data structures for active region map
  *
  */
 
-typedef struct vpx_active_map {
+typedef struct aom_active_map {
   /*!\brief specify an on (1) or off (0) each 16x16 region within a frame */
   unsigned char *active_map;
   unsigned int rows; /**< number of rows */
   unsigned int cols; /**< number of cols */
-} vpx_active_map_t;
+} aom_active_map_t;
 
-/*!\brief  vpx image scaling mode
+/*!\brief  aom image scaling mode
  *
  * This defines the data structure for image scaling mode
  *
  */
-typedef struct vpx_scaling_mode {
-  VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
-  VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode   */
-} vpx_scaling_mode_t;
+typedef struct aom_scaling_mode {
+  AOM_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
+  AOM_SCALING_MODE v_scaling_mode; /**< vertical scaling mode   */
+} aom_scaling_mode_t;
 
 /*!\brief VP8 token partition mode
  *
@@ -600,159 +600,159 @@
  */
 
 typedef enum {
-  VP8_ONE_TOKENPARTITION = 0,
-  VP8_TWO_TOKENPARTITION = 1,
-  VP8_FOUR_TOKENPARTITION = 2,
-  VP8_EIGHT_TOKENPARTITION = 3
-} vp8e_token_partitions;
+  AOM_ONE_TOKENPARTITION = 0,
+  AOM_TWO_TOKENPARTITION = 1,
+  AOM_FOUR_TOKENPARTITION = 2,
+  AOM_EIGHT_TOKENPARTITION = 3
+} aome_token_partitions;
 
-/*!brief VP9 encoder content type */
+/*!brief AV1 encoder content type */
 typedef enum {
-  VPX_CONTENT_DEFAULT,
-  VPX_CONTENT_SCREEN,
-  VPX_CONTENT_INVALID
-} vpx_tune_content;
+  AOM_CONTENT_DEFAULT,
+  AOM_CONTENT_SCREEN,
+  AOM_CONTENT_INVALID
+} aom_tune_content;
 
 /*!\brief VP8 model tuning parameters
  *
  * Changes the encoder to tune for certain types of input material.
  *
  */
-typedef enum { VPX_TUNE_PSNR, VPX_TUNE_SSIM } vpx_tune_metric;
+typedef enum { AOM_TUNE_PSNR, AOM_TUNE_SSIM } aom_tune_metric;
 
 /*!\cond */
 /*!\brief VP8 encoder control function parameter type
  *
  * Defines the data types that VP8E control functions take. Note that
- * additional common controls are defined in vp8.h
+ * additional common controls are defined in aom.h
  *
  */
 
-VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_USE_REFERENCE, int)
-#define VPX_CTRL_VP8E_USE_REFERENCE
-VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int)
-#define VPX_CTRL_VP8E_SET_FRAME_FLAGS
-VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *)
-#define VPX_CTRL_VP8E_SET_ROI_MAP
-VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *)
-#define VPX_CTRL_VP8E_SET_ACTIVEMAP
-VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *)
-#define VPX_CTRL_VP8E_SET_SCALEMODE
+AOM_CTRL_USE_TYPE_DEPRECATED(AOME_USE_REFERENCE, int)
+#define AOM_CTRL_AOME_USE_REFERENCE
+AOM_CTRL_USE_TYPE(AOME_SET_FRAME_FLAGS, int)
+#define AOM_CTRL_AOME_SET_FRAME_FLAGS
+AOM_CTRL_USE_TYPE(AOME_SET_ROI_MAP, aom_roi_map_t *)
+#define AOM_CTRL_AOME_SET_ROI_MAP
+AOM_CTRL_USE_TYPE(AOME_SET_ACTIVEMAP, aom_active_map_t *)
+#define AOM_CTRL_AOME_SET_ACTIVEMAP
+AOM_CTRL_USE_TYPE(AOME_SET_SCALEMODE, aom_scaling_mode_t *)
+#define AOM_CTRL_AOME_SET_SCALEMODE
 
-VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int)
-#define VPX_CTRL_VP8E_SET_CPUUSED
-VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int)
-#define VPX_CTRL_VP8E_SET_ENABLEAUTOALTREF
+AOM_CTRL_USE_TYPE(AOME_SET_CPUUSED, int)
+#define AOM_CTRL_AOME_SET_CPUUSED
+AOM_CTRL_USE_TYPE(AOME_SET_ENABLEAUTOALTREF, unsigned int)
+#define AOM_CTRL_AOME_SET_ENABLEAUTOALTREF
 
 #if CONFIG_EXT_REFS
-VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOBWDREF, unsigned int)
-#define VPX_CTRL_VP8E_SET_ENABLEAUTOBWDREF
+AOM_CTRL_USE_TYPE(AOME_SET_ENABLEAUTOBWDREF, unsigned int)
+#define AOM_CTRL_AOME_SET_ENABLEAUTOBWDREF
 #endif  // CONFIG_EXT_REFS
 
-VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int)
-#define VPX_CTRL_VP8E_SET_NOISE_SENSITIVITY
-VPX_CTRL_USE_TYPE(VP8E_SET_SHARPNESS, unsigned int)
-#define VPX_CTRL_VP8E_SET_SHARPNESS
-VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int)
-#define VPX_CTRL_VP8E_SET_STATIC_THRESHOLD
-VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */
-#define VPX_CTRL_VP8E_SET_TOKEN_PARTITIONS
+AOM_CTRL_USE_TYPE(AOME_SET_NOISE_SENSITIVITY, unsigned int)
+#define AOM_CTRL_AOME_SET_NOISE_SENSITIVITY
+AOM_CTRL_USE_TYPE(AOME_SET_SHARPNESS, unsigned int)
+#define AOM_CTRL_AOME_SET_SHARPNESS
+AOM_CTRL_USE_TYPE(AOME_SET_STATIC_THRESHOLD, unsigned int)
+#define AOM_CTRL_AOME_SET_STATIC_THRESHOLD
+AOM_CTRL_USE_TYPE(AOME_SET_TOKEN_PARTITIONS, int) /* aome_token_partitions */
+#define AOM_CTRL_AOME_SET_TOKEN_PARTITIONS
 
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int)
-#define VPX_CTRL_VP8E_SET_ARNR_MAXFRAMES
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int)
-#define VPX_CTRL_VP8E_SET_ARNR_STRENGTH
-VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE, unsigned int)
-#define VPX_CTRL_VP8E_SET_ARNR_TYPE
-VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vpx_tune_metric */
-#define VPX_CTRL_VP8E_SET_TUNING
-VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int)
-#define VPX_CTRL_VP8E_SET_CQ_LEVEL
+AOM_CTRL_USE_TYPE(AOME_SET_ARNR_MAXFRAMES, unsigned int)
+#define AOM_CTRL_AOME_SET_ARNR_MAXFRAMES
+AOM_CTRL_USE_TYPE(AOME_SET_ARNR_STRENGTH, unsigned int)
+#define AOM_CTRL_AOME_SET_ARNR_STRENGTH
+AOM_CTRL_USE_TYPE_DEPRECATED(AOME_SET_ARNR_TYPE, unsigned int)
+#define AOM_CTRL_AOME_SET_ARNR_TYPE
+AOM_CTRL_USE_TYPE(AOME_SET_TUNING, int) /* aom_tune_metric */
+#define AOM_CTRL_AOME_SET_TUNING
+AOM_CTRL_USE_TYPE(AOME_SET_CQ_LEVEL, unsigned int)
+#define AOM_CTRL_AOME_SET_CQ_LEVEL
 
-VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int)
-#define VPX_CTRL_VP9E_SET_TILE_COLUMNS
-VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int)
-#define VPX_CTRL_VP9E_SET_TILE_ROWS
+AOM_CTRL_USE_TYPE(AV1E_SET_TILE_COLUMNS, int)
+#define AOM_CTRL_AV1E_SET_TILE_COLUMNS
+AOM_CTRL_USE_TYPE(AV1E_SET_TILE_ROWS, int)
+#define AOM_CTRL_AV1E_SET_TILE_ROWS
 
-VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *)
-#define VPX_CTRL_VP8E_GET_LAST_QUANTIZER
-VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *)
-#define VPX_CTRL_VP8E_GET_LAST_QUANTIZER_64
+AOM_CTRL_USE_TYPE(AOME_GET_LAST_QUANTIZER, int *)
+#define AOM_CTRL_AOME_GET_LAST_QUANTIZER
+AOM_CTRL_USE_TYPE(AOME_GET_LAST_QUANTIZER_64, int *)
+#define AOM_CTRL_AOME_GET_LAST_QUANTIZER_64
 
-VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int)
-#define VPX_CTRL_VP8E_SET_MAX_INTRA_BITRATE_PCT
-VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTER_BITRATE_PCT, unsigned int)
-#define VPX_CTRL_VP8E_SET_MAX_INTER_BITRATE_PCT
+AOM_CTRL_USE_TYPE(AOME_SET_MAX_INTRA_BITRATE_PCT, unsigned int)
+#define AOM_CTRL_AOME_SET_MAX_INTRA_BITRATE_PCT
+AOM_CTRL_USE_TYPE(AOME_SET_MAX_INTER_BITRATE_PCT, unsigned int)
+#define AOM_CTRL_AOME_SET_MAX_INTER_BITRATE_PCT
 
-VPX_CTRL_USE_TYPE(VP8E_SET_SCREEN_CONTENT_MODE, unsigned int)
-#define VPX_CTRL_VP8E_SET_SCREEN_CONTENT_MODE
+AOM_CTRL_USE_TYPE(AOME_SET_SCREEN_CONTENT_MODE, unsigned int)
+#define AOM_CTRL_AOME_SET_SCREEN_CONTENT_MODE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_GF_CBR_BOOST_PCT, unsigned int)
-#define VPX_CTRL_VP9E_SET_GF_CBR_BOOST_PCT
+AOM_CTRL_USE_TYPE(AV1E_SET_GF_CBR_BOOST_PCT, unsigned int)
+#define AOM_CTRL_AV1E_SET_GF_CBR_BOOST_PCT
 
-VPX_CTRL_USE_TYPE(VP9E_SET_LOSSLESS, unsigned int)
-#define VPX_CTRL_VP9E_SET_LOSSLESS
+AOM_CTRL_USE_TYPE(AV1E_SET_LOSSLESS, unsigned int)
+#define AOM_CTRL_AV1E_SET_LOSSLESS
 
 #if CONFIG_AOM_QM
-VPX_CTRL_USE_TYPE(VP9E_SET_ENABLE_QM, unsigned int)
-#define VPX_CTRL_VP9E_SET_ENABLE_QM
+AOM_CTRL_USE_TYPE(AV1E_SET_ENABLE_QM, unsigned int)
+#define AOM_CTRL_AV1E_SET_ENABLE_QM
 
-VPX_CTRL_USE_TYPE(VP9E_SET_QM_MIN, unsigned int)
-#define VPX_CTRL_VP9E_SET_QM_MIN
+AOM_CTRL_USE_TYPE(AV1E_SET_QM_MIN, unsigned int)
+#define AOM_CTRL_AV1E_SET_QM_MIN
 
-VPX_CTRL_USE_TYPE(VP9E_SET_QM_MAX, unsigned int)
-#define VPX_CTRL_VP9E_SET_QM_MAX
+AOM_CTRL_USE_TYPE(AV1E_SET_QM_MAX, unsigned int)
+#define AOM_CTRL_AV1E_SET_QM_MAX
 #endif
 
-VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PARALLEL_DECODING, unsigned int)
-#define VPX_CTRL_VP9E_SET_FRAME_PARALLEL_DECODING
+AOM_CTRL_USE_TYPE(AV1E_SET_FRAME_PARALLEL_DECODING, unsigned int)
+#define AOM_CTRL_AV1E_SET_FRAME_PARALLEL_DECODING
 
-VPX_CTRL_USE_TYPE(VP9E_SET_AQ_MODE, unsigned int)
-#define VPX_CTRL_VP9E_SET_AQ_MODE
+AOM_CTRL_USE_TYPE(AV1E_SET_AQ_MODE, unsigned int)
+#define AOM_CTRL_AV1E_SET_AQ_MODE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PERIODIC_BOOST, unsigned int)
-#define VPX_CTRL_VP9E_SET_FRAME_PERIODIC_BOOST
+AOM_CTRL_USE_TYPE(AV1E_SET_FRAME_PERIODIC_BOOST, unsigned int)
+#define AOM_CTRL_AV1E_SET_FRAME_PERIODIC_BOOST
 
-VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int)
-#define VPX_CTRL_VP9E_SET_NOISE_SENSITIVITY
+AOM_CTRL_USE_TYPE(AV1E_SET_NOISE_SENSITIVITY, unsigned int)
+#define AOM_CTRL_AV1E_SET_NOISE_SENSITIVITY
 
-VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vpx_tune_content */
-#define VPX_CTRL_VP9E_SET_TUNE_CONTENT
+AOM_CTRL_USE_TYPE(AV1E_SET_TUNE_CONTENT, int) /* aom_tune_content */
+#define AOM_CTRL_AV1E_SET_TUNE_CONTENT
 
-VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int)
-#define VPX_CTRL_VP9E_SET_COLOR_SPACE
+AOM_CTRL_USE_TYPE(AV1E_SET_COLOR_SPACE, int)
+#define AOM_CTRL_AV1E_SET_COLOR_SPACE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL, unsigned int)
-#define VPX_CTRL_VP9E_SET_MIN_GF_INTERVAL
+AOM_CTRL_USE_TYPE(AV1E_SET_MIN_GF_INTERVAL, unsigned int)
+#define AOM_CTRL_AV1E_SET_MIN_GF_INTERVAL
 
-VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL, unsigned int)
-#define VPX_CTRL_VP9E_SET_MAX_GF_INTERVAL
+AOM_CTRL_USE_TYPE(AV1E_SET_MAX_GF_INTERVAL, unsigned int)
+#define AOM_CTRL_AV1E_SET_MAX_GF_INTERVAL
 
-VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, vpx_active_map_t *)
-#define VPX_CTRL_VP9E_GET_ACTIVEMAP
+AOM_CTRL_USE_TYPE(AV1E_GET_ACTIVEMAP, aom_active_map_t *)
+#define AOM_CTRL_AV1E_GET_ACTIVEMAP
 
-VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_RANGE, int)
-#define VPX_CTRL_VP9E_SET_COLOR_RANGE
+AOM_CTRL_USE_TYPE(AV1E_SET_COLOR_RANGE, int)
+#define AOM_CTRL_AV1E_SET_COLOR_RANGE
 
 /*!\brief
  *
  * TODO(rbultje) : add support of the control in ffmpeg
  */
-#define VPX_CTRL_VP9E_SET_RENDER_SIZE
-VPX_CTRL_USE_TYPE(VP9E_SET_RENDER_SIZE, int *)
+#define AOM_CTRL_AV1E_SET_RENDER_SIZE
+AOM_CTRL_USE_TYPE(AV1E_SET_RENDER_SIZE, int *)
 
-VPX_CTRL_USE_TYPE(VP10E_SET_SUPERBLOCK_SIZE, unsigned int)
-#define VPX_CTRL_VP10E_SET_SUPERBLOCK_SIZE
+AOM_CTRL_USE_TYPE(AV1E_SET_SUPERBLOCK_SIZE, unsigned int)
+#define AOM_CTRL_AV1E_SET_SUPERBLOCK_SIZE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_TARGET_LEVEL, unsigned int)
-#define VPX_CTRL_VP9E_SET_TARGET_LEVEL
+AOM_CTRL_USE_TYPE(AV1E_SET_TARGET_LEVEL, unsigned int)
+#define AOM_CTRL_AV1E_SET_TARGET_LEVEL
 
-VPX_CTRL_USE_TYPE(VP9E_GET_LEVEL, int *)
-#define VPX_CTRL_VP9E_GET_LEVEL
+AOM_CTRL_USE_TYPE(AV1E_GET_LEVEL, int *)
+#define AOM_CTRL_AV1E_GET_LEVEL
 /*!\endcond */
 /*! @} - end defgroup vp8_encoder */
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_VP8CX_H_
+#endif  // AOM_AOMCX_H_
diff --git a/aom/aomdx.h b/aom/aomdx.h
new file mode 100644
index 0000000..3c397e4
--- /dev/null
+++ b/aom/aomdx.h
@@ -0,0 +1,176 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*!\defgroup aom_decoder AOMedia AOM/AV1 Decoder
+ * \ingroup aom
+ *
+ * @{
+ */
+/*!\file
+ * \brief Provides definitions for using AOM or AV1 within the aom Decoder
+ *        interface.
+ */
+#ifndef AOM_AOMDX_H_
+#define AOM_AOMDX_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Include controls common to both the encoder and decoder */
+#include "./aom.h"
+
+/*!\name Algorithm interface for AV1
+ *
+ * This interface provides the capability to decode AV1 streams.
+ * @{
+ */
+extern aom_codec_iface_t aom_codec_av1_dx_algo;
+extern aom_codec_iface_t *aom_codec_av1_dx(void);
+/*!@} - end algorithm interface member group*/
+
+/*!\enum aom_dec_control_id
+ * \brief AOM decoder control functions
+ *
+ * This set of macros define the control functions available for the AOM
+ * decoder interface.
+ *
+ * \sa #aom_codec_control
+ */
+enum aom_dec_control_id {
+  /** control function to get info on which reference frames were updated
+   *  by the last decode
+   */
+  AOMD_GET_LAST_REF_UPDATES = AOM_DECODER_CTRL_ID_START,
+
+  /** check if the indicated frame is corrupted */
+  AOMD_GET_FRAME_CORRUPTED,
+
+  /** control function to get info on which reference frames were used
+   *  by the last decode
+   */
+  AOMD_GET_LAST_REF_USED,
+
+  /** decryption function to decrypt encoded buffer data immediately
+   * before decoding. Takes an aom_decrypt_init, which contains
+   * a callback function and opaque context pointer.
+   */
+  AOMD_SET_DECRYPTOR,
+  // (the legacy VP8D_SET_DECRYPTOR alias became identical to
+
+
+  /** control function to get the dimensions that the current frame is decoded
+   * at. This may be different to the intended display size for the frame as
+   * specified in the wrapper or frame header (see AV1D_GET_DISPLAY_SIZE). */
+  AV1D_GET_FRAME_SIZE,
+
+  /** control function to get the current frame's intended display dimensions
+   * (as specified in the wrapper or frame header). This may be different to
+   * the decoded dimensions of this frame (see AV1D_GET_FRAME_SIZE). */
+  AV1D_GET_DISPLAY_SIZE,
+
+  /** control function to get the bit depth of the stream. */
+  AV1D_GET_BIT_DEPTH,
+
+  /** control function to set the byte alignment of the planes in the reference
+   * buffers. Valid values are power of 2, from 32 to 1024. A value of 0 sets
+   * legacy alignment. I.e. Y plane is aligned to 32 bytes, U plane directly
+   * follows Y plane, and V plane directly follows U plane. Default value is 0.
+   */
+  AV1_SET_BYTE_ALIGNMENT,
+
+  /** control function to invert the decoding order to from right to left. The
+   * function is used in a test to confirm the decoding independence of tile
+   * columns. The function may be used in application where this order
+   * of decoding is desired.
+   *
+   * TODO(yaowu): Rework the unit test that uses this control, and in a future
+   *              release, this test-only control shall be removed.
+   */
+  AV1_INVERT_TILE_DECODE_ORDER,
+
+  /** control function to set the skip loop filter flag. Valid values are
+   * integers. The decoder will skip the loop filter when its value is set to
+   * nonzero. If the loop filter is skipped the decoder may accumulate decode
+   * artifacts. The default value is 0.
+   */
+  AV1_SET_SKIP_LOOP_FILTER,
+
+  AOM_DECODER_CTRL_ID_MAX,
+
+  /** control function to set the range of tile decoding. A value that is
+   * greater and equal to zero indicates only the specific row/column is
+   * decoded. A value that is -1 indicates the whole row/column is decoded.
+   * A special case is both values are -1 that means the whole frame is
+   * decoded.
+   */
+  AV1_SET_DECODE_TILE_ROW,
+  AV1_SET_DECODE_TILE_COL
+};
+
+/** Decrypt n bytes of data from input -> output, using the decrypt_state
+ *  passed in AOMD_SET_DECRYPTOR.
+ */
+typedef void (*aom_decrypt_cb)(void *decrypt_state, const unsigned char *input,
+                               unsigned char *output, int count);
+
+/*!\brief Structure to hold decryption state
+ *
+ * Defines a structure to hold the decryption state and access function.
+ */
+typedef struct aom_decrypt_init {
+  /*! Decrypt callback. */
+  aom_decrypt_cb decrypt_cb;
+
+  /*! Decryption state. */
+  void *decrypt_state;
+} aom_decrypt_init;
+
+/* Note: the deprecated vp8_decrypt_init alias collapsed onto
+ * aom_decrypt_init during the rename; the resulting self-typedef was
+ * removed because redeclaring a typedef name is invalid before C11. */
+
+/*!\cond */
+/*!\brief AOM decoder control function parameter type
+ *
+ * Defines the data types that AOMD control functions take. Note that
+ * additional common controls are defined in aom.h
+ *
+ */
+
+AOM_CTRL_USE_TYPE(AOMD_GET_LAST_REF_UPDATES, int *)
+#define AOM_CTRL_AOMD_GET_LAST_REF_UPDATES
+AOM_CTRL_USE_TYPE(AOMD_GET_FRAME_CORRUPTED, int *)
+#define AOM_CTRL_AOMD_GET_FRAME_CORRUPTED
+AOM_CTRL_USE_TYPE(AOMD_GET_LAST_REF_USED, int *)
+#define AOM_CTRL_AOMD_GET_LAST_REF_USED
+AOM_CTRL_USE_TYPE(AOMD_SET_DECRYPTOR, aom_decrypt_init *)
+#define AOM_CTRL_AOMD_SET_DECRYPTOR
+// The legacy VP8D_SET_DECRYPTOR registration became byte-identical to the
+// AOMD_SET_DECRYPTOR registration above after the rename, so it is omitted.
+AOM_CTRL_USE_TYPE(AV1D_GET_DISPLAY_SIZE, int *)
+#define AOM_CTRL_AV1D_GET_DISPLAY_SIZE
+AOM_CTRL_USE_TYPE(AV1D_GET_BIT_DEPTH, unsigned int *)
+#define AOM_CTRL_AV1D_GET_BIT_DEPTH
+AOM_CTRL_USE_TYPE(AV1D_GET_FRAME_SIZE, int *)
+#define AOM_CTRL_AV1D_GET_FRAME_SIZE
+AOM_CTRL_USE_TYPE(AV1_INVERT_TILE_DECODE_ORDER, int)
+#define AOM_CTRL_AV1_INVERT_TILE_DECODE_ORDER
+AOM_CTRL_USE_TYPE(AV1_SET_DECODE_TILE_ROW, int)
+#define AOM_CTRL_AV1_SET_DECODE_TILE_ROW
+AOM_CTRL_USE_TYPE(AV1_SET_DECODE_TILE_COL, int)
+#define AOM_CTRL_AV1_SET_DECODE_TILE_COL
+/*!\endcond */
+/*! @} - end defgroup aom_decoder */
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // AOM_AOMDX_H_
diff --git a/aom/exports_com b/aom/exports_com
index 2ab0509..0c79fa1 100644
--- a/aom/exports_com
+++ b/aom/exports_com
@@ -1,16 +1,16 @@
-text vpx_codec_build_config
-text vpx_codec_control_
-text vpx_codec_destroy
-text vpx_codec_err_to_string
-text vpx_codec_error
-text vpx_codec_error_detail
-text vpx_codec_get_caps
-text vpx_codec_iface_name
-text vpx_codec_version
-text vpx_codec_version_extra_str
-text vpx_codec_version_str
-text vpx_img_alloc
-text vpx_img_flip
-text vpx_img_free
-text vpx_img_set_rect
-text vpx_img_wrap
+text aom_codec_build_config
+text aom_codec_control_
+text aom_codec_destroy
+text aom_codec_err_to_string
+text aom_codec_error
+text aom_codec_error_detail
+text aom_codec_get_caps
+text aom_codec_iface_name
+text aom_codec_version
+text aom_codec_version_extra_str
+text aom_codec_version_str
+text aom_img_alloc
+text aom_img_flip
+text aom_img_free
+text aom_img_set_rect
+text aom_img_wrap
diff --git a/aom/exports_dec b/aom/exports_dec
index c694eba..de8fe44 100644
--- a/aom/exports_dec
+++ b/aom/exports_dec
@@ -1,8 +1,8 @@
-text vpx_codec_dec_init_ver
-text vpx_codec_decode
-text vpx_codec_get_frame
-text vpx_codec_get_stream_info
-text vpx_codec_peek_stream_info
-text vpx_codec_register_put_frame_cb
-text vpx_codec_register_put_slice_cb
-text vpx_codec_set_frame_buffer_functions
+text aom_codec_dec_init_ver
+text aom_codec_decode
+text aom_codec_get_frame
+text aom_codec_get_stream_info
+text aom_codec_peek_stream_info
+text aom_codec_register_put_frame_cb
+text aom_codec_register_put_slice_cb
+text aom_codec_set_frame_buffer_functions
diff --git a/aom/exports_enc b/aom/exports_enc
index 914e36c..0dcca7d 100644
--- a/aom/exports_enc
+++ b/aom/exports_enc
@@ -1,9 +1,9 @@
-text vpx_codec_enc_config_default
-text vpx_codec_enc_config_set
-text vpx_codec_enc_init_multi_ver
-text vpx_codec_enc_init_ver
-text vpx_codec_encode
-text vpx_codec_get_cx_data
-text vpx_codec_get_global_headers
-text vpx_codec_get_preview_frame
-text vpx_codec_set_cx_data_buf
+text aom_codec_enc_config_default
+text aom_codec_enc_config_set
+text aom_codec_enc_init_multi_ver
+text aom_codec_enc_init_ver
+text aom_codec_encode
+text aom_codec_get_cx_data
+text aom_codec_get_global_headers
+text aom_codec_get_preview_frame
+text aom_codec_set_cx_data_buf
diff --git a/aom/internal/vpx_codec_internal.h b/aom/internal/aom_codec_internal.h
similarity index 62%
rename from aom/internal/vpx_codec_internal.h
rename to aom/internal/aom_codec_internal.h
index 4ac77b7..ee346e2 100644
--- a/aom/internal/vpx_codec_internal.h
+++ b/aom/internal/aom_codec_internal.h
@@ -19,31 +19,31 @@
  * into the global namespace:
  *     <pre>
  *     my_codec.c:
- *       vpx_codec_iface_t my_codec = {
+ *       aom_codec_iface_t my_codec = {
  *           "My Codec v1.0",
- *           VPX_CODEC_ALG_ABI_VERSION,
+ *           AOM_CODEC_ALG_ABI_VERSION,
  *           ...
  *       };
  *     </pre>
  *
  * An application instantiates a specific decoder instance by using
- * vpx_codec_init() and a pointer to the algorithm's interface structure:
+ * aom_codec_init() and a pointer to the algorithm's interface structure:
  *     <pre>
  *     my_app.c:
- *       extern vpx_codec_iface_t my_codec;
+ *       extern aom_codec_iface_t my_codec;
  *       {
- *           vpx_codec_ctx_t algo;
- *           res = vpx_codec_init(&algo, &my_codec);
+ *           aom_codec_ctx_t algo;
+ *           res = aom_codec_init(&algo, &my_codec);
  *       }
  *     </pre>
  *
  * Once initialized, the instance is manged using other functions from
- * the vpx_codec_* family.
+ * the aom_codec_* family.
  */
-#ifndef VPX_INTERNAL_VPX_CODEC_INTERNAL_H_
-#define VPX_INTERNAL_VPX_CODEC_INTERNAL_H_
-#include "../vpx_decoder.h"
-#include "../vpx_encoder.h"
+#ifndef AOM_INTERNAL_AOM_CODEC_INTERNAL_H_
+#define AOM_INTERNAL_AOM_CODEC_INTERNAL_H_
+#include "../aom_decoder.h"
+#include "../aom_encoder.h"
 #include <stdarg.h>
 
 #ifdef __cplusplus
@@ -58,46 +58,46 @@
  * types, removing or reassigning enums, adding/removing/rearranging
  * fields to structures
  */
-#define VPX_CODEC_INTERNAL_ABI_VERSION (5) /**<\hideinitializer*/
+#define AOM_CODEC_INTERNAL_ABI_VERSION (5) /**<\hideinitializer*/
 
-typedef struct vpx_codec_alg_priv vpx_codec_alg_priv_t;
-typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t;
+typedef struct aom_codec_alg_priv aom_codec_alg_priv_t;
+typedef struct aom_codec_priv_enc_mr_cfg aom_codec_priv_enc_mr_cfg_t;
 
 /*!\brief init function pointer prototype
  *
  * Performs algorithm-specific initialization of the decoder context. This
- * function is called by the generic vpx_codec_init() wrapper function, so
+ * function is called by the generic aom_codec_init() wrapper function, so
  * plugins implementing this interface may trust the input parameters to be
  * properly initialized.
  *
  * \param[in] ctx   Pointer to this instance's context
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The input stream was recognized and decoder initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory operation failed.
  */
-typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(
-    vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data);
+typedef aom_codec_err_t (*aom_codec_init_fn_t)(
+    aom_codec_ctx_t *ctx, aom_codec_priv_enc_mr_cfg_t *data);
 
 /*!\brief destroy function pointer prototype
  *
  * Performs algorithm-specific destruction of the decoder context. This
- * function is called by the generic vpx_codec_destroy() wrapper function,
+ * function is called by the generic aom_codec_destroy() wrapper function,
  * so plugins implementing this interface may trust the input parameters
  * to be properly initialized.
  *
  * \param[in] ctx   Pointer to this instance's context
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The input stream was recognized and decoder initialized.
- * \retval #VPX_CODEC_MEM_ERROR
+ * \retval #AOM_CODEC_MEM_ERROR
  *     Memory operation failed.
  */
-typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx);
+typedef aom_codec_err_t (*aom_codec_destroy_fn_t)(aom_codec_alg_priv_t *ctx);
 
 /*!\brief parse stream info function pointer prototype
  *
  * Performs high level parsing of the bitstream. This function is called by the
- * generic vpx_codec_peek_stream_info() wrapper function, so plugins
+ * generic aom_codec_peek_stream_info() wrapper function, so plugins
  * implementing this interface may trust the input parameters to be properly
  * initialized.
  *
@@ -108,12 +108,12 @@
  *                         clobbered by the algorithm. This parameter \ref MAY
  *                         be NULL.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
+typedef aom_codec_err_t (*aom_codec_peek_si_fn_t)(const uint8_t *data,
                                                   unsigned int data_sz,
-                                                  vpx_codec_stream_info_t *si);
+                                                  aom_codec_stream_info_t *si);
 
 /*!\brief Return information about the current stream.
  *
@@ -125,11 +125,11 @@
  *                         clobbered by the algorithm. This parameter \ref MAY
  *                         be NULL.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
-                                                 vpx_codec_stream_info_t *si);
+typedef aom_codec_err_t (*aom_codec_get_si_fn_t)(aom_codec_alg_priv_t *ctx,
+                                                 aom_codec_stream_info_t *si);
 
 /*!\brief control function pointer prototype
  *
@@ -137,7 +137,7 @@
  * instance. This can be used to implement features specific to a particular
  * algorithm.
  *
- * This function is called by the generic vpx_codec_control() wrapper
+ * This function is called by the generic aom_codec_control() wrapper
  * function, so plugins implementing this interface may trust the input
  * parameters to be properly initialized. However,  this interface does not
  * provide type safety for the exchanged data or assign meanings to the
@@ -150,49 +150,49 @@
  * \param[in]     ctrl_id          Algorithm specific control identifier
  * \param[in,out] data             Data to exchange with algorithm instance.
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     The internal state data was deserialized.
  */
-typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx,
+typedef aom_codec_err_t (*aom_codec_control_fn_t)(aom_codec_alg_priv_t *ctx,
                                                   va_list ap);
 
 /*!\brief control function pointer mapping
  *
  * This structure stores the mapping between control identifiers and
  * implementing functions. Each algorithm provides a list of these
- * mappings. This list is searched by the vpx_codec_control() wrapper
+ * mappings. This list is searched by the aom_codec_control() wrapper
  * function to determine which function to invoke. The special
  * value {0, NULL} is used to indicate end-of-list, and must be
  * present. The special value {0, <non-null>} can be used as a catch-all
  * mapping. This implies that ctrl_id values chosen by the algorithm
  * \ref MUST be non-zero.
  */
-typedef const struct vpx_codec_ctrl_fn_map {
+typedef const struct aom_codec_ctrl_fn_map {
   int ctrl_id;
-  vpx_codec_control_fn_t fn;
-} vpx_codec_ctrl_fn_map_t;
+  aom_codec_control_fn_t fn;
+} aom_codec_ctrl_fn_map_t;
 
 /*!\brief decode data function pointer prototype
  *
  * Processes a buffer of coded data. If the processing results in a new
- * decoded frame becoming available, #VPX_CODEC_CB_PUT_SLICE and
- * #VPX_CODEC_CB_PUT_FRAME events are generated as appropriate. This
- * function is called by the generic vpx_codec_decode() wrapper function,
+ * decoded frame becoming available, #AOM_CODEC_CB_PUT_SLICE and
+ * #AOM_CODEC_CB_PUT_FRAME events are generated as appropriate. This
+ * function is called by the generic aom_codec_decode() wrapper function,
  * so plugins implementing this interface may trust the input parameters
  * to be properly initialized.
  *
  * \param[in] ctx          Pointer to this instance's context
  * \param[in] data         Pointer to this block of new coded data. If
- *                         NULL, a #VPX_CODEC_CB_PUT_FRAME event is posted
+ *                         NULL, an #AOM_CODEC_CB_PUT_FRAME event is posted
  *                         for the previously decoded frame.
  * \param[in] data_sz      Size of the coded data, in bytes.
  *
- * \return Returns #VPX_CODEC_OK if the coded data was processed completely
+ * \return Returns #AOM_CODEC_OK if the coded data was processed completely
  *         and future pictures can be decoded without error. Otherwise,
- *         see the descriptions of the other error codes in ::vpx_codec_err_t
+ *         see the descriptions of the other error codes in ::aom_codec_err_t
  *         for recoverability capabilities.
  */
-typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
+typedef aom_codec_err_t (*aom_codec_decode_fn_t)(aom_codec_alg_priv_t *ctx,
                                                  const uint8_t *data,
                                                  unsigned int data_sz,
                                                  void *user_priv,
@@ -205,8 +205,8 @@
  * complete when this function returns NULL.
  *
  * The list of available frames becomes valid upon completion of the
- * vpx_codec_decode call, and remains valid until the next call to
- * vpx_codec_decode.
+ * aom_codec_decode call, and remains valid until the next call to
+ * aom_codec_decode.
  *
  * \param[in]     ctx      Pointer to this instance's context
  * \param[in out] iter     Iterator storage, initialized to NULL
@@ -214,8 +214,8 @@
  * \return Returns a pointer to an image, if one is ready for display. Frames
  *         produced will always be in PTS (presentation time stamp) order.
  */
-typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
-                                                 vpx_codec_iter_t *iter);
+typedef aom_image_t *(*aom_codec_get_frame_fn_t)(aom_codec_alg_priv_t *ctx,
+                                                 aom_codec_iter_t *iter);
 
 /*!\brief Pass in external frame buffers for the decoder to use.
  *
@@ -230,103 +230,103 @@
  * \param[in] cb_release   Pointer to the release callback function
  * \param[in] cb_priv      Callback's private data
  *
- * \retval #VPX_CODEC_OK
+ * \retval #AOM_CODEC_OK
  *     External frame buffers will be used by libaom.
- * \retval #VPX_CODEC_INVALID_PARAM
+ * \retval #AOM_CODEC_INVALID_PARAM
  *     One or more of the callbacks were NULL.
- * \retval #VPX_CODEC_ERROR
+ * \retval #AOM_CODEC_ERROR
  *     Decoder context not initialized, or algorithm not capable of
  *     using external frame buffers.
  *
  * \note
- * When decoding VP9, the application may be required to pass in at least
- * #VPX_MAXIMUM_WORK_BUFFERS external frame
+ * When decoding AV1, the application may be required to pass in at least
+ * #AOM_MAXIMUM_WORK_BUFFERS external frame
  * buffers.
  */
-typedef vpx_codec_err_t (*vpx_codec_set_fb_fn_t)(
-    vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
-    vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
+typedef aom_codec_err_t (*aom_codec_set_fb_fn_t)(
+    aom_codec_alg_priv_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
+    aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
 
-typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx,
-                                                 const vpx_image_t *img,
-                                                 vpx_codec_pts_t pts,
+typedef aom_codec_err_t (*aom_codec_encode_fn_t)(aom_codec_alg_priv_t *ctx,
+                                                 const aom_image_t *img,
+                                                 aom_codec_pts_t pts,
                                                  unsigned long duration,
-                                                 vpx_enc_frame_flags_t flags,
+                                                 aom_enc_frame_flags_t flags,
                                                  unsigned long deadline);
-typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(
-    vpx_codec_alg_priv_t *ctx, vpx_codec_iter_t *iter);
+typedef const aom_codec_cx_pkt_t *(*aom_codec_get_cx_data_fn_t)(
+    aom_codec_alg_priv_t *ctx, aom_codec_iter_t *iter);
 
-typedef vpx_codec_err_t (*vpx_codec_enc_config_set_fn_t)(
-    vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg);
-typedef vpx_fixed_buf_t *(*vpx_codec_get_global_headers_fn_t)(
-    vpx_codec_alg_priv_t *ctx);
+typedef aom_codec_err_t (*aom_codec_enc_config_set_fn_t)(
+    aom_codec_alg_priv_t *ctx, const aom_codec_enc_cfg_t *cfg);
+typedef aom_fixed_buf_t *(*aom_codec_get_global_headers_fn_t)(
+    aom_codec_alg_priv_t *ctx);
 
-typedef vpx_image_t *(*vpx_codec_get_preview_frame_fn_t)(
-    vpx_codec_alg_priv_t *ctx);
+typedef aom_image_t *(*aom_codec_get_preview_frame_fn_t)(
+    aom_codec_alg_priv_t *ctx);
 
-typedef vpx_codec_err_t (*vpx_codec_enc_mr_get_mem_loc_fn_t)(
-    const vpx_codec_enc_cfg_t *cfg, void **mem_loc);
+typedef aom_codec_err_t (*aom_codec_enc_mr_get_mem_loc_fn_t)(
+    const aom_codec_enc_cfg_t *cfg, void **mem_loc);
 
 /*!\brief usage configuration mapping
  *
  * This structure stores the mapping between usage identifiers and
  * configuration structures. Each algorithm provides a list of these
- * mappings. This list is searched by the vpx_codec_enc_config_default()
+ * mappings. This list is searched by the aom_codec_enc_config_default()
  * wrapper function to determine which config to return. The special value
  * {-1, {0}} is used to indicate end-of-list, and must be present. At least
  * one mapping must be present, in addition to the end-of-list.
  *
  */
-typedef const struct vpx_codec_enc_cfg_map {
+typedef const struct aom_codec_enc_cfg_map {
   int usage;
-  vpx_codec_enc_cfg_t cfg;
-} vpx_codec_enc_cfg_map_t;
+  aom_codec_enc_cfg_t cfg;
+} aom_codec_enc_cfg_map_t;
 
 /*!\brief Decoder algorithm interface interface
  *
  * All decoders \ref MUST expose a variable of this type.
  */
-struct vpx_codec_iface {
+struct aom_codec_iface {
   const char *name;                   /**< Identification String  */
   int abi_version;                    /**< Implemented ABI version */
-  vpx_codec_caps_t caps;              /**< Decoder capabilities */
-  vpx_codec_init_fn_t init;           /**< \copydoc ::vpx_codec_init_fn_t */
-  vpx_codec_destroy_fn_t destroy;     /**< \copydoc ::vpx_codec_destroy_fn_t */
-  vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
-  struct vpx_codec_dec_iface {
-    vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
-    vpx_codec_get_si_fn_t get_si;   /**< \copydoc ::vpx_codec_get_si_fn_t */
-    vpx_codec_decode_fn_t decode;   /**< \copydoc ::vpx_codec_decode_fn_t */
-    vpx_codec_get_frame_fn_t
-        get_frame;                   /**< \copydoc ::vpx_codec_get_frame_fn_t */
-    vpx_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::vpx_codec_set_fb_fn_t */
+  aom_codec_caps_t caps;              /**< Decoder capabilities */
+  aom_codec_init_fn_t init;           /**< \copydoc ::aom_codec_init_fn_t */
+  aom_codec_destroy_fn_t destroy;     /**< \copydoc ::aom_codec_destroy_fn_t */
+  aom_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::aom_codec_ctrl_fn_map_t */
+  struct aom_codec_dec_iface {
+    aom_codec_peek_si_fn_t peek_si; /**< \copydoc ::aom_codec_peek_si_fn_t */
+    aom_codec_get_si_fn_t get_si;   /**< \copydoc ::aom_codec_get_si_fn_t */
+    aom_codec_decode_fn_t decode;   /**< \copydoc ::aom_codec_decode_fn_t */
+    aom_codec_get_frame_fn_t
+        get_frame;                   /**< \copydoc ::aom_codec_get_frame_fn_t */
+    aom_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::aom_codec_set_fb_fn_t */
   } dec;
-  struct vpx_codec_enc_iface {
+  struct aom_codec_enc_iface {
     int cfg_map_count;
-    vpx_codec_enc_cfg_map_t
-        *cfg_maps;                /**< \copydoc ::vpx_codec_enc_cfg_map_t */
-    vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
-    vpx_codec_get_cx_data_fn_t
-        get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
-    vpx_codec_enc_config_set_fn_t
-        cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
-    vpx_codec_get_global_headers_fn_t
-        get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
-    vpx_codec_get_preview_frame_fn_t
-        get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
-    vpx_codec_enc_mr_get_mem_loc_fn_t
-        mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
+    aom_codec_enc_cfg_map_t
+        *cfg_maps;                /**< \copydoc ::aom_codec_enc_cfg_map_t */
+    aom_codec_encode_fn_t encode; /**< \copydoc ::aom_codec_encode_fn_t */
+    aom_codec_get_cx_data_fn_t
+        get_cx_data; /**< \copydoc ::aom_codec_get_cx_data_fn_t */
+    aom_codec_enc_config_set_fn_t
+        cfg_set; /**< \copydoc ::aom_codec_enc_config_set_fn_t */
+    aom_codec_get_global_headers_fn_t
+        get_glob_hdrs; /**< \copydoc ::aom_codec_get_global_headers_fn_t */
+    aom_codec_get_preview_frame_fn_t
+        get_preview; /**< \copydoc ::aom_codec_get_preview_frame_fn_t */
+    aom_codec_enc_mr_get_mem_loc_fn_t
+        mr_get_mem_loc; /**< \copydoc ::aom_codec_enc_mr_get_mem_loc_fn_t */
   } enc;
 };
 
 /*!\brief Callback function pointer / user data pair storage */
-typedef struct vpx_codec_priv_cb_pair {
+typedef struct aom_codec_priv_cb_pair {
   union {
-    vpx_codec_put_frame_cb_fn_t put_frame;
-    vpx_codec_put_slice_cb_fn_t put_slice;
+    aom_codec_put_frame_cb_fn_t put_frame;
+    aom_codec_put_slice_cb_fn_t put_slice;
   } u;
   void *user_priv;
-} vpx_codec_priv_cb_pair_t;
+} aom_codec_priv_cb_pair_t;
 
 /*!\brief Instance private storage
  *
@@ -336,18 +336,18 @@
  * structure can be made the first member of the algorithm specific structure,
  * and the pointer cast to the proper type.
  */
-struct vpx_codec_priv {
+struct aom_codec_priv {
   const char *err_detail;
-  vpx_codec_flags_t init_flags;
+  aom_codec_flags_t init_flags;
   struct {
-    vpx_codec_priv_cb_pair_t put_frame_cb;
-    vpx_codec_priv_cb_pair_t put_slice_cb;
+    aom_codec_priv_cb_pair_t put_frame_cb;
+    aom_codec_priv_cb_pair_t put_slice_cb;
   } dec;
   struct {
-    vpx_fixed_buf_t cx_data_dst_buf;
+    aom_fixed_buf_t cx_data_dst_buf;
     unsigned int cx_data_pad_before;
     unsigned int cx_data_pad_after;
-    vpx_codec_cx_pkt_t cx_data_pkt;
+    aom_codec_cx_pkt_t cx_data_pkt;
     unsigned int total_encoders;
   } enc;
 };
@@ -355,20 +355,20 @@
 /*
  * Multi-resolution encoding internal configuration
  */
-struct vpx_codec_priv_enc_mr_cfg {
+struct aom_codec_priv_enc_mr_cfg {
   unsigned int mr_total_resolutions;
   unsigned int mr_encoder_id;
-  struct vpx_rational mr_down_sampling_factor;
+  struct aom_rational mr_down_sampling_factor;
   void *mr_low_res_mode_info;
 };
 
-#undef VPX_CTRL_USE_TYPE
-#define VPX_CTRL_USE_TYPE(id, typ) \
-  static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
+#undef AOM_CTRL_USE_TYPE
+#define AOM_CTRL_USE_TYPE(id, typ) \
+  static AOM_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
 
-#undef VPX_CTRL_USE_TYPE_DEPRECATED
-#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
-  static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
+#undef AOM_CTRL_USE_TYPE_DEPRECATED
+#define AOM_CTRL_USE_TYPE_DEPRECATED(id, typ) \
+  static AOM_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
 
 #define CAST(id, arg) id##__value(arg)
 
@@ -382,44 +382,44 @@
  * macro is provided to define this getter function automatically.
  */
 #define CODEC_INTERFACE(id)                          \
-  vpx_codec_iface_t *id(void) { return &id##_algo; } \
-  vpx_codec_iface_t id##_algo
+  aom_codec_iface_t *id(void) { return &id##_algo; } \
+  aom_codec_iface_t id##_algo
 
 /* Internal Utility Functions
  *
  * The following functions are intended to be used inside algorithms as
- * utilities for manipulating vpx_codec_* data structures.
+ * utilities for manipulating aom_codec_* data structures.
  */
-struct vpx_codec_pkt_list {
+struct aom_codec_pkt_list {
   unsigned int cnt;
   unsigned int max;
-  struct vpx_codec_cx_pkt pkts[1];
+  struct aom_codec_cx_pkt pkts[1];
 };
 
-#define vpx_codec_pkt_list_decl(n)     \
+#define aom_codec_pkt_list_decl(n)     \
   union {                              \
-    struct vpx_codec_pkt_list head;    \
+    struct aom_codec_pkt_list head;    \
     struct {                           \
-      struct vpx_codec_pkt_list head;  \
-      struct vpx_codec_cx_pkt pkts[n]; \
+      struct aom_codec_pkt_list head;  \
+      struct aom_codec_cx_pkt pkts[n]; \
     } alloc;                           \
   }
 
-#define vpx_codec_pkt_list_init(m) \
+#define aom_codec_pkt_list_init(m) \
   (m)->alloc.head.cnt = 0,         \
   (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
 
-int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *,
-                           const struct vpx_codec_cx_pkt *);
+int aom_codec_pkt_list_add(struct aom_codec_pkt_list *,
+                           const struct aom_codec_cx_pkt *);
 
-const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(
-    struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter);
+const aom_codec_cx_pkt_t *aom_codec_pkt_list_get(
+    struct aom_codec_pkt_list *list, aom_codec_iter_t *iter);
 
 #include <stdio.h>
 #include <setjmp.h>
 
-struct vpx_internal_error_info {
-  vpx_codec_err_t error_code;
+struct aom_internal_error_info {
+  aom_codec_err_t error_code;
   int has_detail;
   char detail[80];
   int setjmp;
@@ -434,12 +434,12 @@
 #endif
 #endif
 
-void vpx_internal_error(struct vpx_internal_error_info *info,
-                        vpx_codec_err_t error, const char *fmt,
+void aom_internal_error(struct aom_internal_error_info *info,
+                        aom_codec_err_t error, const char *fmt,
                         ...) CLANG_ANALYZER_NORETURN;
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_INTERNAL_VPX_CODEC_INTERNAL_H_
+#endif  // AOM_INTERNAL_AOM_CODEC_INTERNAL_H_
diff --git a/aom/src/aom_codec.c b/aom/src/aom_codec.c
new file mode 100644
index 0000000..f7c9ea5
--- /dev/null
+++ b/aom/src/aom_codec.c
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*!\file
+ * \brief Provides the high level interface to wrap decoder algorithms.
+ *
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include "aom/aom_integer.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_version.h"
+
+#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
+
+int aom_codec_version(void) { return VERSION_PACKED; }
+
+const char *aom_codec_version_str(void) { return VERSION_STRING_NOSP; }
+
+const char *aom_codec_version_extra_str(void) { return VERSION_EXTRA; }
+
+const char *aom_codec_iface_name(aom_codec_iface_t *iface) {
+  return iface ? iface->name : "<invalid interface>";
+}
+
+const char *aom_codec_err_to_string(aom_codec_err_t err) {
+  switch (err) {
+    case AOM_CODEC_OK: return "Success";
+    case AOM_CODEC_ERROR: return "Unspecified internal error";
+    case AOM_CODEC_MEM_ERROR: return "Memory allocation error";
+    case AOM_CODEC_ABI_MISMATCH: return "ABI version mismatch";
+    case AOM_CODEC_INCAPABLE:
+      return "Codec does not implement requested capability";
+    case AOM_CODEC_UNSUP_BITSTREAM:
+      return "Bitstream not supported by this decoder";
+    case AOM_CODEC_UNSUP_FEATURE:
+      return "Bitstream required feature not supported by this decoder";
+    case AOM_CODEC_CORRUPT_FRAME: return "Corrupt frame detected";
+    case AOM_CODEC_INVALID_PARAM: return "Invalid parameter";
+    case AOM_CODEC_LIST_END: return "End of iterated list";
+  }
+
+  return "Unrecognized error code";
+}
+
+const char *aom_codec_error(aom_codec_ctx_t *ctx) {
+  return (ctx) ? aom_codec_err_to_string(ctx->err)
+               : aom_codec_err_to_string(AOM_CODEC_INVALID_PARAM);
+}
+
+const char *aom_codec_error_detail(aom_codec_ctx_t *ctx) {
+  if (ctx && ctx->err)
+    return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
+
+  return NULL;
+}
+
+aom_codec_err_t aom_codec_destroy(aom_codec_ctx_t *ctx) {
+  aom_codec_err_t res;
+
+  if (!ctx)
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv)
+    res = AOM_CODEC_ERROR;
+  else {
+    ctx->iface->destroy((aom_codec_alg_priv_t *)ctx->priv);
+
+    ctx->iface = NULL;
+    ctx->name = NULL;
+    ctx->priv = NULL;
+    res = AOM_CODEC_OK;
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_codec_caps_t aom_codec_get_caps(aom_codec_iface_t *iface) {
+  return (iface) ? iface->caps : 0;
+}
+
+aom_codec_err_t aom_codec_control_(aom_codec_ctx_t *ctx, int ctrl_id, ...) {
+  aom_codec_err_t res;
+
+  if (!ctx || !ctrl_id)
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
+    res = AOM_CODEC_ERROR;
+  else {
+    aom_codec_ctrl_fn_map_t *entry;
+
+    res = AOM_CODEC_ERROR;
+
+    for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) {
+      if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) {
+        va_list ap;
+
+        va_start(ap, ctrl_id);
+        res = entry->fn((aom_codec_alg_priv_t *)ctx->priv, ap);
+        va_end(ap);
+        break;
+      }
+    }
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+void aom_internal_error(struct aom_internal_error_info *info,
+                        aom_codec_err_t error, const char *fmt, ...) {
+  va_list ap;
+
+  info->error_code = error;
+  info->has_detail = 0;
+
+  if (fmt) {
+    size_t sz = sizeof(info->detail);
+
+    info->has_detail = 1;
+    va_start(ap, fmt);
+    vsnprintf(info->detail, sz - 1, fmt, ap);
+    va_end(ap);
+    info->detail[sz - 1] = '\0';
+  }
+
+  if (info->setjmp) longjmp(info->jmp, info->error_code);
+}
diff --git a/aom/src/aom_decoder.c b/aom/src/aom_decoder.c
new file mode 100644
index 0000000..1fa1dbe
--- /dev/null
+++ b/aom/src/aom_decoder.c
@@ -0,0 +1,188 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*!\file
+ * \brief Provides the high level interface to wrap decoder algorithms.
+ *
+ */
+#include <string.h>
+#include "aom/internal/aom_codec_internal.h"
+
+#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
+
+static aom_codec_alg_priv_t *get_alg_priv(aom_codec_ctx_t *ctx) {
+  return (aom_codec_alg_priv_t *)ctx->priv;
+}
+
+aom_codec_err_t aom_codec_dec_init_ver(aom_codec_ctx_t *ctx,
+                                       aom_codec_iface_t *iface,
+                                       const aom_codec_dec_cfg_t *cfg,
+                                       aom_codec_flags_t flags, int ver) {
+  aom_codec_err_t res;
+
+  if (ver != AOM_DECODER_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
+  else if (!ctx || !iface)
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (iface->abi_version != AOM_CODEC_INTERNAL_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
+  else if ((flags & AOM_CODEC_USE_POSTPROC) &&
+           !(iface->caps & AOM_CODEC_CAP_POSTPROC))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_ERROR_CONCEALMENT) &&
+           !(iface->caps & AOM_CODEC_CAP_ERROR_CONCEALMENT))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_INPUT_FRAGMENTS) &&
+           !(iface->caps & AOM_CODEC_CAP_INPUT_FRAGMENTS))
+    res = AOM_CODEC_INCAPABLE;
+  else if (!(iface->caps & AOM_CODEC_CAP_DECODER))
+    res = AOM_CODEC_INCAPABLE;
+  else {
+    memset(ctx, 0, sizeof(*ctx));
+    ctx->iface = iface;
+    ctx->name = iface->name;
+    ctx->priv = NULL;
+    ctx->init_flags = flags;
+    ctx->config.dec = cfg;
+
+    res = ctx->iface->init(ctx, NULL);
+    if (res) {
+      ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
+      aom_codec_destroy(ctx);
+    }
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_codec_err_t aom_codec_peek_stream_info(aom_codec_iface_t *iface,
+                                           const uint8_t *data,
+                                           unsigned int data_sz,
+                                           aom_codec_stream_info_t *si) {
+  aom_codec_err_t res;
+
+  if (!iface || !data || !data_sz || !si ||
+      si->sz < sizeof(aom_codec_stream_info_t))
+    res = AOM_CODEC_INVALID_PARAM;
+  else {
+    /* Set default/unknown values */
+    si->w = 0;
+    si->h = 0;
+
+    res = iface->dec.peek_si(data, data_sz, si);
+  }
+
+  return res;
+}
+
+aom_codec_err_t aom_codec_get_stream_info(aom_codec_ctx_t *ctx,
+                                          aom_codec_stream_info_t *si) {
+  aom_codec_err_t res;
+
+  if (!ctx || !si || si->sz < sizeof(aom_codec_stream_info_t))
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv)
+    res = AOM_CODEC_ERROR;
+  else {
+    /* Set default/unknown values */
+    si->w = 0;
+    si->h = 0;
+
+    res = ctx->iface->dec.get_si(get_alg_priv(ctx), si);
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_codec_err_t aom_codec_decode(aom_codec_ctx_t *ctx, const uint8_t *data,
+                                 unsigned int data_sz, void *user_priv,
+                                 long deadline) {
+  aom_codec_err_t res;
+
+  /* Sanity checks */
+  /* NULL data ptr allowed if data_sz is 0 too */
+  if (!ctx || (!data && data_sz) || (data && !data_sz))
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv)
+    res = AOM_CODEC_ERROR;
+  else {
+    res = ctx->iface->dec.decode(get_alg_priv(ctx), data, data_sz, user_priv,
+                                 deadline);
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_image_t *aom_codec_get_frame(aom_codec_ctx_t *ctx, aom_codec_iter_t *iter) {
+  aom_image_t *img;
+
+  if (!ctx || !iter || !ctx->iface || !ctx->priv)
+    img = NULL;
+  else
+    img = ctx->iface->dec.get_frame(get_alg_priv(ctx), iter);
+
+  return img;
+}
+
+aom_codec_err_t aom_codec_register_put_frame_cb(aom_codec_ctx_t *ctx,
+                                                aom_codec_put_frame_cb_fn_t cb,
+                                                void *user_priv) {
+  aom_codec_err_t res;
+
+  if (!ctx || !cb)
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv ||
+           !(ctx->iface->caps & AOM_CODEC_CAP_PUT_FRAME))
+    res = AOM_CODEC_ERROR;
+  else {
+    ctx->priv->dec.put_frame_cb.u.put_frame = cb;
+    ctx->priv->dec.put_frame_cb.user_priv = user_priv;
+    res = AOM_CODEC_OK;
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_codec_err_t aom_codec_register_put_slice_cb(aom_codec_ctx_t *ctx,
+                                                aom_codec_put_slice_cb_fn_t cb,
+                                                void *user_priv) {
+  aom_codec_err_t res;
+
+  if (!ctx || !cb)
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!ctx->iface || !ctx->priv ||
+           !(ctx->iface->caps & AOM_CODEC_CAP_PUT_SLICE))
+    res = AOM_CODEC_ERROR;
+  else {
+    ctx->priv->dec.put_slice_cb.u.put_slice = cb;
+    ctx->priv->dec.put_slice_cb.user_priv = user_priv;
+    res = AOM_CODEC_OK;
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
+
+aom_codec_err_t aom_codec_set_frame_buffer_functions(
+    aom_codec_ctx_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
+    aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
+  aom_codec_err_t res;
+
+  if (!ctx || !cb_get || !cb_release) {
+    res = AOM_CODEC_INVALID_PARAM;
+  } else if (!ctx->iface || !ctx->priv ||
+             !(ctx->iface->caps & AOM_CODEC_CAP_EXTERNAL_FRAME_BUFFER)) {
+    res = AOM_CODEC_ERROR;
+  } else {
+    res = ctx->iface->dec.set_fb_fn(get_alg_priv(ctx), cb_get, cb_release,
+                                    cb_priv);
+  }
+
+  return SAVE_STATUS(ctx, res);
+}
diff --git a/aom/src/vpx_encoder.c b/aom/src/aom_encoder.c
similarity index 61%
rename from aom/src/vpx_encoder.c
rename to aom/src/aom_encoder.c
index f3689e3..0c6fa3d 100644
--- a/aom/src/vpx_encoder.c
+++ b/aom/src/aom_encoder.c
@@ -14,34 +14,34 @@
  */
 #include <limits.h>
 #include <string.h>
-#include "vpx_config.h"
-#include "aom/internal/vpx_codec_internal.h"
+#include "aom_config.h"
+#include "aom/internal/aom_codec_internal.h"
 
 #define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
 
-static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
-  return (vpx_codec_alg_priv_t *)ctx->priv;
+static aom_codec_alg_priv_t *get_alg_priv(aom_codec_ctx_t *ctx) {
+  return (aom_codec_alg_priv_t *)ctx->priv;
 }
 
-vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
-                                       vpx_codec_iface_t *iface,
-                                       const vpx_codec_enc_cfg_t *cfg,
-                                       vpx_codec_flags_t flags, int ver) {
-  vpx_codec_err_t res;
+aom_codec_err_t aom_codec_enc_init_ver(aom_codec_ctx_t *ctx,
+                                       aom_codec_iface_t *iface,
+                                       const aom_codec_enc_cfg_t *cfg,
+                                       aom_codec_flags_t flags, int ver) {
+  aom_codec_err_t res;
 
-  if (ver != VPX_ENCODER_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
+  if (ver != AOM_ENCODER_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
   else if (!ctx || !iface || !cfg)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
-  else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) &&
-           !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
-    res = VPX_CODEC_INCAPABLE;
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (iface->abi_version != AOM_CODEC_INTERNAL_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
+  else if (!(iface->caps & AOM_CODEC_CAP_ENCODER))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_PSNR) && !(iface->caps & AOM_CODEC_CAP_PSNR))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_OUTPUT_PARTITION) &&
+           !(iface->caps & AOM_CODEC_CAP_OUTPUT_PARTITION))
+    res = AOM_CODEC_INCAPABLE;
   else {
     ctx->iface = iface;
     ctx->name = iface->name;
@@ -52,43 +52,43 @@
 
     if (res) {
       ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
-      vpx_codec_destroy(ctx);
+      aom_codec_destroy(ctx);
     }
   }
 
   return SAVE_STATUS(ctx, res);
 }
 
-vpx_codec_err_t vpx_codec_enc_init_multi_ver(
-    vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg,
-    int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver) {
-  vpx_codec_err_t res = VPX_CODEC_OK;
+aom_codec_err_t aom_codec_enc_init_multi_ver(
+    aom_codec_ctx_t *ctx, aom_codec_iface_t *iface, aom_codec_enc_cfg_t *cfg,
+    int num_enc, aom_codec_flags_t flags, aom_rational_t *dsf, int ver) {
+  aom_codec_err_t res = AOM_CODEC_OK;
 
-  if (ver != VPX_ENCODER_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
+  if (ver != AOM_ENCODER_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
   else if (!ctx || !iface || !cfg || (num_enc > 16 || num_enc < 1))
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
-  else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) &&
-           !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
-    res = VPX_CODEC_INCAPABLE;
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (iface->abi_version != AOM_CODEC_INTERNAL_ABI_VERSION)
+    res = AOM_CODEC_ABI_MISMATCH;
+  else if (!(iface->caps & AOM_CODEC_CAP_ENCODER))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_PSNR) && !(iface->caps & AOM_CODEC_CAP_PSNR))
+    res = AOM_CODEC_INCAPABLE;
+  else if ((flags & AOM_CODEC_USE_OUTPUT_PARTITION) &&
+           !(iface->caps & AOM_CODEC_CAP_OUTPUT_PARTITION))
+    res = AOM_CODEC_INCAPABLE;
   else {
     int i;
     void *mem_loc = NULL;
 
     if (!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc))) {
       for (i = 0; i < num_enc; i++) {
-        vpx_codec_priv_enc_mr_cfg_t mr_cfg;
+        aom_codec_priv_enc_mr_cfg_t mr_cfg;
 
         /* Validate down-sampling factor. */
         if (dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 ||
             dsf->den > dsf->num) {
-          res = VPX_CODEC_INVALID_PARAM;
+          res = AOM_CODEC_INVALID_PARAM;
           break;
         }
 
@@ -102,7 +102,7 @@
          * resolution always use the same frame_type chosen by the
          * lowest-resolution encoder.
          */
-        if (mr_cfg.mr_encoder_id) cfg->kf_mode = VPX_KF_DISABLED;
+        if (mr_cfg.mr_encoder_id) cfg->kf_mode = AOM_KF_DISABLED;
 
         ctx->iface = iface;
         ctx->name = iface->name;
@@ -115,13 +115,13 @@
           const char *error_detail = ctx->priv ? ctx->priv->err_detail : NULL;
           /* Destroy current ctx */
           ctx->err_detail = error_detail;
-          vpx_codec_destroy(ctx);
+          aom_codec_destroy(ctx);
 
           /* Destroy already allocated high-level ctx */
           while (i) {
             ctx--;
             ctx->err_detail = error_detail;
-            vpx_codec_destroy(ctx);
+            aom_codec_destroy(ctx);
             i--;
           }
         }
@@ -139,26 +139,26 @@
   return SAVE_STATUS(ctx, res);
 }
 
-vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
-                                             vpx_codec_enc_cfg_t *cfg,
+aom_codec_err_t aom_codec_enc_config_default(aom_codec_iface_t *iface,
+                                             aom_codec_enc_cfg_t *cfg,
                                              unsigned int usage) {
-  vpx_codec_err_t res;
-  vpx_codec_enc_cfg_map_t *map;
+  aom_codec_err_t res;
+  aom_codec_enc_cfg_map_t *map;
   int i;
 
   if (!iface || !cfg || usage > INT_MAX)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
-    res = VPX_CODEC_INCAPABLE;
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!(iface->caps & AOM_CODEC_CAP_ENCODER))
+    res = AOM_CODEC_INCAPABLE;
   else {
-    res = VPX_CODEC_INVALID_PARAM;
+    res = AOM_CODEC_INVALID_PARAM;
 
     for (i = 0; i < iface->enc.cfg_map_count; ++i) {
       map = iface->enc.cfg_maps + i;
       if (map->usage == (int)usage) {
         *cfg = map->cfg;
         cfg->g_usage = usage;
-        res = VPX_CODEC_OK;
+        res = AOM_CODEC_OK;
         break;
       }
     }
@@ -185,18 +185,18 @@
 static void FLOATING_POINT_RESTORE() {}
 #endif
 
-vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
-                                 vpx_codec_pts_t pts, unsigned long duration,
-                                 vpx_enc_frame_flags_t flags,
+aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img,
+                                 aom_codec_pts_t pts, unsigned long duration,
+                                 aom_enc_frame_flags_t flags,
                                  unsigned long deadline) {
-  vpx_codec_err_t res = VPX_CODEC_OK;
+  aom_codec_err_t res = AOM_CODEC_OK;
 
   if (!ctx || (img && !duration))
-    res = VPX_CODEC_INVALID_PARAM;
+    res = AOM_CODEC_INVALID_PARAM;
   else if (!ctx->iface || !ctx->priv)
-    res = VPX_CODEC_ERROR;
-  else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
-    res = VPX_CODEC_INCAPABLE;
+    res = AOM_CODEC_ERROR;
+  else if (!(ctx->iface->caps & AOM_CODEC_CAP_ENCODER))
+    res = AOM_CODEC_INCAPABLE;
   else {
     unsigned int num_enc = ctx->priv->enc.total_encoders;
 
@@ -236,33 +236,33 @@
   return SAVE_STATUS(ctx, res);
 }
 
-const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_iter_t *iter) {
-  const vpx_codec_cx_pkt_t *pkt = NULL;
+const aom_codec_cx_pkt_t *aom_codec_get_cx_data(aom_codec_ctx_t *ctx,
+                                                aom_codec_iter_t *iter) {
+  const aom_codec_cx_pkt_t *pkt = NULL;
 
   if (ctx) {
     if (!iter)
-      ctx->err = VPX_CODEC_INVALID_PARAM;
+      ctx->err = AOM_CODEC_INVALID_PARAM;
     else if (!ctx->iface || !ctx->priv)
-      ctx->err = VPX_CODEC_ERROR;
-    else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
-      ctx->err = VPX_CODEC_INCAPABLE;
+      ctx->err = AOM_CODEC_ERROR;
+    else if (!(ctx->iface->caps & AOM_CODEC_CAP_ENCODER))
+      ctx->err = AOM_CODEC_INCAPABLE;
     else
       pkt = ctx->iface->enc.get_cx_data(get_alg_priv(ctx), iter);
   }
 
-  if (pkt && pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+  if (pkt && pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
     // If the application has specified a destination area for the
     // compressed data, and the codec has not placed the data there,
     // and it fits, copy it.
-    vpx_codec_priv_t *const priv = ctx->priv;
+    aom_codec_priv_t *const priv = ctx->priv;
     char *const dst_buf = (char *)priv->enc.cx_data_dst_buf.buf;
 
     if (dst_buf && pkt->data.raw.buf != dst_buf &&
         pkt->data.raw.sz + priv->enc.cx_data_pad_before +
                 priv->enc.cx_data_pad_after <=
             priv->enc.cx_data_dst_buf.sz) {
-      vpx_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt;
+      aom_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt;
 
       memcpy(dst_buf + priv->enc.cx_data_pad_before, pkt->data.raw.buf,
              pkt->data.raw.sz);
@@ -282,11 +282,11 @@
   return pkt;
 }
 
-vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
-                                          const vpx_fixed_buf_t *buf,
+aom_codec_err_t aom_codec_set_cx_data_buf(aom_codec_ctx_t *ctx,
+                                          const aom_fixed_buf_t *buf,
                                           unsigned int pad_before,
                                           unsigned int pad_after) {
-  if (!ctx || !ctx->priv) return VPX_CODEC_INVALID_PARAM;
+  if (!ctx || !ctx->priv) return AOM_CODEC_INVALID_PARAM;
 
   if (buf) {
     ctx->priv->enc.cx_data_dst_buf = *buf;
@@ -299,19 +299,19 @@
     ctx->priv->enc.cx_data_pad_after = 0;
   }
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) {
-  vpx_image_t *img = NULL;
+const aom_image_t *aom_codec_get_preview_frame(aom_codec_ctx_t *ctx) {
+  aom_image_t *img = NULL;
 
   if (ctx) {
     if (!ctx->iface || !ctx->priv)
-      ctx->err = VPX_CODEC_ERROR;
-    else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
-      ctx->err = VPX_CODEC_INCAPABLE;
+      ctx->err = AOM_CODEC_ERROR;
+    else if (!(ctx->iface->caps & AOM_CODEC_CAP_ENCODER))
+      ctx->err = AOM_CODEC_INCAPABLE;
     else if (!ctx->iface->enc.get_preview)
-      ctx->err = VPX_CODEC_INCAPABLE;
+      ctx->err = AOM_CODEC_INCAPABLE;
     else
       img = ctx->iface->enc.get_preview(get_alg_priv(ctx));
   }
@@ -319,16 +319,16 @@
   return img;
 }
 
-vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) {
-  vpx_fixed_buf_t *buf = NULL;
+aom_fixed_buf_t *aom_codec_get_global_headers(aom_codec_ctx_t *ctx) {
+  aom_fixed_buf_t *buf = NULL;
 
   if (ctx) {
     if (!ctx->iface || !ctx->priv)
-      ctx->err = VPX_CODEC_ERROR;
-    else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
-      ctx->err = VPX_CODEC_INCAPABLE;
+      ctx->err = AOM_CODEC_ERROR;
+    else if (!(ctx->iface->caps & AOM_CODEC_CAP_ENCODER))
+      ctx->err = AOM_CODEC_INCAPABLE;
     else if (!ctx->iface->enc.get_glob_hdrs)
-      ctx->err = VPX_CODEC_INCAPABLE;
+      ctx->err = AOM_CODEC_INCAPABLE;
     else
       buf = ctx->iface->enc.get_glob_hdrs(get_alg_priv(ctx));
   }
@@ -336,22 +336,22 @@
   return buf;
 }
 
-vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
-                                         const vpx_codec_enc_cfg_t *cfg) {
-  vpx_codec_err_t res;
+aom_codec_err_t aom_codec_enc_config_set(aom_codec_ctx_t *ctx,
+                                         const aom_codec_enc_cfg_t *cfg) {
+  aom_codec_err_t res;
 
   if (!ctx || !ctx->iface || !ctx->priv || !cfg)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
-    res = VPX_CODEC_INCAPABLE;
+    res = AOM_CODEC_INVALID_PARAM;
+  else if (!(ctx->iface->caps & AOM_CODEC_CAP_ENCODER))
+    res = AOM_CODEC_INCAPABLE;
   else
     res = ctx->iface->enc.cfg_set(get_alg_priv(ctx), cfg);
 
   return SAVE_STATUS(ctx, res);
 }
 
-int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list,
-                           const struct vpx_codec_cx_pkt *pkt) {
+int aom_codec_pkt_list_add(struct aom_codec_pkt_list *list,
+                           const struct aom_codec_cx_pkt *pkt) {
   if (list->cnt < list->max) {
     list->pkts[list->cnt++] = *pkt;
     return 0;
@@ -360,15 +360,15 @@
   return 1;
 }
 
-const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(
-    struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter) {
-  const vpx_codec_cx_pkt_t *pkt;
+const aom_codec_cx_pkt_t *aom_codec_pkt_list_get(
+    struct aom_codec_pkt_list *list, aom_codec_iter_t *iter) {
+  const aom_codec_cx_pkt_t *pkt;
 
   if (!(*iter)) {
     *iter = list->pkts;
   }
 
-  pkt = (const vpx_codec_cx_pkt_t *)*iter;
+  pkt = (const aom_codec_cx_pkt_t *)*iter;
 
   if ((size_t)(pkt - list->pkts) < list->cnt)
     *iter = pkt + 1;
diff --git a/aom/src/aom_image.c b/aom/src/aom_image.c
new file mode 100644
index 0000000..6572409
--- /dev/null
+++ b/aom/src/aom_image.c
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "aom/aom_image.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
+
+static aom_image_t *img_alloc_helper(aom_image_t *img, aom_img_fmt_t fmt,
+                                     unsigned int d_w, unsigned int d_h,
+                                     unsigned int buf_align,
+                                     unsigned int stride_align,
+                                     unsigned char *img_data) {
+  unsigned int h, w, s, xcs, ycs, bps;
+  unsigned int stride_in_bytes;
+  int align;
+
+  /* Treat align==0 like align==1 */
+  if (!buf_align) buf_align = 1;
+
+  /* Validate alignment (must be power of 2) */
+  if (buf_align & (buf_align - 1)) goto fail;
+
+  /* Treat align==0 like align==1 */
+  if (!stride_align) stride_align = 1;
+
+  /* Validate alignment (must be power of 2) */
+  if (stride_align & (stride_align - 1)) goto fail;
+
+  /* Get sample size for this format */
+  switch (fmt) {
+    case AOM_IMG_FMT_RGB32:
+    case AOM_IMG_FMT_RGB32_LE:
+    case AOM_IMG_FMT_ARGB:
+    case AOM_IMG_FMT_ARGB_LE: bps = 32; break;
+    case AOM_IMG_FMT_RGB24:
+    case AOM_IMG_FMT_BGR24: bps = 24; break;
+    case AOM_IMG_FMT_RGB565:
+    case AOM_IMG_FMT_RGB565_LE:
+    case AOM_IMG_FMT_RGB555:
+    case AOM_IMG_FMT_RGB555_LE:
+    case AOM_IMG_FMT_UYVY:
+    case AOM_IMG_FMT_YUY2:
+    case AOM_IMG_FMT_YVYU: bps = 16; break;
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_YV12:
+    case AOM_IMG_FMT_AOMI420:
+    case AOM_IMG_FMT_AOMYV12: bps = 12; break;
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I440: bps = 16; break;
+    case AOM_IMG_FMT_I444: bps = 24; break;
+    case AOM_IMG_FMT_I42016: bps = 24; break;
+    case AOM_IMG_FMT_I42216:
+    case AOM_IMG_FMT_I44016: bps = 32; break;
+    case AOM_IMG_FMT_I44416: bps = 48; break;
+    default: bps = 16; break;
+  }
+
+  /* Get chroma shift values for this format */
+  switch (fmt) {
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_YV12:
+    case AOM_IMG_FMT_AOMI420:
+    case AOM_IMG_FMT_AOMYV12:
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I42016:
+    case AOM_IMG_FMT_I42216: xcs = 1; break;
+    default: xcs = 0; break;
+  }
+
+  switch (fmt) {
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_I440:
+    case AOM_IMG_FMT_YV12:
+    case AOM_IMG_FMT_AOMI420:
+    case AOM_IMG_FMT_AOMYV12:
+    case AOM_IMG_FMT_I42016:
+    case AOM_IMG_FMT_I44016: ycs = 1; break;
+    default: ycs = 0; break;
+  }
+
+  /* Calculate storage sizes given the chroma subsampling */
+  align = (1 << xcs) - 1;
+  w = (d_w + align) & ~align;
+  align = (1 << ycs) - 1;
+  h = (d_h + align) & ~align;
+  s = (fmt & AOM_IMG_FMT_PLANAR) ? w : bps * w / 8;
+  s = (s + stride_align - 1) & ~(stride_align - 1);
+  stride_in_bytes = (fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? s * 2 : s;
+
+  /* Allocate the new image */
+  if (!img) {
+    img = (aom_image_t *)calloc(1, sizeof(aom_image_t));
+
+    if (!img) goto fail;
+
+    img->self_allocd = 1;
+  } else {
+    memset(img, 0, sizeof(aom_image_t));
+  }
+
+  img->img_data = img_data;
+
+  if (!img_data) {
+    const uint64_t alloc_size = (fmt & AOM_IMG_FMT_PLANAR)
+                                    ? (uint64_t)h * s * bps / 8
+                                    : (uint64_t)h * s;
+
+    if (alloc_size != (size_t)alloc_size) goto fail;
+
+    img->img_data = (uint8_t *)aom_memalign(buf_align, (size_t)alloc_size);
+    img->img_data_owner = 1;
+  }
+
+  if (!img->img_data) goto fail;
+
+  img->fmt = fmt;
+  img->bit_depth = (fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
+  img->w = w;
+  img->h = h;
+  img->x_chroma_shift = xcs;
+  img->y_chroma_shift = ycs;
+  img->bps = bps;
+
+  /* Calculate strides */
+  img->stride[AOM_PLANE_Y] = img->stride[AOM_PLANE_ALPHA] = stride_in_bytes;
+  img->stride[AOM_PLANE_U] = img->stride[AOM_PLANE_V] = stride_in_bytes >> xcs;
+
+  /* Default viewport to entire image */
+  if (!aom_img_set_rect(img, 0, 0, d_w, d_h)) return img;
+
+fail:
+  aom_img_free(img);
+  return NULL;
+}
+
+aom_image_t *aom_img_alloc(aom_image_t *img, aom_img_fmt_t fmt,
+                           unsigned int d_w, unsigned int d_h,
+                           unsigned int align) {
+  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
+}
+
+aom_image_t *aom_img_wrap(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w,
+                          unsigned int d_h, unsigned int stride_align,
+                          unsigned char *img_data) {
+  /* By setting buf_align = 1, we don't change buffer alignment in this
+   * function. */
+  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
+}
+
+int aom_img_set_rect(aom_image_t *img, unsigned int x, unsigned int y,
+                     unsigned int w, unsigned int h) {
+  unsigned char *data;
+
+  if (x + w <= img->w && y + h <= img->h) {
+    img->d_w = w;
+    img->d_h = h;
+
+    /* Calculate plane pointers */
+    if (!(img->fmt & AOM_IMG_FMT_PLANAR)) {
+      img->planes[AOM_PLANE_PACKED] =
+          img->img_data + x * img->bps / 8 + y * img->stride[AOM_PLANE_PACKED];
+    } else {
+      const int bytes_per_sample =
+          (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
+      data = img->img_data;
+
+      if (img->fmt & AOM_IMG_FMT_HAS_ALPHA) {
+        img->planes[AOM_PLANE_ALPHA] =
+            data + x * bytes_per_sample + y * img->stride[AOM_PLANE_ALPHA];
+        data += img->h * img->stride[AOM_PLANE_ALPHA];
+      }
+
+      img->planes[AOM_PLANE_Y] =
+          data + x * bytes_per_sample + y * img->stride[AOM_PLANE_Y];
+      data += img->h * img->stride[AOM_PLANE_Y];
+
+      if (!(img->fmt & AOM_IMG_FMT_UV_FLIP)) {
+        img->planes[AOM_PLANE_U] =
+            data + (x >> img->x_chroma_shift) * bytes_per_sample +
+            (y >> img->y_chroma_shift) * img->stride[AOM_PLANE_U];
+        data += (img->h >> img->y_chroma_shift) * img->stride[AOM_PLANE_U];
+        img->planes[AOM_PLANE_V] =
+            data + (x >> img->x_chroma_shift) * bytes_per_sample +
+            (y >> img->y_chroma_shift) * img->stride[AOM_PLANE_V];
+      } else {
+        img->planes[AOM_PLANE_V] =
+            data + (x >> img->x_chroma_shift) * bytes_per_sample +
+            (y >> img->y_chroma_shift) * img->stride[AOM_PLANE_V];
+        data += (img->h >> img->y_chroma_shift) * img->stride[AOM_PLANE_V];
+        img->planes[AOM_PLANE_U] =
+            data + (x >> img->x_chroma_shift) * bytes_per_sample +
+            (y >> img->y_chroma_shift) * img->stride[AOM_PLANE_U];
+      }
+    }
+    return 0;
+  }
+  return -1;
+}
+
+void aom_img_flip(aom_image_t *img) {
+  /* Note: In the calculation pointer adjustment calculation, we want the
+   * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
+   * standard indicates that if the adjustment parameter is unsigned, the
+   * stride parameter will be promoted to unsigned, causing errors when
+   * the lhs is a larger type than the rhs.
+   */
+  img->planes[AOM_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[AOM_PLANE_Y];
+  img->stride[AOM_PLANE_Y] = -img->stride[AOM_PLANE_Y];
+
+  img->planes[AOM_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
+                              img->stride[AOM_PLANE_U];
+  img->stride[AOM_PLANE_U] = -img->stride[AOM_PLANE_U];
+
+  img->planes[AOM_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
+                              img->stride[AOM_PLANE_V];
+  img->stride[AOM_PLANE_V] = -img->stride[AOM_PLANE_V];
+
+  img->planes[AOM_PLANE_ALPHA] +=
+      (signed)(img->d_h - 1) * img->stride[AOM_PLANE_ALPHA];
+  img->stride[AOM_PLANE_ALPHA] = -img->stride[AOM_PLANE_ALPHA];
+}
+
+void aom_img_free(aom_image_t *img) {
+  if (img) {
+    if (img->img_data && img->img_data_owner) aom_free(img->img_data);
+
+    if (img->self_allocd) free(img);
+  }
+}
diff --git a/aom/src/vpx_codec.c b/aom/src/vpx_codec.c
deleted file mode 100644
index 7bdc870..0000000
--- a/aom/src/vpx_codec.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*!\file
- * \brief Provides the high level interface to wrap decoder algorithms.
- *
- */
-#include <stdarg.h>
-#include <stdlib.h>
-#include "aom/vpx_integer.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "vpx_version.h"
-
-#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
-
-int vpx_codec_version(void) { return VERSION_PACKED; }
-
-const char *vpx_codec_version_str(void) { return VERSION_STRING_NOSP; }
-
-const char *vpx_codec_version_extra_str(void) { return VERSION_EXTRA; }
-
-const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) {
-  return iface ? iface->name : "<invalid interface>";
-}
-
-const char *vpx_codec_err_to_string(vpx_codec_err_t err) {
-  switch (err) {
-    case VPX_CODEC_OK: return "Success";
-    case VPX_CODEC_ERROR: return "Unspecified internal error";
-    case VPX_CODEC_MEM_ERROR: return "Memory allocation error";
-    case VPX_CODEC_ABI_MISMATCH: return "ABI version mismatch";
-    case VPX_CODEC_INCAPABLE:
-      return "Codec does not implement requested capability";
-    case VPX_CODEC_UNSUP_BITSTREAM:
-      return "Bitstream not supported by this decoder";
-    case VPX_CODEC_UNSUP_FEATURE:
-      return "Bitstream required feature not supported by this decoder";
-    case VPX_CODEC_CORRUPT_FRAME: return "Corrupt frame detected";
-    case VPX_CODEC_INVALID_PARAM: return "Invalid parameter";
-    case VPX_CODEC_LIST_END: return "End of iterated list";
-  }
-
-  return "Unrecognized error code";
-}
-
-const char *vpx_codec_error(vpx_codec_ctx_t *ctx) {
-  return (ctx) ? vpx_codec_err_to_string(ctx->err)
-               : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
-}
-
-const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) {
-  if (ctx && ctx->err)
-    return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
-
-  return NULL;
-}
-
-vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
-  vpx_codec_err_t res;
-
-  if (!ctx)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv)
-    res = VPX_CODEC_ERROR;
-  else {
-    ctx->iface->destroy((vpx_codec_alg_priv_t *)ctx->priv);
-
-    ctx->iface = NULL;
-    ctx->name = NULL;
-    ctx->priv = NULL;
-    res = VPX_CODEC_OK;
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) {
-  return (iface) ? iface->caps : 0;
-}
-
-vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) {
-  vpx_codec_err_t res;
-
-  if (!ctx || !ctrl_id)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
-    res = VPX_CODEC_ERROR;
-  else {
-    vpx_codec_ctrl_fn_map_t *entry;
-
-    res = VPX_CODEC_ERROR;
-
-    for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) {
-      if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) {
-        va_list ap;
-
-        va_start(ap, ctrl_id);
-        res = entry->fn((vpx_codec_alg_priv_t *)ctx->priv, ap);
-        va_end(ap);
-        break;
-      }
-    }
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-void vpx_internal_error(struct vpx_internal_error_info *info,
-                        vpx_codec_err_t error, const char *fmt, ...) {
-  va_list ap;
-
-  info->error_code = error;
-  info->has_detail = 0;
-
-  if (fmt) {
-    size_t sz = sizeof(info->detail);
-
-    info->has_detail = 1;
-    va_start(ap, fmt);
-    vsnprintf(info->detail, sz - 1, fmt, ap);
-    va_end(ap);
-    info->detail[sz - 1] = '\0';
-  }
-
-  if (info->setjmp) longjmp(info->jmp, info->error_code);
-}
diff --git a/aom/src/vpx_decoder.c b/aom/src/vpx_decoder.c
deleted file mode 100644
index 97709d1..0000000
--- a/aom/src/vpx_decoder.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*!\file
- * \brief Provides the high level interface to wrap decoder algorithms.
- *
- */
-#include <string.h>
-#include "aom/internal/vpx_codec_internal.h"
-
-#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
-
-static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
-  return (vpx_codec_alg_priv_t *)ctx->priv;
-}
-
-vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
-                                       vpx_codec_iface_t *iface,
-                                       const vpx_codec_dec_cfg_t *cfg,
-                                       vpx_codec_flags_t flags, int ver) {
-  vpx_codec_err_t res;
-
-  if (ver != VPX_DECODER_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
-  else if (!ctx || !iface)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
-    res = VPX_CODEC_ABI_MISMATCH;
-  else if ((flags & VPX_CODEC_USE_POSTPROC) &&
-           !(iface->caps & VPX_CODEC_CAP_POSTPROC))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
-           !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
-    res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
-           !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
-    res = VPX_CODEC_INCAPABLE;
-  else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
-    res = VPX_CODEC_INCAPABLE;
-  else {
-    memset(ctx, 0, sizeof(*ctx));
-    ctx->iface = iface;
-    ctx->name = iface->name;
-    ctx->priv = NULL;
-    ctx->init_flags = flags;
-    ctx->config.dec = cfg;
-
-    res = ctx->iface->init(ctx, NULL);
-    if (res) {
-      ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
-      vpx_codec_destroy(ctx);
-    }
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
-                                           const uint8_t *data,
-                                           unsigned int data_sz,
-                                           vpx_codec_stream_info_t *si) {
-  vpx_codec_err_t res;
-
-  if (!iface || !data || !data_sz || !si ||
-      si->sz < sizeof(vpx_codec_stream_info_t))
-    res = VPX_CODEC_INVALID_PARAM;
-  else {
-    /* Set default/unknown values */
-    si->w = 0;
-    si->h = 0;
-
-    res = iface->dec.peek_si(data, data_sz, si);
-  }
-
-  return res;
-}
-
-vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
-                                          vpx_codec_stream_info_t *si) {
-  vpx_codec_err_t res;
-
-  if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t))
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv)
-    res = VPX_CODEC_ERROR;
-  else {
-    /* Set default/unknown values */
-    si->w = 0;
-    si->h = 0;
-
-    res = ctx->iface->dec.get_si(get_alg_priv(ctx), si);
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
-                                 unsigned int data_sz, void *user_priv,
-                                 long deadline) {
-  vpx_codec_err_t res;
-
-  /* Sanity checks */
-  /* NULL data ptr allowed if data_sz is 0 too */
-  if (!ctx || (!data && data_sz) || (data && !data_sz))
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv)
-    res = VPX_CODEC_ERROR;
-  else {
-    res = ctx->iface->dec.decode(get_alg_priv(ctx), data, data_sz, user_priv,
-                                 deadline);
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) {
-  vpx_image_t *img;
-
-  if (!ctx || !iter || !ctx->iface || !ctx->priv)
-    img = NULL;
-  else
-    img = ctx->iface->dec.get_frame(get_alg_priv(ctx), iter);
-
-  return img;
-}
-
-vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_put_frame_cb_fn_t cb,
-                                                void *user_priv) {
-  vpx_codec_err_t res;
-
-  if (!ctx || !cb)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv ||
-           !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
-    res = VPX_CODEC_ERROR;
-  else {
-    ctx->priv->dec.put_frame_cb.u.put_frame = cb;
-    ctx->priv->dec.put_frame_cb.user_priv = user_priv;
-    res = VPX_CODEC_OK;
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
-                                                vpx_codec_put_slice_cb_fn_t cb,
-                                                void *user_priv) {
-  vpx_codec_err_t res;
-
-  if (!ctx || !cb)
-    res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv ||
-           !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE))
-    res = VPX_CODEC_ERROR;
-  else {
-    ctx->priv->dec.put_slice_cb.u.put_slice = cb;
-    ctx->priv->dec.put_slice_cb.user_priv = user_priv;
-    res = VPX_CODEC_OK;
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
-
-vpx_codec_err_t vpx_codec_set_frame_buffer_functions(
-    vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
-    vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
-  vpx_codec_err_t res;
-
-  if (!ctx || !cb_get || !cb_release) {
-    res = VPX_CODEC_INVALID_PARAM;
-  } else if (!ctx->iface || !ctx->priv ||
-             !(ctx->iface->caps & VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER)) {
-    res = VPX_CODEC_ERROR;
-  } else {
-    res = ctx->iface->dec.set_fb_fn(get_alg_priv(ctx), cb_get, cb_release,
-                                    cb_priv);
-  }
-
-  return SAVE_STATUS(ctx, res);
-}
diff --git a/aom/src/vpx_image.c b/aom/src/vpx_image.c
deleted file mode 100644
index 0970b06..0000000
--- a/aom/src/vpx_image.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "aom/vpx_image.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
-
-static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt,
-                                     unsigned int d_w, unsigned int d_h,
-                                     unsigned int buf_align,
-                                     unsigned int stride_align,
-                                     unsigned char *img_data) {
-  unsigned int h, w, s, xcs, ycs, bps;
-  unsigned int stride_in_bytes;
-  int align;
-
-  /* Treat align==0 like align==1 */
-  if (!buf_align) buf_align = 1;
-
-  /* Validate alignment (must be power of 2) */
-  if (buf_align & (buf_align - 1)) goto fail;
-
-  /* Treat align==0 like align==1 */
-  if (!stride_align) stride_align = 1;
-
-  /* Validate alignment (must be power of 2) */
-  if (stride_align & (stride_align - 1)) goto fail;
-
-  /* Get sample size for this format */
-  switch (fmt) {
-    case VPX_IMG_FMT_RGB32:
-    case VPX_IMG_FMT_RGB32_LE:
-    case VPX_IMG_FMT_ARGB:
-    case VPX_IMG_FMT_ARGB_LE: bps = 32; break;
-    case VPX_IMG_FMT_RGB24:
-    case VPX_IMG_FMT_BGR24: bps = 24; break;
-    case VPX_IMG_FMT_RGB565:
-    case VPX_IMG_FMT_RGB565_LE:
-    case VPX_IMG_FMT_RGB555:
-    case VPX_IMG_FMT_RGB555_LE:
-    case VPX_IMG_FMT_UYVY:
-    case VPX_IMG_FMT_YUY2:
-    case VPX_IMG_FMT_YVYU: bps = 16; break;
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_YV12:
-    case VPX_IMG_FMT_VPXI420:
-    case VPX_IMG_FMT_VPXYV12: bps = 12; break;
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I440: bps = 16; break;
-    case VPX_IMG_FMT_I444: bps = 24; break;
-    case VPX_IMG_FMT_I42016: bps = 24; break;
-    case VPX_IMG_FMT_I42216:
-    case VPX_IMG_FMT_I44016: bps = 32; break;
-    case VPX_IMG_FMT_I44416: bps = 48; break;
-    default: bps = 16; break;
-  }
-
-  /* Get chroma shift values for this format */
-  switch (fmt) {
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_YV12:
-    case VPX_IMG_FMT_VPXI420:
-    case VPX_IMG_FMT_VPXYV12:
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I42216: xcs = 1; break;
-    default: xcs = 0; break;
-  }
-
-  switch (fmt) {
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I440:
-    case VPX_IMG_FMT_YV12:
-    case VPX_IMG_FMT_VPXI420:
-    case VPX_IMG_FMT_VPXYV12:
-    case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I44016: ycs = 1; break;
-    default: ycs = 0; break;
-  }
-
-  /* Calculate storage sizes given the chroma subsampling */
-  align = (1 << xcs) - 1;
-  w = (d_w + align) & ~align;
-  align = (1 << ycs) - 1;
-  h = (d_h + align) & ~align;
-  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
-  s = (s + stride_align - 1) & ~(stride_align - 1);
-  stride_in_bytes = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s * 2 : s;
-
-  /* Allocate the new image */
-  if (!img) {
-    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
-
-    if (!img) goto fail;
-
-    img->self_allocd = 1;
-  } else {
-    memset(img, 0, sizeof(vpx_image_t));
-  }
-
-  img->img_data = img_data;
-
-  if (!img_data) {
-    const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR)
-                                    ? (uint64_t)h * s * bps / 8
-                                    : (uint64_t)h * s;
-
-    if (alloc_size != (size_t)alloc_size) goto fail;
-
-    img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
-    img->img_data_owner = 1;
-  }
-
-  if (!img->img_data) goto fail;
-
-  img->fmt = fmt;
-  img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
-  img->w = w;
-  img->h = h;
-  img->x_chroma_shift = xcs;
-  img->y_chroma_shift = ycs;
-  img->bps = bps;
-
-  /* Calculate strides */
-  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = stride_in_bytes;
-  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs;
-
-  /* Default viewport to entire image */
-  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) return img;
-
-fail:
-  vpx_img_free(img);
-  return NULL;
-}
-
-vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
-                           unsigned int d_w, unsigned int d_h,
-                           unsigned int align) {
-  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
-}
-
-vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
-                          unsigned int d_h, unsigned int stride_align,
-                          unsigned char *img_data) {
-  /* By setting buf_align = 1, we don't change buffer alignment in this
-   * function. */
-  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
-}
-
-int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
-                     unsigned int w, unsigned int h) {
-  unsigned char *data;
-
-  if (x + w <= img->w && y + h <= img->h) {
-    img->d_w = w;
-    img->d_h = h;
-
-    /* Calculate plane pointers */
-    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
-      img->planes[VPX_PLANE_PACKED] =
-          img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
-    } else {
-      const int bytes_per_sample =
-          (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
-      data = img->img_data;
-
-      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
-        img->planes[VPX_PLANE_ALPHA] =
-            data + x * bytes_per_sample + y * img->stride[VPX_PLANE_ALPHA];
-        data += img->h * img->stride[VPX_PLANE_ALPHA];
-      }
-
-      img->planes[VPX_PLANE_Y] =
-          data + x * bytes_per_sample + y * img->stride[VPX_PLANE_Y];
-      data += img->h * img->stride[VPX_PLANE_Y];
-
-      if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
-        img->planes[VPX_PLANE_U] =
-            data + (x >> img->x_chroma_shift) * bytes_per_sample +
-            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
-        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
-        img->planes[VPX_PLANE_V] =
-            data + (x >> img->x_chroma_shift) * bytes_per_sample +
-            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
-      } else {
-        img->planes[VPX_PLANE_V] =
-            data + (x >> img->x_chroma_shift) * bytes_per_sample +
-            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
-        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
-        img->planes[VPX_PLANE_U] =
-            data + (x >> img->x_chroma_shift) * bytes_per_sample +
-            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
-      }
-    }
-    return 0;
-  }
-  return -1;
-}
-
-void vpx_img_flip(vpx_image_t *img) {
-  /* Note: In the calculation pointer adjustment calculation, we want the
-   * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
-   * standard indicates that if the adjustment parameter is unsigned, the
-   * stride parameter will be promoted to unsigned, causing errors when
-   * the lhs is a larger type than the rhs.
-   */
-  img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
-  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
-
-  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
-                              img->stride[VPX_PLANE_U];
-  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
-
-  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
-                              img->stride[VPX_PLANE_V];
-  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
-
-  img->planes[VPX_PLANE_ALPHA] +=
-      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
-  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
-}
-
-void vpx_img_free(vpx_image_t *img) {
-  if (img) {
-    if (img->img_data && img->img_data_owner) vpx_free(img->img_data);
-
-    if (img->self_allocd) free(img);
-  }
-}
diff --git a/aom/vp8.h b/aom/vp8.h
deleted file mode 100644
index e27b705..0000000
--- a/aom/vp8.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*!\defgroup vp8 VP8
- * \ingroup codecs
- * VP8 is vpx's newest video compression algorithm that uses motion
- * compensated prediction, Discrete Cosine Transform (DCT) coding of the
- * prediction error signal and context dependent entropy coding techniques
- * based on arithmetic principles. It features:
- *  - YUV 4:2:0 image format
- *  - Macro-block based coding (16x16 luma plus two 8x8 chroma)
- *  - 1/4 (1/8) pixel accuracy motion compensated prediction
- *  - 4x4 DCT transform
- *  - 128 level linear quantizer
- *  - In loop deblocking filter
- *  - Context-based entropy coding
- *
- * @{
- */
-/*!\file
- * \brief Provides controls common to both the VP8 encoder and decoder.
- */
-#ifndef VPX_VP8_H_
-#define VPX_VP8_H_
-
-#include "./vpx_codec.h"
-#include "./vpx_image.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*!\brief Control functions
- *
- * The set of macros define the control functions of VP8 interface
- */
-enum vp8_com_control_id {
-  /*!\brief pass in an external frame into decoder to be used as reference frame
-   */
-  VP8_SET_REFERENCE = 1,
-  VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
-  VP8_SET_POSTPROC = 3,   /**< set the decoder's post processing settings  */
-  VP8_SET_DBG_COLOR_REF_FRAME =
-      4, /**< set the reference frames to color for each macroblock */
-  VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
-  VP8_SET_DBG_COLOR_B_MODES = 6,  /**< set which blocks modes to color */
-  VP8_SET_DBG_DISPLAY_MV = 7,     /**< set which motion vector modes to draw */
-
-  /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
-   * for its control ids. These should be migrated to something like the
-   * VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
-   */
-  VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
-  VP8_COMMON_CTRL_ID_MAX,
-
-  VP10_GET_NEW_FRAME_IMAGE = 192, /**< get a pointer to the new frame */
-
-  VP8_DECODER_CTRL_ID_START = 256
-};
-
-/*!\brief post process flags
- *
- * The set of macros define VP8 decoder post processing flags
- */
-enum vp8_postproc_level {
-  VP8_NOFILTERING = 0,
-  VP8_DEBLOCK = 1 << 0,
-  VP8_DEMACROBLOCK = 1 << 1,
-  VP8_ADDNOISE = 1 << 2,
-  VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
-  VP8_DEBUG_TXT_MBLK_MODES =
-      1 << 4, /**< print macro block modes over each macro block */
-  VP8_DEBUG_TXT_DC_DIFF = 1 << 5,   /**< print dc diff for each macro block */
-  VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
-  VP8_MFQE = 1 << 10
-};
-
-/*!\brief post process flags
- *
- * This define a structure that describe the post processing settings. For
- * the best objective measure (using the PSNR metric) set post_proc_flag
- * to VP8_DEBLOCK and deblocking_level to 1.
- */
-
-typedef struct vp8_postproc_cfg {
-  /*!\brief the types of post processing to be done, should be combination of
-   * "vp8_postproc_level" */
-  int post_proc_flag;
-  int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
-  int noise_level; /**< the strength of additive noise, valid range [0, 16] */
-} vp8_postproc_cfg_t;
-
-/*!\brief reference frame type
- *
- * The set of macros define the type of VP8 reference frames
- */
-typedef enum vpx_ref_frame_type {
-  VP8_LAST_FRAME = 1,
-  VP8_GOLD_FRAME = 2,
-  VP8_ALTR_FRAME = 4
-} vpx_ref_frame_type_t;
-
-/*!\brief reference frame data struct
- *
- * Define the data struct to access vp8 reference frames.
- */
-typedef struct vpx_ref_frame {
-  vpx_ref_frame_type_t frame_type; /**< which reference frame */
-  vpx_image_t img;                 /**< reference frame data in image format */
-} vpx_ref_frame_t;
-
-/*!\brief VP9 specific reference frame data struct
- *
- * Define the data struct to access vp9 reference frames.
- */
-typedef struct vp9_ref_frame {
-  int idx;         /**< frame index to get (input) */
-  vpx_image_t img; /**< img structure to populate (output) */
-} vp9_ref_frame_t;
-
-/*!\cond */
-/*!\brief vp8 decoder control function parameter type
- *
- * defines the data type for each of VP8 decoder control function requires
- */
-VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *)
-#define VPX_CTRL_VP8_SET_REFERENCE
-VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *)
-#define VPX_CTRL_VP8_COPY_REFERENCE
-VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *)
-#define VPX_CTRL_VP8_SET_POSTPROC
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int)
-#define VPX_CTRL_VP8_SET_DBG_COLOR_REF_FRAME
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int)
-#define VPX_CTRL_VP8_SET_DBG_COLOR_MB_MODES
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int)
-#define VPX_CTRL_VP8_SET_DBG_COLOR_B_MODES
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int)
-#define VPX_CTRL_VP8_SET_DBG_DISPLAY_MV
-VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *)
-#define VPX_CTRL_VP9_GET_REFERENCE
-VPX_CTRL_USE_TYPE(VP10_GET_NEW_FRAME_IMAGE, vpx_image_t *)
-#define VPX_CTRL_VP10_GET_NEW_FRAME_IMAGE
-
-/*!\endcond */
-/*! @} - end defgroup vp8 */
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // VPX_VP8_H_
diff --git a/aom/vp8dx.h b/aom/vp8dx.h
deleted file mode 100644
index 2239b86..0000000
--- a/aom/vp8dx.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*!\defgroup vp8_decoder WebM VP8/VP9 Decoder
- * \ingroup vp8
- *
- * @{
- */
-/*!\file
- * \brief Provides definitions for using VP8 or VP9 within the vpx Decoder
- *        interface.
- */
-#ifndef VPX_VP8DX_H_
-#define VPX_VP8DX_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Include controls common to both the encoder and decoder */
-#include "./vp8.h"
-
-/*!\name Algorithm interface for VP10
- *
- * This interface provides the capability to decode VP10 streams.
- * @{
- */
-extern vpx_codec_iface_t vpx_codec_vp10_dx_algo;
-extern vpx_codec_iface_t *vpx_codec_vp10_dx(void);
-/*!@} - end algorithm interface member group*/
-
-/*!\enum vp8_dec_control_id
- * \brief VP8 decoder control functions
- *
- * This set of macros define the control functions available for the VP8
- * decoder interface.
- *
- * \sa #vpx_codec_control
- */
-enum vp8_dec_control_id {
-  /** control function to get info on which reference frames were updated
-   *  by the last decode
-   */
-  VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
-
-  /** check if the indicated frame is corrupted */
-  VP8D_GET_FRAME_CORRUPTED,
-
-  /** control function to get info on which reference frames were used
-   *  by the last decode
-   */
-  VP8D_GET_LAST_REF_USED,
-
-  /** decryption function to decrypt encoded buffer data immediately
-   * before decoding. Takes a vpx_decrypt_init, which contains
-   * a callback function and opaque context pointer.
-   */
-  VPXD_SET_DECRYPTOR,
-  VP8D_SET_DECRYPTOR = VPXD_SET_DECRYPTOR,
-
-  /** control function to get the dimensions that the current frame is decoded
-   * at. This may be different to the intended display size for the frame as
-   * specified in the wrapper or frame header (see VP9D_GET_DISPLAY_SIZE). */
-  VP9D_GET_FRAME_SIZE,
-
-  /** control function to get the current frame's intended display dimensions
-   * (as specified in the wrapper or frame header). This may be different to
-   * the decoded dimensions of this frame (see VP9D_GET_FRAME_SIZE). */
-  VP9D_GET_DISPLAY_SIZE,
-
-  /** control function to get the bit depth of the stream. */
-  VP9D_GET_BIT_DEPTH,
-
-  /** control function to set the byte alignment of the planes in the reference
-   * buffers. Valid values are power of 2, from 32 to 1024. A value of 0 sets
-   * legacy alignment. I.e. Y plane is aligned to 32 bytes, U plane directly
-   * follows Y plane, and V plane directly follows U plane. Default value is 0.
-   */
-  VP9_SET_BYTE_ALIGNMENT,
-
-  /** control function to invert the decoding order to from right to left. The
-   * function is used in a test to confirm the decoding independence of tile
-   * columns. The function may be used in application where this order
-   * of decoding is desired.
-   *
-   * TODO(yaowu): Rework the unit test that uses this control, and in a future
-   *              release, this test-only control shall be removed.
-   */
-  VP9_INVERT_TILE_DECODE_ORDER,
-
-  /** control function to set the skip loop filter flag. Valid values are
-   * integers. The decoder will skip the loop filter when its value is set to
-   * nonzero. If the loop filter is skipped the decoder may accumulate decode
-   * artifacts. The default value is 0.
-   */
-  VP9_SET_SKIP_LOOP_FILTER,
-
-  VP8_DECODER_CTRL_ID_MAX,
-
-  /** control function to set the range of tile decoding. A value that is
-   * greater and equal to zero indicates only the specific row/column is
-   * decoded. A value that is -1 indicates the whole row/column is decoded.
-   * A special case is both values are -1 that means the whole frame is
-   * decoded.
-   */
-  VP10_SET_DECODE_TILE_ROW,
-  VP10_SET_DECODE_TILE_COL
-};
-
-/** Decrypt n bytes of data from input -> output, using the decrypt_state
- *  passed in VPXD_SET_DECRYPTOR.
- */
-typedef void (*vpx_decrypt_cb)(void *decrypt_state, const unsigned char *input,
-                               unsigned char *output, int count);
-
-/*!\brief Structure to hold decryption state
- *
- * Defines a structure to hold the decryption state and access function.
- */
-typedef struct vpx_decrypt_init {
-  /*! Decrypt callback. */
-  vpx_decrypt_cb decrypt_cb;
-
-  /*! Decryption state. */
-  void *decrypt_state;
-} vpx_decrypt_init;
-
-/*!\brief A deprecated alias for vpx_decrypt_init.
- */
-typedef vpx_decrypt_init vp8_decrypt_init;
-
-/*!\cond */
-/*!\brief VP8 decoder control function parameter type
- *
- * Defines the data types that VP8D control functions take. Note that
- * additional common controls are defined in vp8.h
- *
- */
-
-VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *)
-#define VPX_CTRL_VP8D_GET_LAST_REF_UPDATES
-VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *)
-#define VPX_CTRL_VP8D_GET_FRAME_CORRUPTED
-VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *)
-#define VPX_CTRL_VP8D_GET_LAST_REF_USED
-VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *)
-#define VPX_CTRL_VPXD_SET_DECRYPTOR
-VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *)
-#define VPX_CTRL_VP8D_SET_DECRYPTOR
-VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *)
-#define VPX_CTRL_VP9D_GET_DISPLAY_SIZE
-VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *)
-#define VPX_CTRL_VP9D_GET_BIT_DEPTH
-VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE, int *)
-#define VPX_CTRL_VP9D_GET_FRAME_SIZE
-VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int)
-#define VPX_CTRL_VP9_INVERT_TILE_DECODE_ORDER
-VPX_CTRL_USE_TYPE(VP10_SET_DECODE_TILE_ROW, int)
-#define VPX_CTRL_VP10_SET_DECODE_TILE_ROW
-VPX_CTRL_USE_TYPE(VP10_SET_DECODE_TILE_COL, int)
-#define VPX_CTRL_VP10_SET_DECODE_TILE_COL
-/*!\endcond */
-/*! @} - end defgroup vp8_decoder */
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // VPX_VP8DX_H_
diff --git a/aom/vpx_codec.mk b/aom/vpx_codec.mk
deleted file mode 100644
index a5ad13e..0000000
--- a/aom/vpx_codec.mk
+++ /dev/null
@@ -1,41 +0,0 @@
-##
-##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-##
-##  Use of this source code is governed by a BSD-style license
-##  that can be found in the LICENSE file in the root of the source
-##  tree. An additional intellectual property rights grant can be found
-##  in the file PATENTS.  All contributing project authors may
-##  be found in the AUTHORS file in the root of the source tree.
-##
-
-
-API_EXPORTS += exports
-
-API_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
-
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
-
-API_DOC_SRCS-yes += vpx_codec.h
-API_DOC_SRCS-yes += vpx_decoder.h
-API_DOC_SRCS-yes += vpx_encoder.h
-API_DOC_SRCS-yes += vpx_frame_buffer.h
-API_DOC_SRCS-yes += vpx_image.h
-
-API_SRCS-yes += src/vpx_decoder.c
-API_SRCS-yes += vpx_decoder.h
-API_SRCS-yes += src/vpx_encoder.c
-API_SRCS-yes += vpx_encoder.h
-API_SRCS-yes += internal/vpx_codec_internal.h
-API_SRCS-yes += src/vpx_codec.c
-API_SRCS-yes += src/vpx_image.c
-API_SRCS-yes += vpx_codec.h
-API_SRCS-yes += vpx_codec.mk
-API_SRCS-yes += vpx_frame_buffer.h
-API_SRCS-yes += vpx_image.h
-API_SRCS-yes += vpx_integer.h
diff --git a/aom_dsp/add_noise.c b/aom_dsp/add_noise.c
index 826d935..2b281b7 100644
--- a/aom_dsp/add_noise.c
+++ b/aom_dsp/add_noise.c
@@ -11,13 +11,13 @@
 #include <math.h>
 #include <stdlib.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
-void vpx_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16],
+void aom_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16],
                            char whiteclamp[16], char bothclamp[16],
                            unsigned int width, unsigned int height, int pitch) {
   unsigned int i, j;
@@ -43,7 +43,7 @@
          (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
 }
 
-int vpx_setup_noise(double sigma, int size, char *noise) {
+int aom_setup_noise(double sigma, int size, char *noise) {
   char char_dist[256];
   int next = 0, i, j;
 
diff --git a/aom_dsp/vpx_convolve.c b/aom_dsp/aom_convolve.c
similarity index 90%
rename from aom_dsp/vpx_convolve.c
rename to aom_dsp/aom_convolve.c
index 370ad77..b0630d2 100644
--- a/aom_dsp/vpx_convolve.c
+++ b/aom_dsp/aom_convolve.c
@@ -11,12 +11,12 @@
 #include <assert.h>
 #include <string.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
@@ -155,7 +155,7 @@
   return (int)((const InterpKernel *)(intptr_t)f - base);
 }
 
-void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y, int y_step_q4, int w,
@@ -170,7 +170,7 @@
                  w, h);
 }
 
-void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4, int w,
@@ -185,7 +185,7 @@
                      x_step_q4, w, h);
 }
 
-void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4, int w,
@@ -200,7 +200,7 @@
                 w, h);
 }
 
-void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4, int w,
@@ -215,7 +215,7 @@
                     y_step_q4, w, h);
 }
 
-void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                      ptrdiff_t dst_stride, const int16_t *filter_x,
                      int x_step_q4, const int16_t *filter_y, int y_step_q4,
                      int w, int h) {
@@ -229,7 +229,7 @@
            filters_y, y0_q4, y_step_q4, w, h);
 }
 
-void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
@@ -238,13 +238,13 @@
   assert(w <= MAX_SB_SIZE);
   assert(h <= MAX_SB_SIZE);
 
-  vpx_convolve8_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, x_step_q4,
+  aom_convolve8_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, x_step_q4,
                   filter_y, y_step_q4, w, h);
-  vpx_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
+  aom_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
                      h);
 }
 
-void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int filter_x_stride, const int16_t *filter_y,
                          int filter_y_stride, int w, int h) {
@@ -262,7 +262,7 @@
   }
 }
 
-void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                         ptrdiff_t dst_stride, const int16_t *filter_x,
                         int filter_x_stride, const int16_t *filter_y,
                         int filter_y_stride, int w, int h) {
@@ -281,57 +281,57 @@
   }
 }
 
-void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                         ptrdiff_t dst_stride, const int16_t *filter_x,
                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
-  vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+  aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                         filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                        ptrdiff_t dst_stride, const int16_t *filter_x,
                        int x_step_q4, const int16_t *filter_y, int y_step_q4,
                        int w, int h) {
-  vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+  aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                        filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                      ptrdiff_t dst_stride, const int16_t *filter_x,
                      int x_step_q4, const int16_t *filter_y, int y_step_q4,
                      int w, int h) {
-  vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+  aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                   filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4, int w,
                             int h) {
-  vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+  aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                             x_step_q4, filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y, int y_step_q4, int w,
                            int h) {
-  vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+  aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                            x_step_q4, filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
-  vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+  aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                       filter_y, y_step_q4, w, h);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                   uint8_t *dst8, ptrdiff_t dst_stride,
                                   const InterpKernel *x_filters, int x0_q4,
@@ -466,7 +466,7 @@
       MAX_SB_SIZE, dst, dst_stride, y_filters, y0_q4, y_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4, int w,
@@ -480,7 +480,7 @@
                         x_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                       uint8_t *dst, ptrdiff_t dst_stride,
                                       const int16_t *filter_x, int x_step_q4,
                                       const int16_t *filter_y, int y_step_q4,
@@ -494,7 +494,7 @@
                             x_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4, int w,
@@ -508,7 +508,7 @@
                        y_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const int16_t *filter_x, int x_step_q4,
                                      const int16_t *filter_y, int y_step_q4,
@@ -522,7 +522,7 @@
                            y_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4, int w,
@@ -537,7 +537,7 @@
                   filters_y, y0_q4, y_step_q4, w, h, bd);
 }
 
-void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int x_step_q4,
                                 const int16_t *filter_y, int y_step_q4, int w,
@@ -547,13 +547,13 @@
   assert(w <= MAX_SB_SIZE);
   assert(h <= MAX_SB_SIZE);
 
-  vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE,
+  aom_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE,
                          filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
-  vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE, dst,
+  aom_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE, dst,
                             dst_stride, NULL, 0, NULL, 0, w, h, bd);
 }
 
-void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
+void aom_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                                 uint8_t *dst8, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int filter_x_stride,
                                 const int16_t *filter_y, int filter_y_stride,
@@ -574,7 +574,7 @@
   }
 }
 
-void vpx_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
+void aom_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
                                uint8_t *dst8, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int filter_x_stride,
                                const int16_t *filter_y, int filter_y_stride,
diff --git a/aom_dsp/vpx_convolve.h b/aom_dsp/aom_convolve.h
similarity index 88%
rename from aom_dsp/vpx_convolve.h
rename to aom_dsp/aom_convolve.h
index 20bef23..3441323 100644
--- a/aom_dsp/vpx_convolve.h
+++ b/aom_dsp/aom_convolve.h
@@ -7,11 +7,11 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPX_DSP_VPX_CONVOLVE_H_
-#define VPX_DSP_VPX_CONVOLVE_H_
+#ifndef AOM_DSP_AOM_CONVOLVE_H_
+#define AOM_DSP_AOM_CONVOLVE_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -29,11 +29,11 @@
 // --Must round-up because block may be located at sub-pixel position.
 // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
 // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 #define MAX_EXT_SIZE 263
 #else
 #define MAX_EXT_SIZE 135
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
@@ -41,7 +41,7 @@
                               const int16_t *filter_y, int y_step_q4, int w,
                               int h);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const int16_t *filter_x, int x_step_q4,
@@ -53,4 +53,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_VPX_CONVOLVE_H_
+#endif  // AOM_DSP_AOM_CONVOLVE_H_
diff --git a/aom_dsp/vpx_dsp.mk b/aom_dsp/aom_dsp.mk
similarity index 79%
rename from aom_dsp/vpx_dsp.mk
rename to aom_dsp/aom_dsp.mk
index c8933dd..b671a32 100644
--- a/aom_dsp/vpx_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -8,8 +8,8 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-DSP_SRCS-yes += vpx_dsp.mk
-DSP_SRCS-yes += vpx_dsp_common.h
+DSP_SRCS-yes += aom_dsp.mk
+DSP_SRCS-yes += aom_dsp_common.h
 
 DSP_SRCS-$(HAVE_MSA)    += mips/macros_msa.h
 
@@ -45,12 +45,12 @@
 DSP_SRCS-$(HAVE_SSE) += x86/intrapred_sse2.asm
 DSP_SRCS-$(HAVE_SSE2) += x86/intrapred_sse2.asm
 DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
 
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE)  += x86/highbd_intrapred_sse2.asm
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_intrapred_sse2.asm
-endif  # CONFIG_VP9_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 
 DSP_SRCS-$(HAVE_NEON_ASM) += arm/intrapred_neon_asm$(ASM)
 DSP_SRCS-$(HAVE_NEON) += arm/intrapred_neon.c
@@ -64,7 +64,7 @@
 
 # inter predictions
 
-ifeq ($(CONFIG_VP10),yes)
+ifeq ($(CONFIG_AV1),yes)
 DSP_SRCS-yes            += blend.h
 DSP_SRCS-yes            += blend_a64_mask.c
 DSP_SRCS-yes            += blend_a64_hmask.c
@@ -73,54 +73,54 @@
 DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_mask_sse4.c
 DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_hmask_sse4.c
 DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_vmask_sse4.c
-endif  #CONFIG_VP10
+endif  #CONFIG_AV1
 
 # interpolation filters
-DSP_SRCS-yes += vpx_convolve.c
-DSP_SRCS-yes += vpx_convolve.h
-DSP_SRCS-yes += vpx_filter.h
+DSP_SRCS-yes += aom_convolve.c
+DSP_SRCS-yes += aom_convolve.h
+DSP_SRCS-yes += aom_filter.h
 
 DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/convolve.h
-DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/vpx_asm_stubs.c
-DSP_SRCS-$(HAVE_SSE2)  += x86/vpx_subpixel_8t_sse2.asm
-DSP_SRCS-$(HAVE_SSE2)  += x86/vpx_subpixel_bilinear_sse2.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_bilinear_ssse3.asm
-DSP_SRCS-$(HAVE_AVX2)  += x86/vpx_subpixel_8t_intrin_avx2.c
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_intrin_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-DSP_SRCS-$(HAVE_SSE2)  += x86/vpx_high_subpixel_8t_sse2.asm
-DSP_SRCS-$(HAVE_SSE2)  += x86/vpx_high_subpixel_bilinear_sse2.asm
+DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/aom_asm_stubs.c
+DSP_SRCS-$(HAVE_SSE2)  += x86/aom_subpixel_8t_sse2.asm
+DSP_SRCS-$(HAVE_SSE2)  += x86/aom_subpixel_bilinear_sse2.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_bilinear_ssse3.asm
+DSP_SRCS-$(HAVE_AVX2)  += x86/aom_subpixel_8t_intrin_avx2.c
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_intrin_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+DSP_SRCS-$(HAVE_SSE2)  += x86/aom_high_subpixel_8t_sse2.asm
+DSP_SRCS-$(HAVE_SSE2)  += x86/aom_high_subpixel_bilinear_sse2.asm
 endif
 
-DSP_SRCS-$(HAVE_SSE2)  += x86/vpx_convolve_copy_sse2.asm
+DSP_SRCS-$(HAVE_SSE2)  += x86/aom_convolve_copy_sse2.asm
 
 ifeq ($(HAVE_NEON_ASM),yes)
-DSP_SRCS-yes += arm/vpx_convolve_copy_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve8_avg_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve8_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve_avg_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve_neon.c
+DSP_SRCS-yes += arm/aom_convolve_copy_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve8_avg_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve8_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve_avg_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve_neon.c
 else
 ifeq ($(HAVE_NEON),yes)
-DSP_SRCS-yes += arm/vpx_convolve_copy_neon.c
-DSP_SRCS-yes += arm/vpx_convolve8_avg_neon.c
-DSP_SRCS-yes += arm/vpx_convolve8_neon.c
-DSP_SRCS-yes += arm/vpx_convolve_avg_neon.c
-DSP_SRCS-yes += arm/vpx_convolve_neon.c
+DSP_SRCS-yes += arm/aom_convolve_copy_neon.c
+DSP_SRCS-yes += arm/aom_convolve8_avg_neon.c
+DSP_SRCS-yes += arm/aom_convolve8_neon.c
+DSP_SRCS-yes += arm/aom_convolve_avg_neon.c
+DSP_SRCS-yes += arm/aom_convolve_neon.c
 endif  # HAVE_NEON
 endif  # HAVE_NEON_ASM
 
 # common (msa)
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_horiz_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_vert_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_horiz_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_vert_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_avg_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_copy_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_msa.h
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_horiz_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_vert_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_horiz_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_vert_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_avg_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_copy_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_msa.h
 
 # common (dspr2)
 DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve_common_dspr2.h
@@ -167,15 +167,15 @@
 DSP_SRCS-$(HAVE_DSPR2)  += mips/loopfilter_mb_horiz_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2)  += mips/loopfilter_mb_vert_dspr2.c
 
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_loopfilter_sse2.c
-endif  # CONFIG_VP9_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 
 DSP_SRCS-yes            += txfm_common.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/txfm_common_sse2.h
 DSP_SRCS-$(HAVE_MSA)    += mips/txfm_macros_msa.h
 # forward transform
-ifeq ($(CONFIG_VP10),yes)
+ifeq ($(CONFIG_AV1),yes)
 DSP_SRCS-yes            += fwd_txfm.c
 DSP_SRCS-yes            += fwd_txfm.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.h
@@ -191,10 +191,10 @@
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.h
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.c
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_dct32x32_msa.c
-endif  # CONFIG_VP10_ENCODER
+endif  # CONFIG_AV1_ENCODER
 
 # inverse transform
-ifeq ($(CONFIG_VP10), yes)
+ifeq ($(CONFIG_AV1), yes)
 DSP_SRCS-yes            += inv_txfm.h
 DSP_SRCS-yes            += inv_txfm.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/inv_txfm_sse2.h
@@ -234,23 +234,23 @@
 DSP_SRCS-$(HAVE_MSA)   += mips/idct16x16_msa.c
 DSP_SRCS-$(HAVE_MSA)   += mips/idct32x32_msa.c
 
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_DSPR2) += mips/inv_txfm_dspr2.h
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans4_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans8_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
-endif  # CONFIG_VP9_HIGHBITDEPTH
-endif  # CONFIG_VP10
+endif  # CONFIG_AOM_HIGHBITDEPTH
+endif  # CONFIG_AV1
 
 # quantization
-ifneq ($(filter yes,$(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes,$(CONFIG_AV1_ENCODER)),)
 DSP_SRCS-yes            += quantize.c
 DSP_SRCS-yes            += quantize.h
 
 DSP_SRCS-$(HAVE_SSE2)   += x86/quantize_sse2.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_quantize_intrin_sse2.c
 endif
 ifeq ($(ARCH_X86_64),yes)
@@ -269,17 +269,17 @@
 endif
 
 # high bit depth subtract
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)  += x86/highbd_subtract_sse2.c
 endif
 
-endif  # CONFIG_VP10_ENCODER
+endif  # CONFIG_AV1_ENCODER
 
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
 DSP_SRCS-yes            += sum_squares.c
 
 DSP_SRCS-$(HAVE_SSE2)   += x86/sum_squares_sse2.c
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
 
 ifeq ($(CONFIG_ENCODERS),yes)
 DSP_SRCS-yes            += sad.c
@@ -299,7 +299,7 @@
 DSP_SRCS-$(HAVE_AVX2)   += x86/sad4d_avx2.c
 DSP_SRCS-$(HAVE_AVX2)   += x86/sad_avx2.c
 
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
 ifeq ($(CONFIG_EXT_INTER),yes)
 DSP_SRCS-$(HAVE_SSSE3)  += x86/masked_sad_intrin_ssse3.c
 DSP_SRCS-$(HAVE_SSSE3)  += x86/masked_variance_intrin_ssse3.c
@@ -308,7 +308,7 @@
 DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_sad_sse4.c
 DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_variance_sse4.c
 endif  #CONFIG_OBMC
-endif  #CONFIG_VP10_ENCODER
+endif  #CONFIG_AV1_ENCODER
 
 DSP_SRCS-$(HAVE_SSE)    += x86/sad4d_sse2.asm
 DSP_SRCS-$(HAVE_SSE)    += x86/sad_sse2.asm
@@ -316,10 +316,10 @@
 DSP_SRCS-$(HAVE_SSE2)   += x86/sad_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/subtract_sse2.asm
 
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm
-endif  # CONFIG_VP9_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 
 endif  # CONFIG_ENCODERS
 
@@ -353,17 +353,17 @@
 DSP_SRCS-$(HAVE_SSE)    += x86/subpel_variance_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/subpel_variance_sse2.asm  # Contains SSE2 and SSSE3
 
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_sse2.c
 DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_variance_sse4.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_impl_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_subpel_variance_impl_sse2.asm
-endif  # CONFIG_VP9_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 endif  # CONFIG_ENCODERS
 
 DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
 
-DSP_SRCS-yes += vpx_dsp_rtcd.c
-DSP_SRCS-yes += vpx_dsp_rtcd_defs.pl
+DSP_SRCS-yes += aom_dsp_rtcd.c
+DSP_SRCS-yes += aom_dsp_rtcd_defs.pl
 
-$(eval $(call rtcd_h_template,vpx_dsp_rtcd,aom_dsp/vpx_dsp_rtcd_defs.pl))
+$(eval $(call rtcd_h_template,aom_dsp_rtcd,aom_dsp/aom_dsp_rtcd_defs.pl))
diff --git a/aom_dsp/vpx_dsp_common.h b/aom_dsp/aom_dsp_common.h
similarity index 82%
rename from aom_dsp/vpx_dsp_common.h
rename to aom_dsp/aom_dsp_common.h
index 8f911dd..0524169 100644
--- a/aom_dsp/vpx_dsp_common.h
+++ b/aom_dsp/aom_dsp_common.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_VPX_DSP_COMMON_H_
-#define VPX_DSP_VPX_DSP_COMMON_H_
+#ifndef AOM_DSP_AOM_DSP_COMMON_H_
+#define AOM_DSP_AOM_DSP_COMMON_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -20,15 +20,15 @@
 #endif
 
 #ifndef MAX_SB_SIZE
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 #define MAX_SB_SIZE 128
 #else
 #define MAX_SB_SIZE 64
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 #endif  // ndef MAX_SB_SIZE
 
-#define VPXMIN(x, y) (((x) < (y)) ? (x) : (y))
-#define VPXMAX(x, y) (((x) > (y)) ? (x) : (y))
+#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))
+#define AOMMAX(x, y) (((x) > (y)) ? (x) : (y))
 
 #define IMPLIES(a, b) (!(a) || (b))  //  Logical 'a implies b' (or 'a -> b')
 
@@ -46,7 +46,7 @@
 #define UNLIKELY(v) (v)
 #endif
 
-#define VPX_SWAP(type, a, b) \
+#define AOM_SWAP(type, a, b) \
   do {                       \
     type c = (b);            \
     b = a;                   \
@@ -57,7 +57,7 @@
 typedef uint16_t qm_val_t;
 #define AOM_QM_BITS 6
 #endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Note:
 // tran_low_t  is the datatype used for final transform coefficients.
 // tran_high_t is the datatype used for intermediate transform stages.
@@ -69,7 +69,7 @@
 // tran_high_t is the datatype used for intermediate transform stages.
 typedef int32_t tran_high_t;
 typedef int16_t tran_low_t;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static INLINE uint8_t clip_pixel(int val) {
   return (val > 255) ? 255 : (val < 0) ? 0 : val;
@@ -83,7 +83,7 @@
   return value < low ? low : (value > high ? high : value);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
   switch (bd) {
     case 8:
@@ -92,10 +92,10 @@
     case 12: return (uint16_t)clamp(val, 0, 4095);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_VPX_DSP_COMMON_H_
+#endif  // AOM_DSP_AOM_DSP_COMMON_H_
diff --git a/aom_dsp/vpx_dsp_rtcd.c b/aom_dsp/aom_dsp_rtcd.c
similarity index 75%
rename from aom_dsp/vpx_dsp_rtcd.c
rename to aom_dsp/aom_dsp_rtcd.c
index 3cd0cc1..438f901 100644
--- a/aom_dsp/vpx_dsp_rtcd.c
+++ b/aom_dsp/aom_dsp_rtcd.c
@@ -7,9 +7,9 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #define RTCD_C
-#include "./vpx_dsp_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_ports/aom_once.h"
 
-void vpx_dsp_rtcd() { once(setup_rtcd_internal); }
+void aom_dsp_rtcd() { once(setup_rtcd_internal); }
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
new file mode 100644
index 0000000..2afb3cd
--- /dev/null
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -0,0 +1,1929 @@
+sub aom_dsp_forward_decls() {
+print <<EOF
+/*
+ * DSP
+ */
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
+
+EOF
+}
+forward_decls qw/aom_dsp_forward_decls/;
+
+# optimizations which depend on multiple features
+$avx2_ssse3 = '';
+if ((aom_config("HAVE_AVX2") eq "yes") && (aom_config("HAVE_SSSE3") eq "yes")) {
+  $avx2_ssse3 = 'avx2';
+}
+
+# functions that are 64 bit only.
+$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
+if ($opts{arch} eq "x86_64") {
+  $mmx_x86_64 = 'mmx';
+  $sse2_x86_64 = 'sse2';
+  $ssse3_x86_64 = 'ssse3';
+  $avx_x86_64 = 'avx';
+  $avx2_x86_64 = 'avx2';
+}
+
+if (aom_config("CONFIG_EXT_PARTITION") eq "yes") {
+  @block_widths = (4, 8, 16, 32, 64, 128)
+} else {
+  @block_widths = (4, 8, 16, 32, 64)
+}
+
+@block_sizes = ();
+foreach $w (@block_widths) {
+  foreach $h (@block_widths) {
+    push @block_sizes, [$w, $h] if ($w <= 2*$h && $h <= 2*$w);
+  }
+}
+
+#
+# Intra prediction
+#
+
+add_proto qw/void aom_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_4x4 sse2/;
+
+add_proto qw/void aom_d207e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_4x4/;
+
+add_proto qw/void aom_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_4x4 neon sse2/;
+
+add_proto qw/void aom_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_4x4/;
+
+add_proto qw/void aom_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_4x4 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_4x4/;
+
+add_proto qw/void aom_d63f_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63f_predictor_4x4/;
+
+add_proto qw/void aom_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_he_predictor_4x4/;
+
+add_proto qw/void aom_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_4x4/;
+
+add_proto qw/void aom_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_4x4 neon/;
+
+add_proto qw/void aom_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_4x4 ssse3/;
+
+add_proto qw/void aom_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_4x4 neon msa sse2/;
+
+add_proto qw/void aom_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_ve_predictor_4x4/;
+
+add_proto qw/void aom_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_4x4 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
+
+add_proto qw/void aom_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_8x8/;
+
+add_proto qw/void aom_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_8x8 neon sse2/;
+
+add_proto qw/void aom_d45e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_8x8/;
+
+add_proto qw/void aom_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_8x8/;
+
+add_proto qw/void aom_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_8x8/;
+
+add_proto qw/void aom_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_8x8/;
+
+add_proto qw/void aom_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_8x8 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
+
+add_proto qw/void aom_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_16x16/;
+
+add_proto qw/void aom_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_16x16 neon ssse3/;
+
+add_proto qw/void aom_d45e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_16x16/;
+
+add_proto qw/void aom_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_16x16/;
+
+add_proto qw/void aom_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_16x16/;
+
+add_proto qw/void aom_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_16x16/;
+
+add_proto qw/void aom_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_16x16 dspr2 neon msa sse2/;
+
+add_proto qw/void aom_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_32x32/;
+
+add_proto qw/void aom_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d45e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_32x32/;
+
+add_proto qw/void aom_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_32x32/;
+
+add_proto qw/void aom_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_32x32/;
+
+add_proto qw/void aom_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_32x32/;
+
+add_proto qw/void aom_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_32x32 msa neon sse2/;
+
+# High bitdepth functions
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void aom_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d207e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207e_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d45e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45e_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d63e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63e_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_h_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d117_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d135_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d153_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_v_predictor_4x4 sse2/;
+
+  add_proto qw/void aom_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_tm_predictor_4x4 sse2/;
+
+  add_proto qw/void aom_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_predictor_4x4 sse2/;
+
+  add_proto qw/void aom_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_top_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_left_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_128_predictor_4x4/;
+
+  add_proto qw/void aom_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d207e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207e_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d45e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45e_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d63e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63e_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_h_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d117_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d135_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d153_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_v_predictor_8x8 sse2/;
+
+  add_proto qw/void aom_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_tm_predictor_8x8 sse2/;
+
+  add_proto qw/void aom_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_predictor_8x8 sse2/;
+
+  add_proto qw/void aom_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_top_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_left_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_128_predictor_8x8/;
+
+  add_proto qw/void aom_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d207e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207e_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d45e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45e_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d63e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63e_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_h_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d117_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d135_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d153_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_v_predictor_16x16 sse2/;
+
+  add_proto qw/void aom_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_tm_predictor_16x16 sse2/;
+
+  add_proto qw/void aom_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_predictor_16x16 sse2/;
+
+  add_proto qw/void aom_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_top_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_left_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_128_predictor_16x16/;
+
+  add_proto qw/void aom_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d207e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d207e_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d45e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d45e_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d63e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d63e_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_h_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d117_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d135_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_d153_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_v_predictor_32x32 sse2/;
+
+  add_proto qw/void aom_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_tm_predictor_32x32 sse2/;
+
+  add_proto qw/void aom_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_predictor_32x32 sse2/;
+
+  add_proto qw/void aom_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_top_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_left_predictor_32x32/;
+
+  add_proto qw/void aom_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+  specialize qw/aom_highbd_dc_128_predictor_32x32/;
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Sub Pixel Filters
+#
+add_proto qw/void aom_convolve_copy/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve_avg/,        "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_horiz/,     "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_vert/,      "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_vert/,  "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_2d/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_horiz/,        "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_vert/,         "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_2d/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_horiz/,    "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_vert/,     "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+
+specialize qw/aom_convolve_copy       sse2      /;
+specialize qw/aom_convolve_avg        sse2      /;
+specialize qw/aom_convolve8           sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_horiz     sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_vert      sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_avg       sse2 ssse3/;
+specialize qw/aom_convolve8_avg_horiz sse2 ssse3/;
+specialize qw/aom_convolve8_avg_vert  sse2 ssse3/;
+specialize qw/aom_scaled_2d                ssse3/;
+
+# TODO(any): These need to be extended to up to 128x128 block sizes
+if (!(aom_config("CONFIG_AV1") eq "yes" && aom_config("CONFIG_EXT_PARTITION") eq "yes")) {
+  specialize qw/aom_convolve_copy       neon dspr2 msa/;
+  specialize qw/aom_convolve_avg        neon dspr2 msa/;
+  specialize qw/aom_convolve8           neon dspr2 msa/;
+  specialize qw/aom_convolve8_horiz     neon dspr2 msa/;
+  specialize qw/aom_convolve8_vert      neon dspr2 msa/;
+  specialize qw/aom_convolve8_avg       neon dspr2 msa/;
+  specialize qw/aom_convolve8_avg_horiz neon dspr2 msa/;
+  specialize qw/aom_convolve8_avg_vert  neon dspr2 msa/;
+}
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void aom_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve_copy sse2/;
+
+  # High bit-depth convolution prototypes.  The trailing "bps" argument is
+  # presumably bits per sample — TODO confirm against the C implementations.
+  # Only SSE2 specializations exist here, and the 8-tap variants are further
+  # limited to 64-bit x86 via "$sse2_x86_64".
+  add_proto qw/void aom_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve_avg sse2/;
+
+  add_proto qw/void aom_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8/, "$sse2_x86_64";
+
+  add_proto qw/void aom_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void aom_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8_vert/, "$sse2_x86_64";
+
+  add_proto qw/void aom_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8_avg/, "$sse2_x86_64";
+
+  add_proto qw/void aom_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void aom_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/aom_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Loopfilter
+#
+# For the neon_asm-flavoured entries, the "$<name>_neon_asm = <name>_neon"
+# assignments below alias the assembly specialization to the *_neon symbol.
+add_proto qw/void aom_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_16 sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_16_neon_asm=aom_lpf_vertical_16_neon;
+
+add_proto qw/void aom_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_16_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_16_dual_neon_asm=aom_lpf_vertical_16_dual_neon;
+
+add_proto qw/void aom_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_8 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_vertical_8_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_8_dual_neon_asm=aom_lpf_vertical_8_dual_neon;
+
+add_proto qw/void aom_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_4 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_vertical_4_dual sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_edge_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_edge_8 sse2 avx2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_edge_8_neon_asm=aom_lpf_horizontal_edge_8_neon;
+
+add_proto qw/void aom_lpf_horizontal_edge_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_edge_16 sse2 avx2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_edge_16_neon_asm=aom_lpf_horizontal_edge_16_neon;
+
+add_proto qw/void aom_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_8 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_horizontal_8_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_8_dual_neon_asm=aom_lpf_horizontal_8_dual_neon;
+
+add_proto qw/void aom_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_4 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
+
+# High bit-depth loopfilters: SSE2-only specializations; the trailing "bd"
+# argument carries the bit depth.
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void aom_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_vertical_16 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_vertical_16_dual sse2/;
+
+  add_proto qw/void aom_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_vertical_8 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+  specialize qw/aom_highbd_lpf_vertical_8_dual sse2/;
+
+  add_proto qw/void aom_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_vertical_4 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+  specialize qw/aom_highbd_lpf_vertical_4_dual sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_edge_8 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_edge_16 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_8 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_8_dual sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_4 sse2/;
+
+  add_proto qw/void aom_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+  specialize qw/aom_highbd_lpf_horizontal_4_dual sse2/;
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Encoder functions.
+#
+
+#
+# Forward transform
+#
+# The same fdct prototypes are declared in both bit-depth branches, but the
+# high-bitdepth branch keeps only sse2 and drops the msa/neon/ssse3
+# specializations that the low-bitdepth branch carries.
+if ((aom_config("CONFIG_AV1_ENCODER") eq "yes")) {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct4x4 sse2/;
+
+  add_proto qw/void aom_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct4x4_1 sse2/;
+
+  add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct8x8 sse2/;
+
+  add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct8x8_1 sse2/;
+
+  add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct16x16 sse2/;
+
+  add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct16x16_1 sse2/;
+
+  add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32 sse2/;
+
+  add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32_rd sse2/;
+
+  add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32_1 sse2/;
+
+  add_proto qw/void aom_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct4x4 sse2/;
+
+  add_proto qw/void aom_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct8x8 sse2/;
+
+  add_proto qw/void aom_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct8x8_1/;
+
+  add_proto qw/void aom_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct16x16 sse2/;
+
+  add_proto qw/void aom_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct16x16_1/;
+
+  add_proto qw/void aom_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct32x32 sse2/;
+
+  add_proto qw/void aom_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct32x32_rd sse2/;
+
+  add_proto qw/void aom_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_highbd_fdct32x32_1/;
+} else {
+  add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct4x4 sse2 msa/;
+
+  add_proto qw/void aom_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct4x4_1 sse2/;
+
+  add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
+
+  add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct8x8_1 sse2 neon msa/;
+
+  add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct16x16 sse2 msa/;
+
+  add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct16x16_1 sse2 msa/;
+
+  add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32 sse2 avx2 msa/;
+
+  add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32_rd sse2 avx2 msa/;
+
+  add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/aom_fdct32x32_1 sse2 msa/;
+}  # CONFIG_AOM_HIGHBITDEPTH
+}  # CONFIG_AV1_ENCODER
+
+#
+# Inverse transform
+# Layout: CONFIG_AV1 -> (high bitdepth | low bitdepth); in each arm,
+# CONFIG_EMULATE_HARDWARE forces plain C versions (no SIMD specializations).
+if (aom_config("CONFIG_AV1") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/aom_iwht4x4_1_add/;
+
+  add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/aom_iwht4x4_16_add sse2/;
+
+  add_proto qw/void aom_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct4x4_1_add/;
+
+  add_proto qw/void aom_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct8x8_1_add/;
+
+  add_proto qw/void aom_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct16x16_1_add/;
+
+  add_proto qw/void aom_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct32x32_1024_add/;
+
+  add_proto qw/void aom_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct32x32_34_add/;
+
+  add_proto qw/void aom_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_idct32x32_1_add/;
+
+  add_proto qw/void aom_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_iwht4x4_1_add/;
+
+  add_proto qw/void aom_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/aom_highbd_iwht4x4_16_add/;
+
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_16_add/;
+
+    add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_1_add/;
+
+    add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_64_add/;
+
+    add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_12_add/;
+
+    add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_1_add/;
+
+    add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_256_add/;
+
+    add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_10_add/;
+
+    add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_1_add/;
+
+    add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1024_add/;
+
+    add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_135_add/;
+
+    add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_34_add/;
+
+    add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1_add/;
+
+    add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct4x4_16_add/;
+
+    add_proto qw/void aom_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct8x8_64_add/;
+
+    add_proto qw/void aom_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct8x8_10_add/;
+
+    add_proto qw/void aom_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct16x16_256_add/;
+
+    add_proto qw/void aom_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct16x16_10_add/;
+  } else {
+    add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_16_add sse2/;
+
+    add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_1_add sse2/;
+
+    add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_64_add sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_12_add sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_1_add sse2/;
+
+    add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_256_add sse2/;
+
+    add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_10_add sse2/;
+
+    add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_1_add sse2/;
+
+    add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1024_add sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_135_add sse2/, "$ssse3_x86_64";
+    # Need to add 135 eob idct32x32 implementations.
+    # (Until then, the 135-eob entry points are aliased to the 1024-eob ones.)
+    $aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
+
+    add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_34_add sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1_add sse2/;
+
+    add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct4x4_16_add sse2/;
+
+    add_proto qw/void aom_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct8x8_64_add sse2/;
+
+    add_proto qw/void aom_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct8x8_10_add sse2/;
+
+    add_proto qw/void aom_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct16x16_256_add sse2/;
+
+    add_proto qw/void aom_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/aom_highbd_idct16x16_10_add sse2/;
+  }  # CONFIG_EMULATE_HARDWARE
+} else {
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_1_add/;
+
+    add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_16_add/;
+
+    add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_1_add/;
+
+    add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_64_add/;
+
+    add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_12_add/;
+
+    add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_1_add/;
+
+    add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_256_add/;
+
+    add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_10_add/;
+
+    add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1024_add/;
+
+    add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_135_add/;
+
+    add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_34_add/;
+
+    add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1_add/;
+
+    add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_iwht4x4_1_add/;
+
+    add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_iwht4x4_16_add/;
+  } else {
+    add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_1_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct4x4_16_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_1_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_1_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_256_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct16x16_10_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+    add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+    # Need to add 135 eob idct32x32 implementations.
+    # (Until then, all 135-eob specializations alias the 1024-eob versions.)
+    $aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
+    $aom_idct32x32_135_add_neon=aom_idct32x32_1024_add_neon;
+    $aom_idct32x32_135_add_dspr2=aom_idct32x32_1024_add_dspr2;
+    $aom_idct32x32_135_add_msa=aom_idct32x32_1024_add_msa;
+
+    add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+    # Need to add 34 eob idct32x32 neon implementation.
+    $aom_idct32x32_34_add_neon=aom_idct32x32_1024_add_neon;
+
+    add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_idct32x32_1_add sse2 neon dspr2 msa/;
+
+    add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_iwht4x4_1_add msa/;
+
+    add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/aom_iwht4x4_16_add msa sse2/;
+  }  # CONFIG_EMULATE_HARDWARE
+}  # CONFIG_AOM_HIGHBITDEPTH
+}  # CONFIG_AV1
+
+#
+# Quantization
+#
+# With CONFIG_AOM_QM the prototypes take extra qm_ptr/iqm_ptr (qm_val_t)
+# arguments and have no SIMD specializations; without it the classic
+# signatures keep their sse2/ssse3/avx specializations.
+if (aom_config("CONFIG_AOM_QM") eq "yes") {
+  if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+    add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+    add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+    if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+      add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+      add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+    }  # CONFIG_AOM_HIGHBITDEPTH
+  }  # CONFIG_AV1_ENCODER
+} else {
+  if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+    add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/aom_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64";
+
+    add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/aom_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
+
+    if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+      add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+      specialize qw/aom_highbd_quantize_b sse2/;
+
+      add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+      specialize qw/aom_highbd_quantize_b_32x32 sse2/;
+    }  # CONFIG_AOM_HIGHBITDEPTH
+  }  # CONFIG_AV1_ENCODER
+} # CONFIG_AOM_QM
+if (aom_config("CONFIG_AV1") eq "yes") {
+  #
+  # Alpha blending with mask
+  #
+  # sse4_1 is the only SIMD specialization for the blend kernels; note these
+  # specialize calls use the string form, not qw//, for the function name.
+  add_proto qw/void aom_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
+  add_proto qw/void aom_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
+  add_proto qw/void aom_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
+  specialize "aom_blend_a64_mask", qw/sse4_1/;
+  specialize "aom_blend_a64_hmask", qw/sse4_1/;
+  specialize "aom_blend_a64_vmask", qw/sse4_1/;
+
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/void aom_highbd_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx, int bd";
+    add_proto qw/void aom_highbd_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
+    add_proto qw/void aom_highbd_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
+    specialize "aom_highbd_blend_a64_mask", qw/sse4_1/;
+    specialize "aom_highbd_blend_a64_hmask", qw/sse4_1/;
+    specialize "aom_highbd_blend_a64_vmask", qw/sse4_1/;
+  }
+}  # CONFIG_AV1
+
+# This "if" opens an encoder-only scope whose closing brace lies later in
+# the file (past this hunk).
+if (aom_config("CONFIG_ENCODERS") eq "yes") {
+#
+# Block subtraction
+#
+add_proto qw/void aom_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride";
+specialize qw/aom_subtract_block neon msa sse2/;
+
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+#
+# Sum of Squares
+#
+add_proto qw/uint64_t aom_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
+specialize qw/aom_sum_squares_2d_i16 sse2/;
+
+add_proto qw/uint64_t aom_sum_squares_i16/, "const int16_t *src, uint32_t N";
+specialize qw/aom_sum_squares_i16 sse2/;
+}
+
+#
+# Single block SAD
+#
+# NOTE(review): these per-size SAD prototypes are re-declared by the
+# "foreach (@block_sizes)" loop further down and then re-specialized there
+# with ISA lists that do not always match (e.g. aom_sad64x64 gets neon here
+# but not in the later specialize) — confirm which list is intended.
+add_proto qw/unsigned int aom_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad64x64 avx2 neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad64x32 avx2 msa sse2/;
+
+add_proto qw/unsigned int aom_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad32x64 avx2 msa sse2/;
+
+add_proto qw/unsigned int aom_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad32x32 avx2 neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad32x16 avx2 msa sse2/;
+
+add_proto qw/unsigned int aom_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad16x32 msa sse2/;
+
+add_proto qw/unsigned int aom_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+# "media" presumably refers to the ARMv6 media-instruction target inherited
+# from libvpx — verify it still exists in this build system.
+specialize qw/aom_sad16x16 media neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad16x8 neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad8x16 neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad8x8 neon msa sse2/;
+
+add_proto qw/unsigned int aom_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad8x4 msa sse2/;
+
+add_proto qw/unsigned int aom_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad4x8 msa sse2/;
+
+add_proto qw/unsigned int aom_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+specialize qw/aom_sad4x4 neon msa sse2/;
+
+#
+# Avg
+#
+# Encoder-only averaging / minmax / hadamard / satd / projection helpers.
+if ((aom_config("CONFIG_AV1_ENCODER") eq "yes")) {
+  #
+  # Avg
+  #
+  add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
+  specialize qw/aom_avg_8x8 sse2 neon msa/;
+  add_proto qw/unsigned int aom_avg_4x4/, "const uint8_t *, int p";
+  specialize qw/aom_avg_4x4 sse2 neon msa/;
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/unsigned int aom_highbd_avg_8x8/, "const uint8_t *, int p";
+    specialize qw/aom_highbd_avg_8x8/;
+    add_proto qw/unsigned int aom_highbd_avg_4x4/, "const uint8_t *, int p";
+    specialize qw/aom_highbd_avg_4x4/;
+    add_proto qw/void aom_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
+    specialize qw/aom_highbd_subtract_block sse2/;
+  }
+
+  #
+  # Minmax
+  #
+  add_proto qw/void aom_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
+  specialize qw/aom_minmax_8x8 sse2 neon/;
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/void aom_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
+    specialize qw/aom_highbd_minmax_8x8/;
+  }
+
+  add_proto qw/void aom_hadamard_8x8/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
+  specialize qw/aom_hadamard_8x8 sse2 neon/, "$ssse3_x86_64";
+
+  add_proto qw/void aom_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
+  specialize qw/aom_hadamard_16x16 sse2 neon/;
+
+  add_proto qw/int aom_satd/, "const int16_t *coeff, int length";
+  specialize qw/aom_satd sse2 neon/;
+
+  add_proto qw/void aom_int_pro_row/, "int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height";
+  specialize qw/aom_int_pro_row sse2 neon/;
+
+  add_proto qw/int16_t aom_int_pro_col/, "const uint8_t *ref, const int width";
+  specialize qw/aom_int_pro_col sse2 neon/;
+
+  add_proto qw/int aom_vector_var/, "const int16_t *ref, const int16_t *src, const int bwl";
+  specialize qw/aom_vector_var neon sse2/;
+}  # CONFIG_AV1_ENCODER
+
+#
+# Single block SAD / Single block Avg SAD
+#
+# One proto per (w, h) in @block_sizes; the specialize lines below attach the
+# SIMD variants per size (column alignment groups them by instruction set).
+foreach (@block_sizes) {
+  ($w, $h) = @$_;
+  add_proto qw/unsigned int/, "aom_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+  add_proto qw/unsigned int/, "aom_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
+}
+
+specialize qw/aom_sad128x128                        sse2/;
+specialize qw/aom_sad128x64                         sse2/;
+specialize qw/aom_sad64x128                         sse2/;
+specialize qw/aom_sad64x64      avx2            msa sse2/;
+specialize qw/aom_sad64x32      avx2            msa sse2/;
+specialize qw/aom_sad32x64      avx2            msa sse2/;
+specialize qw/aom_sad32x32      avx2       neon msa sse2/;
+specialize qw/aom_sad32x16      avx2            msa sse2/;
+specialize qw/aom_sad16x32                      msa sse2/;
+specialize qw/aom_sad16x16           media neon msa sse2/;
+specialize qw/aom_sad16x8                  neon msa sse2/;
+specialize qw/aom_sad8x16                  neon msa sse2/;
+specialize qw/aom_sad8x8                   neon msa sse2/;
+specialize qw/aom_sad8x4                        msa sse2/;
+specialize qw/aom_sad4x8                        msa sse2/;
+specialize qw/aom_sad4x4                   neon msa sse2/;
+
+# SAD-with-second-predictor (averaging) specializations for the protos
+# declared in the @block_sizes loop above.
+specialize qw/aom_sad128x128_avg          sse2/;
+specialize qw/aom_sad128x64_avg           sse2/;
+specialize qw/aom_sad64x128_avg           sse2/;
+specialize qw/aom_sad64x64_avg   avx2 msa sse2/;
+specialize qw/aom_sad64x32_avg   avx2 msa sse2/;
+specialize qw/aom_sad32x64_avg   avx2 msa sse2/;
+specialize qw/aom_sad32x32_avg   avx2 msa sse2/;
+specialize qw/aom_sad32x16_avg   avx2 msa sse2/;
+specialize qw/aom_sad16x32_avg        msa sse2/;
+specialize qw/aom_sad16x16_avg        msa sse2/;
+specialize qw/aom_sad16x8_avg         msa sse2/;
+specialize qw/aom_sad8x16_avg         msa sse2/;
+specialize qw/aom_sad8x8_avg          msa sse2/;
+specialize qw/aom_sad8x4_avg          msa sse2/;
+specialize qw/aom_sad4x8_avg          msa sse2/;
+specialize qw/aom_sad4x4_avg          msa sse2/;
+
+# High-bit-depth SAD: sse2 versions exist for all sizes except the 128-wide,
+# 128-tall and 4-wide blocks (the guard below excludes them).
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/unsigned int/, "aom_highbd_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+    add_proto qw/unsigned int/, "aom_highbd_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
+    if ($w != 128 && $h != 128 && $w != 4) {
+      specialize "aom_highbd_sad${w}x${h}", qw/sse2/;
+      specialize "aom_highbd_sad${w}x${h}_avg", qw/sse2/;
+    }
+  }
+}
+
+#
+# Masked SAD
+#
+# Masked and OBMC SAD are experimental-feature gated (CONFIG_EXT_INTER /
+# CONFIG_OBMC); masked gets ssse3, OBMC gets sse4_1 specializations.
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize "aom_masked_sad${w}x${h}", qw/ssse3/;
+  }
+
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    foreach (@block_sizes) {
+      ($w, $h) = @$_;
+      add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+      specialize "aom_highbd_masked_sad${w}x${h}", qw/ssse3/;
+    }
+  }
+}
+
+#
+# OBMC SAD
+#
+if (aom_config("CONFIG_OBMC") eq "yes") {
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/unsigned int/, "aom_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
+    specialize "aom_obmc_sad${w}x${h}", qw/sse4_1/;
+  }
+
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    foreach (@block_sizes) {
+      ($w, $h) = @$_;
+      add_proto qw/unsigned int/, "aom_highbd_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
+      specialize "aom_highbd_obmc_sad${w}x${h}", qw/sse4_1/;
+    }
+  }
+}
+
+#
+# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
+#
+# x3/x8 variants write 3 (resp. 8) SADs into sad_array. Square sizes come from
+# @block_widths; the rectangular sizes are declared individually afterwards.
+# Blocks of 3
+foreach $s (@block_widths) {
+  add_proto qw/void/, "aom_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+specialize qw/aom_sad64x64x3            msa/;
+specialize qw/aom_sad32x32x3            msa/;
+specialize qw/aom_sad16x16x3 sse3 ssse3 msa/;
+specialize qw/aom_sad8x8x3   sse3       msa/;
+specialize qw/aom_sad4x4x3   sse3       msa/;
+
+add_proto qw/void/, "aom_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad16x8x3 sse3 ssse3 msa/;
+add_proto qw/void/, "aom_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x16x3 sse3 msa/;
+
+# Blocks of 8
+foreach $s (@block_widths) {
+  add_proto qw/void/, "aom_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+specialize qw/aom_sad64x64x8        msa/;
+specialize qw/aom_sad32x32x8        msa/;
+specialize qw/aom_sad16x16x8 sse4_1 msa/;
+specialize qw/aom_sad8x8x8   sse4_1 msa/;
+specialize qw/aom_sad4x4x8   sse4_1 msa/;
+
+add_proto qw/void/, "aom_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad16x8x8 sse4_1 msa/;
+add_proto qw/void/, "aom_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x16x8 sse4_1 msa/;
+add_proto qw/void/, "aom_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x4x8 msa/;
+add_proto qw/void/, "aom_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad4x8x8 msa/;
+
+# High-bit-depth x3/x8 variants: prototypes only, no SIMD specializations.
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  foreach $s (@block_widths) {
+    # Blocks of 3
+    add_proto qw/void/, "aom_highbd_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+    # Blocks of 8
+    add_proto qw/void/, "aom_highbd_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  }
+  # Blocks of 3
+  add_proto qw/void/, "aom_highbd_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  add_proto qw/void/, "aom_highbd_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  # Blocks of 8
+  add_proto qw/void/, "aom_highbd_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  add_proto qw/void/, "aom_highbd_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  add_proto qw/void/, "aom_highbd_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+  add_proto qw/void/, "aom_highbd_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+
+#
+# Multi-block SAD, comparing a reference to N independent blocks
+#
+# x4d variants take an array of 4 reference pointers and fill sad_array[0..3].
+foreach (@block_sizes) {
+  ($w, $h) = @$_;
+  add_proto qw/void/, "aom_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+}
+
+specialize qw/aom_sad128x128x4d              sse2/;
+specialize qw/aom_sad128x64x4d               sse2/;
+specialize qw/aom_sad64x128x4d               sse2/;
+specialize qw/aom_sad64x64x4d  avx2 neon msa sse2/;
+specialize qw/aom_sad64x32x4d            msa sse2/;
+specialize qw/aom_sad32x64x4d            msa sse2/;
+specialize qw/aom_sad32x32x4d  avx2 neon msa sse2/;
+specialize qw/aom_sad32x16x4d            msa sse2/;
+specialize qw/aom_sad16x32x4d            msa sse2/;
+specialize qw/aom_sad16x16x4d       neon msa sse2/;
+specialize qw/aom_sad16x8x4d             msa sse2/;
+specialize qw/aom_sad8x16x4d             msa sse2/;
+specialize qw/aom_sad8x8x4d              msa sse2/;
+specialize qw/aom_sad8x4x4d              msa sse2/;
+specialize qw/aom_sad4x8x4d              msa sse2/;
+specialize qw/aom_sad4x4x4d              msa sse2/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  #
+  # Multi-block SAD, comparing a reference to N independent blocks
+  #
+  # sse2 for every size except the 128-wide/128-tall blocks.
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/void/, "aom_highbd_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+    if ($w != 128 && $h != 128) {
+      specialize "aom_highbd_sad${w}x${h}x4d", qw/sse2/;
+    }
+  }
+}
+
+#
+# Structured Similarity (SSIM)
+#
+# Only built when internal quality statistics are enabled; x86-64 sse2
+# versions come via the $sse2_x86_64 conditional variable.
+if (aom_config("CONFIG_INTERNAL_STATS") eq "yes") {
+  add_proto qw/void aom_ssim_parms_8x8/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+  specialize qw/aom_ssim_parms_8x8/, "$sse2_x86_64";
+
+  add_proto qw/void aom_ssim_parms_16x16/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+  specialize qw/aom_ssim_parms_16x16/, "$sse2_x86_64";
+
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/void aom_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+  }
+}
+}  # CONFIG_ENCODERS
+
+if (aom_config("CONFIG_ENCODERS") eq "yes") {
+
+#
+# Variance
+#
+# Fixed-size variance protos. NOTE(review): the generic @block_sizes loop
+# below declares and specializes the same aom_variance{W}x{H} names again --
+# presumably harmless repetition; confirm rtcd.pl accepts it before touching.
+add_proto qw/unsigned int aom_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance64x64 sse2 avx2 neon msa/;
+
+add_proto qw/unsigned int aom_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance64x32 sse2 avx2 neon msa/;
+
+add_proto qw/unsigned int aom_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance32x64 sse2 neon msa/;
+
+add_proto qw/unsigned int aom_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance32x32 sse2 avx2 neon msa/;
+
+add_proto qw/unsigned int aom_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance32x16 sse2 avx2 msa/;
+
+add_proto qw/unsigned int aom_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance16x32 sse2 msa/;
+
+add_proto qw/unsigned int aom_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance16x16 sse2 avx2 media neon msa/;
+
+add_proto qw/unsigned int aom_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance16x8 sse2 neon msa/;
+
+add_proto qw/unsigned int aom_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance8x16 sse2 neon msa/;
+
+add_proto qw/unsigned int aom_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance8x8 sse2 media neon msa/;
+
+add_proto qw/unsigned int aom_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance8x4 sse2 msa/;
+
+add_proto qw/unsigned int aom_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance4x8 sse2 msa/;
+
+add_proto qw/unsigned int aom_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_variance4x4 sse2 msa/;
+
+#
+# Specialty Variance
+#
+# get##var returns sse and sum separately; mse## returns sum of squared error.
+add_proto qw/void aom_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+add_proto qw/void aom_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+specialize qw/aom_get16x16var sse2 avx2 neon msa/;
+specialize qw/aom_get8x8var   sse2      neon msa/;
+
+
+add_proto qw/unsigned int aom_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+
+specialize qw/aom_mse16x16          sse2 avx2 media neon msa/;
+specialize qw/aom_mse16x8           sse2                 msa/;
+specialize qw/aom_mse8x16           sse2                 msa/;
+specialize qw/aom_mse8x8            sse2                 msa/;
+
+# Per-bit-depth (8/10/12) high-bit-depth get##var and mse; only the square
+# mse sizes get sse2 versions.
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  foreach $bd (8, 10, 12) {
+    add_proto qw/void/, "aom_highbd_${bd}_get16x16var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+    add_proto qw/void/, "aom_highbd_${bd}_get8x8var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+    add_proto qw/unsigned int/, "aom_highbd_${bd}_mse16x16", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+    add_proto qw/unsigned int/, "aom_highbd_${bd}_mse16x8", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+    add_proto qw/unsigned int/, "aom_highbd_${bd}_mse8x16", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+    add_proto qw/unsigned int/, "aom_highbd_${bd}_mse8x8", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+
+    specialize "aom_highbd_${bd}_mse16x16", qw/sse2/;
+    specialize "aom_highbd_${bd}_mse8x8", qw/sse2/;
+  }
+}
+
+#
+# Upsampled prediction / compound-average upsampled prediction
+#
+add_proto qw/void aom_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
+specialize qw/aom_upsampled_pred sse2/;
+add_proto qw/void aom_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
+specialize qw/aom_comp_avg_upsampled_pred sse2/;
+
+# High-bit-depth versions operate on uint16_t prediction buffers.
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void aom_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
+  specialize qw/aom_highbd_upsampled_pred sse2/;
+  add_proto qw/void aom_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
+  specialize qw/aom_highbd_comp_avg_upsampled_pred sse2/;
+}
+
+#
+# Block sum helpers (get_mb_ss, get4x4sse_cs)
+#
+add_proto qw/unsigned int aom_get_mb_ss/, "const int16_t *";
+add_proto qw/unsigned int aom_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
+
+specialize qw/aom_get_mb_ss sse2 msa/;
+specialize qw/aom_get4x4sse_cs neon msa/;
+
+#
+# Variance / Subpixel Variance / Subpixel Avg Variance
+#
+# Generic protos for every size in @block_sizes. NOTE(review): the
+# aom_variance{W}x{H} names were already declared and specialized in the
+# fixed-size section above; the specialize tables here repeat those lists.
+foreach (@block_sizes) {
+  ($w, $h) = @$_;
+  add_proto qw/unsigned int/, "aom_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/uint32_t/, "aom_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  add_proto qw/uint32_t/, "aom_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+}
+
+specialize qw/aom_variance64x64     sse2 avx2       neon msa/;
+specialize qw/aom_variance64x32     sse2 avx2       neon msa/;
+specialize qw/aom_variance32x64     sse2            neon msa/;
+specialize qw/aom_variance32x32     sse2 avx2       neon msa/;
+specialize qw/aom_variance32x16     sse2 avx2            msa/;
+specialize qw/aom_variance16x32     sse2                 msa/;
+specialize qw/aom_variance16x16     sse2 avx2 media neon msa/;
+specialize qw/aom_variance16x8      sse2            neon msa/;
+specialize qw/aom_variance8x16      sse2            neon msa/;
+specialize qw/aom_variance8x8       sse2      media neon msa/;
+specialize qw/aom_variance8x4       sse2                 msa/;
+specialize qw/aom_variance4x8       sse2                 msa/;
+specialize qw/aom_variance4x4       sse2                 msa/;
+
+specialize qw/aom_sub_pixel_variance64x64     avx2       neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance64x32                     msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x64                     msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x32     avx2       neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x16                     msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x32                     msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x16          media neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x8                      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x16                      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x8            media neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x4                       msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x8                       msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x4                       msa sse2 ssse3/;
+
+specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance64x32      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x64      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x16      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x32      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x16      msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x8       msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x16       msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x8        msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x4        msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance4x8        msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance4x4        msa sse2 ssse3/;
+# High-bit-depth variance per bit depth (8/10/12): sse2 for most sizes,
+# sse4_1 only for 4x4; 128-wide/tall and 4-wide blocks are C-only for subpel.
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  foreach $bd (8, 10, 12) {
+    foreach (@block_sizes) {
+      ($w, $h) = @$_;
+      add_proto qw/unsigned int/, "aom_highbd_${bd}_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+      add_proto qw/uint32_t/, "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+      add_proto qw/uint32_t/, "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+      if ($w != 128 && $h != 128 && $w != 4 && $h != 4) {
+        specialize "aom_highbd_${bd}_variance${w}x${h}", "sse2";
+      }
+      if ($w == 4 && $h == 4) {
+        specialize "aom_highbd_${bd}_variance${w}x${h}", "sse4_1";
+      }
+      if ($w != 128 && $h != 128 && $w != 4) {
+        specialize "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", qw/sse2/;
+        specialize "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", qw/sse2/;
+      }
+      if ($w == 4 && $h == 4) {
+        specialize "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", "sse4_1";
+        specialize "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "sse4_1";
+      }
+    }
+  }
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+#
+# Masked Variance / Masked Subpixel Variance
+#
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/unsigned int/, "aom_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize "aom_masked_variance${w}x${h}", qw/ssse3/;
+    specialize "aom_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
+  }
+
+  # $bd infixes "_", "_10_", "_12_" yield aom_highbd_masked_*,
+  # aom_highbd_10_masked_* and aom_highbd_12_masked_* names respectively.
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    foreach $bd ("_", "_10_", "_12_") {
+      foreach (@block_sizes) {
+        ($w, $h) = @$_;
+        add_proto qw/unsigned int/, "aom_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+        add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+        specialize "aom_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
+        specialize "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
+      }
+    }
+  }
+}
+
+#
+# OBMC Variance / OBMC Subpixel Variance
+#
+# OBMC variance gets sse4_1; the sub-pixel variants are declared with an
+# empty specialize, i.e. C implementation only.
+if (aom_config("CONFIG_OBMC") eq "yes") {
+  foreach (@block_sizes) {
+    ($w, $h) = @$_;
+    add_proto qw/unsigned int/, "aom_obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+    add_proto qw/unsigned int/, "aom_obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+    specialize "aom_obmc_variance${w}x${h}", q/sse4_1/;
+    specialize "aom_obmc_sub_pixel_variance${w}x${h}";
+  }
+
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    foreach $bd ("_", "_10_", "_12_") {
+      foreach (@block_sizes) {
+        ($w, $h) = @$_;
+        add_proto qw/unsigned int/, "aom_highbd${bd}obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+        add_proto qw/unsigned int/, "aom_highbd${bd}obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+        specialize "aom_highbd${bd}obmc_variance${w}x${h}", qw/sse4_1/;
+        specialize "aom_highbd${bd}obmc_sub_pixel_variance${w}x${h}";
+      }
+    }
+  }
+}
+
+# NOTE(review): these aom_sub_pixel_avg_variance{W}x{H} protos and specialize
+# lists duplicate the ones already emitted by the @block_sizes loop above --
+# presumably a merge artifact of the rename; verify against upstream before
+# removing either copy.
+add_proto qw/uint32_t aom_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
+
+#
+# Specialty Subpixel
+#
+# Half-pixel-offset variance shortcuts: horizontal, vertical, and both.
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
+  specialize qw/aom_variance_halfpixvar16x16_h sse2 media/;
+
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
+  specialize qw/aom_variance_halfpixvar16x16_v sse2 media/;
+
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
+  specialize qw/aom_variance_halfpixvar16x16_hv sse2 media/;
+
+#
+# Comp Avg
+#
+# No specialize line: aom_comp_avg_pred has only the C implementation.
+add_proto qw/void aom_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance64x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance64x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance32x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance32x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance32x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance16x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance16x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance8x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_variance8x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+
+  add_proto qw/unsigned int aom_highbd_10_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance64x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance64x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance32x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance32x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance32x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance16x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance16x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance8x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_variance8x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+
+  add_proto qw/unsigned int aom_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance64x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance64x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance32x64 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance32x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance32x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance16x32 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance16x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance8x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_variance8x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+
+  add_proto qw/void aom_highbd_8_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+  add_proto qw/void aom_highbd_8_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+  add_proto qw/void aom_highbd_10_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+  add_proto qw/void aom_highbd_10_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+  add_proto qw/void aom_highbd_12_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+  add_proto qw/void aom_highbd_12_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+  add_proto qw/unsigned int aom_highbd_8_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_mse16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_8_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_8_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_8_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_8_mse8x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_mse16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_10_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_10_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_10_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_10_mse8x8 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_mse16x16 sse2/;
+
+  add_proto qw/unsigned int aom_highbd_12_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_12_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  add_proto qw/unsigned int aom_highbd_12_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
+  specialize qw/aom_highbd_12_mse8x8 sse2/;
+
+  add_proto qw/void aom_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
+
+  #
+  # Subpixel Variance
+  #
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_12_sub_pixel_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_10_sub_pixel_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  specialize qw/aom_highbd_8_sub_pixel_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_12_sub_pixel_avg_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_10_sub_pixel_avg_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance64x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance64x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance32x64 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance32x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance32x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance16x32 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance16x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance16x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance8x16 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance8x8 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  specialize qw/aom_highbd_8_sub_pixel_avg_variance8x4 sse2/;
+
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+  add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+}  # CONFIG_ENCODERS
+
+1;
diff --git a/aom_dsp/vpx_filter.h b/aom_dsp/aom_filter.h
similarity index 88%
rename from aom_dsp/vpx_filter.h
rename to aom_dsp/aom_filter.h
index d977bd3..0a71817 100644
--- a/aom_dsp/vpx_filter.h
+++ b/aom_dsp/aom_filter.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_VPX_FILTER_H_
-#define VPX_DSP_VPX_FILTER_H_
+#ifndef AOM_DSP_AOM_FILTER_H_
+#define AOM_DSP_AOM_FILTER_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -39,4 +39,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_VPX_FILTER_H_
+#endif  // AOM_DSP_AOM_FILTER_H_
diff --git a/aom_dsp/arm/vpx_convolve8_avg_neon.c b/aom_dsp/arm/aom_convolve8_avg_neon.c
similarity index 98%
rename from aom_dsp/arm/vpx_convolve8_avg_neon.c
rename to aom_dsp/arm/aom_convolve8_avg_neon.c
index c6b1831..701d5b9 100644
--- a/aom_dsp/arm/vpx_convolve8_avg_neon.c
+++ b/aom_dsp/arm/aom_convolve8_avg_neon.c
@@ -11,9 +11,9 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
@@ -38,7 +38,7 @@
   return qdst;
 }
 
-void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y,  // unused
@@ -218,7 +218,7 @@
   return;
 }
 
-void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x,  // unused
                                  int x_step_q4,            // unused
diff --git a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
similarity index 93%
rename from aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm
rename to aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
index e279d57..a408d9d 100644
--- a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
@@ -14,11 +14,11 @@
     ; w%4 == 0
     ; h%4 == 0
     ; taps == 8
-    ; VP9_FILTER_WEIGHT == 128
-    ; VP9_FILTER_SHIFT == 7
+    ; AV1_FILTER_WEIGHT == 128
+    ; AV1_FILTER_SHIFT == 7
 
-    EXPORT  |vpx_convolve8_avg_horiz_neon|
-    EXPORT  |vpx_convolve8_avg_vert_neon|
+    EXPORT  |aom_convolve8_avg_horiz_neon|
+    EXPORT  |aom_convolve8_avg_vert_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -49,7 +49,7 @@
 ; sp[]int w
 ; sp[]int h
 
-|vpx_convolve8_avg_horiz_neon| PROC
+|aom_convolve8_avg_horiz_neon| PROC
     push            {r4-r10, lr}
 
     sub             r0, r0, #3              ; adjust for taps
@@ -72,7 +72,7 @@
 
     mov             r10, r6                 ; w loop counter
 
-vpx_convolve8_avg_loop_horiz_v
+aom_convolve8_avg_loop_horiz_v
     vld1.8          {d24}, [r0], r1
     vld1.8          {d25}, [r0], r1
     vld1.8          {d26}, [r0], r1
@@ -95,7 +95,7 @@
 
     add             r0, r0, #3
 
-vpx_convolve8_avg_loop_horiz
+aom_convolve8_avg_loop_horiz
     add             r5, r0, #64
 
     vld1.32         {d28[]}, [r0], r1
@@ -164,20 +164,20 @@
     vmov            q9,  q13
 
     subs            r6, r6, #4              ; w -= 4
-    bgt             vpx_convolve8_avg_loop_horiz
+    bgt             aom_convolve8_avg_loop_horiz
 
     ; outer loop
     mov             r6, r10                 ; restore w counter
     add             r0, r0, r9              ; src += src_stride * 4 - w
     add             r2, r2, r12             ; dst += dst_stride * 4 - w
     subs            r7, r7, #4              ; h -= 4
-    bgt vpx_convolve8_avg_loop_horiz_v
+    bgt aom_convolve8_avg_loop_horiz_v
 
     pop             {r4-r10, pc}
 
     ENDP
 
-|vpx_convolve8_avg_vert_neon| PROC
+|aom_convolve8_avg_vert_neon| PROC
     push            {r4-r8, lr}
 
     ; adjust for taps
@@ -193,7 +193,7 @@
     lsl             r1, r1, #1
     lsl             r3, r3, #1
 
-vpx_convolve8_avg_loop_vert_h
+aom_convolve8_avg_loop_vert_h
     mov             r4, r0
     add             r7, r0, r1, asr #1
     mov             r5, r2
@@ -213,7 +213,7 @@
     vmovl.u8        q10, d20
     vmovl.u8        q11, d22
 
-vpx_convolve8_avg_loop_vert
+aom_convolve8_avg_loop_vert
     ; always process a 4x4 block at a time
     vld1.u32        {d24[0]}, [r7], r1
     vld1.u32        {d26[0]}, [r4], r1
@@ -278,13 +278,13 @@
     vmov            d22, d25
 
     subs            r12, r12, #4            ; h -= 4
-    bgt             vpx_convolve8_avg_loop_vert
+    bgt             aom_convolve8_avg_loop_vert
 
     ; outer loop
     add             r0, r0, #4
     add             r2, r2, #4
     subs            r6, r6, #4              ; w -= 4
-    bgt             vpx_convolve8_avg_loop_vert_h
+    bgt             aom_convolve8_avg_loop_vert_h
 
     pop             {r4-r8, pc}
 
diff --git a/aom_dsp/arm/vpx_convolve8_neon.c b/aom_dsp/arm/aom_convolve8_neon.c
similarity index 98%
rename from aom_dsp/arm/vpx_convolve8_neon.c
rename to aom_dsp/arm/aom_convolve8_neon.c
index b84be93..3e5aaf0 100644
--- a/aom_dsp/arm/vpx_convolve8_neon.c
+++ b/aom_dsp/arm/aom_convolve8_neon.c
@@ -11,9 +11,9 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
@@ -38,7 +38,7 @@
   return qdst;
 }
 
-void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y,  // unused
@@ -204,7 +204,7 @@
   return;
 }
 
-void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x,  // unused
                              int x_step_q4,            // unused
diff --git a/aom_dsp/arm/vpx_convolve8_neon_asm.asm b/aom_dsp/arm/aom_convolve8_neon_asm.asm
similarity index 93%
rename from aom_dsp/arm/vpx_convolve8_neon_asm.asm
rename to aom_dsp/arm/aom_convolve8_neon_asm.asm
index 2d0f2ae..800876f 100644
--- a/aom_dsp/arm/vpx_convolve8_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve8_neon_asm.asm
@@ -14,11 +14,11 @@
     ; w%4 == 0
     ; h%4 == 0
     ; taps == 8
-    ; VP9_FILTER_WEIGHT == 128
-    ; VP9_FILTER_SHIFT == 7
+    ; AV1_FILTER_WEIGHT == 128
+    ; AV1_FILTER_SHIFT == 7
 
-    EXPORT  |vpx_convolve8_horiz_neon|
-    EXPORT  |vpx_convolve8_vert_neon|
+    EXPORT  |aom_convolve8_horiz_neon|
+    EXPORT  |aom_convolve8_vert_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -49,7 +49,7 @@
 ; sp[]int w
 ; sp[]int h
 
-|vpx_convolve8_horiz_neon| PROC
+|aom_convolve8_horiz_neon| PROC
     push            {r4-r10, lr}
 
     sub             r0, r0, #3              ; adjust for taps
@@ -72,7 +72,7 @@
 
     mov             r10, r6                 ; w loop counter
 
-vpx_convolve8_loop_horiz_v
+aom_convolve8_loop_horiz_v
     vld1.8          {d24}, [r0], r1
     vld1.8          {d25}, [r0], r1
     vld1.8          {d26}, [r0], r1
@@ -95,7 +95,7 @@
 
     add             r0, r0, #3
 
-vpx_convolve8_loop_horiz
+aom_convolve8_loop_horiz
     add             r5, r0, #64
 
     vld1.32         {d28[]}, [r0], r1
@@ -153,20 +153,20 @@
     vmov            q9,  q13
 
     subs            r6, r6, #4              ; w -= 4
-    bgt             vpx_convolve8_loop_horiz
+    bgt             aom_convolve8_loop_horiz
 
     ; outer loop
     mov             r6, r10                 ; restore w counter
     add             r0, r0, r9              ; src += src_stride * 4 - w
     add             r2, r2, r12             ; dst += dst_stride * 4 - w
     subs            r7, r7, #4              ; h -= 4
-    bgt vpx_convolve8_loop_horiz_v
+    bgt aom_convolve8_loop_horiz_v
 
     pop             {r4-r10, pc}
 
     ENDP
 
-|vpx_convolve8_vert_neon| PROC
+|aom_convolve8_vert_neon| PROC
     push            {r4-r8, lr}
 
     ; adjust for taps
@@ -182,7 +182,7 @@
     lsl             r1, r1, #1
     lsl             r3, r3, #1
 
-vpx_convolve8_loop_vert_h
+aom_convolve8_loop_vert_h
     mov             r4, r0
     add             r7, r0, r1, asr #1
     mov             r5, r2
@@ -202,7 +202,7 @@
     vmovl.u8        q10, d20
     vmovl.u8        q11, d22
 
-vpx_convolve8_loop_vert
+aom_convolve8_loop_vert
     ; always process a 4x4 block at a time
     vld1.u32        {d24[0]}, [r7], r1
     vld1.u32        {d26[0]}, [r4], r1
@@ -256,13 +256,13 @@
     vmov            d22, d25
 
     subs            r12, r12, #4            ; h -= 4
-    bgt             vpx_convolve8_loop_vert
+    bgt             aom_convolve8_loop_vert
 
     ; outer loop
     add             r0, r0, #4
     add             r2, r2, #4
     subs            r6, r6, #4              ; w -= 4
-    bgt             vpx_convolve8_loop_vert_h
+    bgt             aom_convolve8_loop_vert_h
 
     pop             {r4-r8, pc}
 
diff --git a/aom_dsp/arm/vpx_convolve_avg_neon.c b/aom_dsp/arm/aom_convolve_avg_neon.c
similarity index 97%
rename from aom_dsp/arm/vpx_convolve_avg_neon.c
rename to aom_dsp/arm/aom_convolve_avg_neon.c
index a04d384..cfdfed9 100644
--- a/aom_dsp/arm/vpx_convolve_avg_neon.c
+++ b/aom_dsp/arm/aom_convolve_avg_neon.c
@@ -10,10 +10,10 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-void vpx_convolve_avg_neon(const uint8_t *src,    // r0
+void aom_convolve_avg_neon(const uint8_t *src,    // r0
                            ptrdiff_t src_stride,  // r1
                            uint8_t *dst,          // r2
                            ptrdiff_t dst_stride,  // r3
diff --git a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm
similarity index 98%
rename from aom_dsp/arm/vpx_convolve_avg_neon_asm.asm
rename to aom_dsp/arm/aom_convolve_avg_neon_asm.asm
index 97e6189..2177756 100644
--- a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm
@@ -8,14 +8,14 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_convolve_avg_neon|
+    EXPORT  |aom_convolve_avg_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-|vpx_convolve_avg_neon| PROC
+|aom_convolve_avg_neon| PROC
     push                {r4-r6, lr}
     ldrd                r4, r5, [sp, #32]
     mov                 r6, r2
diff --git a/aom_dsp/arm/vpx_convolve_copy_neon.c b/aom_dsp/arm/aom_convolve_copy_neon.c
similarity index 95%
rename from aom_dsp/arm/vpx_convolve_copy_neon.c
rename to aom_dsp/arm/aom_convolve_copy_neon.c
index 8000eb7..bb8a55c 100644
--- a/aom_dsp/arm/vpx_convolve_copy_neon.c
+++ b/aom_dsp/arm/aom_convolve_copy_neon.c
@@ -10,10 +10,10 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-void vpx_convolve_copy_neon(const uint8_t *src,    // r0
+void aom_convolve_copy_neon(const uint8_t *src,    // r0
                             ptrdiff_t src_stride,  // r1
                             uint8_t *dst,          // r2
                             ptrdiff_t dst_stride,  // r3
diff --git a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm
similarity index 97%
rename from aom_dsp/arm/vpx_convolve_copy_neon_asm.asm
rename to aom_dsp/arm/aom_convolve_copy_neon_asm.asm
index 89164ad..2d60bee3 100644
--- a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm
@@ -8,14 +8,14 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_convolve_copy_neon|
+    EXPORT  |aom_convolve_copy_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-|vpx_convolve_copy_neon| PROC
+|aom_convolve_copy_neon| PROC
     push                {r4-r5, lr}
     ldrd                r4, r5, [sp, #28]
 
diff --git a/aom_dsp/arm/vpx_convolve_neon.c b/aom_dsp/arm/aom_convolve_neon.c
similarity index 83%
rename from aom_dsp/arm/vpx_convolve_neon.c
rename to aom_dsp/arm/aom_convolve_neon.c
index 297b64b..11bff2a 100644
--- a/aom_dsp/arm/vpx_convolve_neon.c
+++ b/aom_dsp/arm/aom_convolve_neon.c
@@ -10,11 +10,11 @@
 
 #include <assert.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
-void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                         ptrdiff_t dst_stride, const int16_t *filter_x,
                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
@@ -34,16 +34,16 @@
    * the temp buffer which has lots of extra room and is subsequently discarded
    * this is safe if somewhat less than ideal.
    */
-  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+  aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
                            x_step_q4, filter_y, y_step_q4, w,
                            intermediate_height);
 
   /* Step into the temp buffer 3 lines to get the actual frame data */
-  vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+  aom_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
                           x_step_q4, filter_y, y_step_q4, w, h);
 }
 
-void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4, int w,
@@ -57,9 +57,9 @@
   /* This implementation has the same issues as above. In addition, we only want
    * to average the values after both passes.
    */
-  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+  aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
                            x_step_q4, filter_y, y_step_q4, w,
                            intermediate_height);
-  vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+  aom_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
                               x_step_q4, filter_y, y_step_q4, w, h);
 }
diff --git a/aom_dsp/arm/avg_neon.c b/aom_dsp/arm/avg_neon.c
index ff9cbb9..1463c17 100644
--- a/aom_dsp/arm/avg_neon.c
+++ b/aom_dsp/arm/avg_neon.c
@@ -11,10 +11,10 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
   const uint32x4_t a = vpaddlq_u16(v_16x8);
@@ -24,7 +24,7 @@
   return vget_lane_u32(c, 0);
 }
 
-unsigned int vpx_avg_4x4_neon(const uint8_t *s, int p) {
+unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) {
   uint16x8_t v_sum;
   uint32x2_t v_s0 = vdup_n_u32(0);
   uint32x2_t v_s1 = vdup_n_u32(0);
@@ -36,7 +36,7 @@
   return (horizontal_add_u16x8(v_sum) + 8) >> 4;
 }
 
-unsigned int vpx_avg_8x8_neon(const uint8_t *s, int p) {
+unsigned int aom_avg_8x8_neon(const uint8_t *s, int p) {
   uint8x8_t v_s0 = vld1_u8(s);
   const uint8x8_t v_s1 = vld1_u8(s + p);
   uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
@@ -64,7 +64,7 @@
 
 // coeff: 16 bits, dynamic range [-32640, 32640].
 // length: value range {16, 64, 256, 1024}.
-int vpx_satd_neon(const int16_t *coeff, int length) {
+int aom_satd_neon(const int16_t *coeff, int length) {
   const int16x4_t zero = vdup_n_s16(0);
   int32x4_t accum = vdupq_n_s32(0);
 
@@ -89,7 +89,7 @@
   }
 }
 
-void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
+void aom_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
                           const int ref_stride, const int height) {
   int i;
   uint16x8_t vec_sum_lo = vdupq_n_u16(0);
@@ -142,7 +142,7 @@
   vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
 }
 
-int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) {
+int16_t aom_int_pro_col_neon(uint8_t const *ref, const int width) {
   int i;
   uint16x8_t vec_sum = vdupq_n_u16(0);
 
@@ -158,7 +158,7 @@
 
 // ref, src = [0, 510] - max diff = 16-bits
 // bwl = {2, 3, 4}, width = {16, 32, 64}
-int vpx_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
+int aom_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
   int width = 4 << bwl;
   int32x4_t sse = vdupq_n_s32(0);
   int16x8_t total = vdupq_n_s16(0);
@@ -198,7 +198,7 @@
   }
 }
 
-void vpx_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                          int b_stride, int *min, int *max) {
   // Load and concatenate.
   const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));
diff --git a/aom_dsp/arm/bilinear_filter_media.asm b/aom_dsp/arm/bilinear_filter_media.asm
index f3f9754..fbbef25 100644
--- a/aom_dsp/arm/bilinear_filter_media.asm
+++ b/aom_dsp/arm/bilinear_filter_media.asm
@@ -9,8 +9,8 @@
 ;
 
 
-    EXPORT  |vpx_filter_block2d_bil_first_pass_media|
-    EXPORT  |vpx_filter_block2d_bil_second_pass_media|
+    EXPORT  |aom_filter_block2d_bil_first_pass_media|
+    EXPORT  |aom_filter_block2d_bil_second_pass_media|
 
     AREA    |.text|, CODE, READONLY  ; name this block of code
 
@@ -20,13 +20,13 @@
 ; r2    unsigned int    src_pitch,
 ; r3    unsigned int    height,
 ; stack unsigned int    width,
-; stack const short    *vpx_filter
+; stack const short    *aom_filter
 ;-------------------------------------
 ; The output is transposed stroed in output array to make it easy for second pass filtering.
-|vpx_filter_block2d_bil_first_pass_media| PROC
+|aom_filter_block2d_bil_first_pass_media| PROC
     stmdb   sp!, {r4 - r11, lr}
 
-    ldr     r11, [sp, #40]                  ; vpx_filter address
+    ldr     r11, [sp, #40]                  ; aom_filter address
     ldr     r4, [sp, #36]                   ; width
 
     mov     r12, r3                         ; outer-loop counter
@@ -134,7 +134,7 @@
 
     ldmia   sp!, {r4 - r11, pc}
 
-    ENDP  ; |vpx_filter_block2d_bil_first_pass_media|
+    ENDP  ; |aom_filter_block2d_bil_first_pass_media|
 
 
 ;---------------------------------
@@ -143,12 +143,12 @@
 ; r2    int             dst_pitch,
 ; r3    unsigned int    height,
 ; stack unsigned int    width,
-; stack const short    *vpx_filter
+; stack const short    *aom_filter
 ;---------------------------------
-|vpx_filter_block2d_bil_second_pass_media| PROC
+|aom_filter_block2d_bil_second_pass_media| PROC
     stmdb   sp!, {r4 - r11, lr}
 
-    ldr     r11, [sp, #40]                  ; vpx_filter address
+    ldr     r11, [sp, #40]                  ; aom_filter address
     ldr     r4, [sp, #36]                   ; width
 
     ldr     r5, [r11]                       ; load up filter coefficients
@@ -232,6 +232,6 @@
     bne     bil_height_loop_null_2nd
 
     ldmia   sp!, {r4 - r11, pc}
-    ENDP  ; |vpx_filter_block2d_second_pass_media|
+    ENDP  ; |aom_filter_block2d_second_pass_media|
 
     END
diff --git a/aom_dsp/arm/fwd_txfm_neon.c b/aom_dsp/arm/fwd_txfm_neon.c
index 4763cdb..92fe3d8 100644
--- a/aom_dsp/arm/fwd_txfm_neon.c
+++ b/aom_dsp/arm/fwd_txfm_neon.c
@@ -10,10 +10,10 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 
-void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
+void aom_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
   int i;
   // stage 1
   int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);
@@ -170,7 +170,7 @@
     }
   }  // for
   {
-    // from vpx_dct_sse2.c
+    // from aom_dct_sse2.c
     // Post-condition (division by two)
     //    division of two 16 bits signed numbers using shifts
     //    n / 2 = (n - (n >> 15)) >> 1
@@ -202,7 +202,7 @@
   }
 }
 
-void vpx_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) {
+void aom_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) {
   int r;
   int16x8_t sum = vld1q_s16(&input[0]);
   for (r = 1; r < 8; ++r) {
diff --git a/aom_dsp/arm/hadamard_neon.c b/aom_dsp/arm/hadamard_neon.c
index 46b2755..af955f0 100644
--- a/aom_dsp/arm/hadamard_neon.c
+++ b/aom_dsp/arm/hadamard_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2,
                                  int16x8_t *a3, int16x8_t *a4, int16x8_t *a5,
@@ -130,7 +130,7 @@
   *a7 = b3.val[1];
 }
 
-void vpx_hadamard_8x8_neon(const int16_t *src_diff, int src_stride,
+void aom_hadamard_8x8_neon(const int16_t *src_diff, int src_stride,
                            int16_t *coeff) {
   int16x8_t a0 = vld1q_s16(src_diff);
   int16x8_t a1 = vld1q_s16(src_diff + src_stride);
@@ -159,19 +159,19 @@
   vst1q_s16(coeff + 56, a7);
 }
 
-void vpx_hadamard_16x16_neon(const int16_t *src_diff, int src_stride,
+void aom_hadamard_16x16_neon(const int16_t *src_diff, int src_stride,
                              int16_t *coeff) {
   int i;
 
   /* Rearrange 16x16 to 8x32 and remove stride.
    * Top left first. */
-  vpx_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
+  aom_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
   /* Top right. */
-  vpx_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
+  aom_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
   /* Bottom left. */
-  vpx_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
+  aom_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
   /* Bottom right. */
-  vpx_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);
+  aom_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);
 
   for (i = 0; i < 64; i += 8) {
     const int16x8_t a0 = vld1q_s16(coeff + 0);
diff --git a/aom_dsp/arm/idct16x16_1_add_neon.asm b/aom_dsp/arm/idct16x16_1_add_neon.asm
index dc459e2..e07614f 100644
--- a/aom_dsp/arm/idct16x16_1_add_neon.asm
+++ b/aom_dsp/arm/idct16x16_1_add_neon.asm
@@ -8,21 +8,21 @@
 ;
 
 
-    EXPORT  |vpx_idct16x16_1_add_neon|
+    EXPORT  |aom_idct16x16_1_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-;void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest,
 ;                                    int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct16x16_1_add_neon| PROC
+|aom_idct16x16_1_add_neon| PROC
     ldrsh            r0, [r0]
 
     ; generate cospi_16_64 = 11585
@@ -193,6 +193,6 @@
     vst1.64          {d31}, [r12], r2
 
     bx               lr
-    ENDP             ; |vpx_idct16x16_1_add_neon|
+    ENDP             ; |aom_idct16x16_1_add_neon|
 
     END
diff --git a/aom_dsp/arm/idct16x16_1_add_neon.c b/aom_dsp/arm/idct16x16_1_add_neon.c
index a37e53c..2bdb333 100644
--- a/aom_dsp/arm/idct16x16_1_add_neon.c
+++ b/aom_dsp/arm/idct16x16_1_add_neon.c
@@ -13,7 +13,7 @@
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
-void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x8_t d2u8, d3u8, d30u8, d31u8;
   uint64x1_t d2u64, d3u64, d4u64, d5u64;
   uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
diff --git a/aom_dsp/arm/idct16x16_add_neon.asm b/aom_dsp/arm/idct16x16_add_neon.asm
index 22a0c95..e22ab3e 100644
--- a/aom_dsp/arm/idct16x16_add_neon.asm
+++ b/aom_dsp/arm/idct16x16_add_neon.asm
@@ -8,10 +8,10 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_idct16x16_256_add_neon_pass1|
-    EXPORT  |vpx_idct16x16_256_add_neon_pass2|
-    EXPORT  |vpx_idct16x16_10_add_neon_pass1|
-    EXPORT  |vpx_idct16x16_10_add_neon_pass2|
+    EXPORT  |aom_idct16x16_256_add_neon_pass1|
+    EXPORT  |aom_idct16x16_256_add_neon_pass2|
+    EXPORT  |aom_idct16x16_10_add_neon_pass1|
+    EXPORT  |aom_idct16x16_10_add_neon_pass2|
     ARM
     REQUIRE8
     PRESERVE8
@@ -36,7 +36,7 @@
     MEND
 
     AREA    Block, CODE, READONLY ; name this block of code
-;void |vpx_idct16x16_256_add_neon_pass1|(int16_t *input,
+;void |aom_idct16x16_256_add_neon_pass1|(int16_t *input,
 ;                                          int16_t *output, int output_stride)
 ;
 ; r0  int16_t input
@@ -46,7 +46,7 @@
 ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
 ; registers and use them as buffer during calculation.
-|vpx_idct16x16_256_add_neon_pass1| PROC
+|aom_idct16x16_256_add_neon_pass1| PROC
 
     ; TODO(hkuang): Find a better way to load the elements.
     ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
@@ -273,9 +273,9 @@
     vst1.64         {d31}, [r1], r2
 
     bx              lr
-    ENDP  ; |vpx_idct16x16_256_add_neon_pass1|
+    ENDP  ; |aom_idct16x16_256_add_neon_pass1|
 
-;void vpx_idct16x16_256_add_neon_pass2(int16_t *src,
+;void aom_idct16x16_256_add_neon_pass2(int16_t *src,
 ;                                        int16_t *output,
 ;                                        int16_t *pass1Output,
 ;                                        int16_t skip_adding,
@@ -292,7 +292,7 @@
 ; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
 ; registers and use them as buffer during calculation.
-|vpx_idct16x16_256_add_neon_pass2| PROC
+|aom_idct16x16_256_add_neon_pass2| PROC
     push            {r3-r9}
 
     ; TODO(hkuang): Find a better way to load the elements.
@@ -784,9 +784,9 @@
 end_idct16x16_pass2
     pop             {r3-r9}
     bx              lr
-    ENDP  ; |vpx_idct16x16_256_add_neon_pass2|
+    ENDP  ; |aom_idct16x16_256_add_neon_pass2|
 
-;void |vpx_idct16x16_10_add_neon_pass1|(int16_t *input,
+;void |aom_idct16x16_10_add_neon_pass1|(int16_t *input,
 ;                                             int16_t *output, int output_stride)
 ;
 ; r0  int16_t input
@@ -796,7 +796,7 @@
 ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
 ; registers and use them as buffer during calculation.
-|vpx_idct16x16_10_add_neon_pass1| PROC
+|aom_idct16x16_10_add_neon_pass1| PROC
 
     ; TODO(hkuang): Find a better way to load the elements.
     ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
@@ -905,9 +905,9 @@
     vst1.64         {d31}, [r1], r2
 
     bx              lr
-    ENDP  ; |vpx_idct16x16_10_add_neon_pass1|
+    ENDP  ; |aom_idct16x16_10_add_neon_pass1|
 
-;void vpx_idct16x16_10_add_neon_pass2(int16_t *src,
+;void aom_idct16x16_10_add_neon_pass2(int16_t *src,
 ;                                           int16_t *output,
 ;                                           int16_t *pass1Output,
 ;                                           int16_t skip_adding,
@@ -924,7 +924,7 @@
 ; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
 ; registers and use them as buffer during calculation.
-|vpx_idct16x16_10_add_neon_pass2| PROC
+|aom_idct16x16_10_add_neon_pass2| PROC
     push            {r3-r9}
 
     ; TODO(hkuang): Find a better way to load the elements.
@@ -1175,5 +1175,5 @@
 end_idct10_16x16_pass2
     pop             {r3-r9}
     bx              lr
-    ENDP  ; |vpx_idct16x16_10_add_neon_pass2|
+    ENDP  ; |aom_idct16x16_10_add_neon_pass2|
     END
diff --git a/aom_dsp/arm/idct16x16_add_neon.c b/aom_dsp/arm/idct16x16_add_neon.c
index 2bb92c6..268c2ce 100644
--- a/aom_dsp/arm/idct16x16_add_neon.c
+++ b/aom_dsp/arm/idct16x16_add_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 
 static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
@@ -77,7 +77,7 @@
   return;
 }
 
-void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
+void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
                                       int output_stride) {
   int16x4_t d0s16, d1s16, d2s16, d3s16;
   int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
@@ -313,7 +313,7 @@
   return;
 }
 
-void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
+void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
                                       int16_t *pass1Output, int16_t skip_adding,
                                       uint8_t *dest, int dest_stride) {
   uint8_t *d;
@@ -862,7 +862,7 @@
   return;
 }
 
-void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
+void aom_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
                                      int output_stride) {
   int16x4_t d4s16;
   int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
@@ -998,7 +998,7 @@
   return;
 }
 
-void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
+void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
                                      int16_t *pass1Output, int16_t skip_adding,
                                      uint8_t *dest, int dest_stride) {
   int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
diff --git a/aom_dsp/arm/idct16x16_neon.c b/aom_dsp/arm/idct16x16_neon.c
index e205056..653603a 100644
--- a/aom_dsp/arm/idct16x16_neon.c
+++ b/aom_dsp/arm/idct16x16_neon.c
@@ -8,26 +8,26 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
+void aom_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
                                       int output_stride);
-void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
+void aom_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
                                       int16_t *pass1Output, int16_t skip_adding,
                                       uint8_t *dest, int dest_stride);
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
+void aom_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
                                      int output_stride);
-void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
+void aom_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
                                      int16_t *pass1Output, int16_t skip_adding,
                                      uint8_t *dest, int dest_stride);
 
 #if HAVE_NEON_ASM
 /* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */
-extern void vpx_push_neon(int64_t *store);
-extern void vpx_pop_neon(int64_t *store);
+extern void aom_push_neon(int64_t *store);
+extern void aom_pop_neon(int64_t *store);
 #endif  // HAVE_NEON_ASM
 
-void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
                                 int dest_stride) {
 #if HAVE_NEON_ASM
   int64_t store_reg[8];
@@ -37,63 +37,63 @@
 
 #if HAVE_NEON_ASM
   // save d8-d15 register values.
-  vpx_push_neon(store_reg);
+  aom_push_neon(store_reg);
 #endif
 
   /* Parallel idct on the upper 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
   // which will be saved into row_idct_output.
-  vpx_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+  aom_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
                                    dest, dest_stride);
 
   /* Parallel idct on the lower 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
   // which will be saved into row_idct_output.
-  vpx_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8,
+  aom_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8,
                                    pass1_output, 0, dest, dest_stride);
 
   /* Parallel idct on the left 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+  aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
                                    pass1_output, 1, dest, dest_stride);
 
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+  aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
                                    row_idct_output + 8, pass1_output, 1,
                                    dest + 8, dest_stride);
 
 #if HAVE_NEON_ASM
   // restore d8-d15 register values.
-  vpx_pop_neon(store_reg);
+  aom_pop_neon(store_reg);
 #endif
 
   return;
 }
 
-void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
                                int dest_stride) {
 #if HAVE_NEON_ASM
   int64_t store_reg[8];
@@ -103,18 +103,18 @@
 
 #if HAVE_NEON_ASM
   // save d8-d15 register values.
-  vpx_push_neon(store_reg);
+  aom_push_neon(store_reg);
 #endif
 
   /* Parallel idct on the upper 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
+  aom_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
   // which will be saved into row_idct_output.
-  vpx_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+  aom_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
                                   dest, dest_stride);
 
   /* Skip Parallel idct on the lower 8 rows as they are all 0s */
@@ -122,29 +122,29 @@
   /* Parallel idct on the left 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+  aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
                                    pass1_output, 1, dest, dest_stride);
 
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+  aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+  aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
                                    row_idct_output + 8, pass1_output, 1,
                                    dest + 8, dest_stride);
 
 #if HAVE_NEON_ASM
   // restore d8-d15 register values.
-  vpx_pop_neon(store_reg);
+  aom_pop_neon(store_reg);
 #endif
 
   return;
diff --git a/aom_dsp/arm/idct32x32_1_add_neon.asm b/aom_dsp/arm/idct32x32_1_add_neon.asm
index 96d276b..9b31287 100644
--- a/aom_dsp/arm/idct32x32_1_add_neon.asm
+++ b/aom_dsp/arm/idct32x32_1_add_neon.asm
@@ -7,7 +7,7 @@
 ;  file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_idct32x32_1_add_neon|
+    EXPORT  |aom_idct32x32_1_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -64,14 +64,14 @@
     vst1.8           {q15},[$dst], $stride
     MEND
 
-;void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
 ;                              int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride
 
-|vpx_idct32x32_1_add_neon| PROC
+|aom_idct32x32_1_add_neon| PROC
     push             {lr}
     pld              [r1]
     add              r3, r1, #16               ; r3 dest + 16 for second loop
@@ -140,5 +140,5 @@
     bne              diff_positive_32_32_loop
     pop              {pc}
 
-    ENDP             ; |vpx_idct32x32_1_add_neon|
+    ENDP             ; |aom_idct32x32_1_add_neon|
     END
diff --git a/aom_dsp/arm/idct32x32_1_add_neon.c b/aom_dsp/arm/idct32x32_1_add_neon.c
index 35bfc66..531ffd8 100644
--- a/aom_dsp/arm/idct32x32_1_add_neon.c
+++ b/aom_dsp/arm/idct32x32_1_add_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
@@ -93,7 +93,7 @@
   return;
 }
 
-void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
   int i, j, dest_stride8;
   uint8_t *d;
diff --git a/aom_dsp/arm/idct32x32_add_neon.asm b/aom_dsp/arm/idct32x32_add_neon.asm
index 7483ee7..10de482 100644
--- a/aom_dsp/arm/idct32x32_add_neon.asm
+++ b/aom_dsp/arm/idct32x32_add_neon.asm
@@ -43,7 +43,7 @@
 cospi_31_64 EQU   804
 
 
-    EXPORT  |vpx_idct32x32_1024_add_neon|
+    EXPORT  |aom_idct32x32_1024_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -288,7 +288,7 @@
     MEND
     ; --------------------------------------------------------------------------
 
-;void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+;void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
 ;
 ;   r0  int16_t *input,
 ;   r1  uint8_t *dest,
@@ -303,7 +303,7 @@
 ;   r9  dest + 15 * dest_stride, descending (14, 13, 12, ...)
 ;   r10 dest + 16 * dest_stride, ascending  (17, 18, 19, ...)
 
-|vpx_idct32x32_1024_add_neon| PROC
+|aom_idct32x32_1024_add_neon| PROC
     ; This function does one pass of idct32x32 transform.
     ;
     ; This is done by transposing the input and then doing a 1d transform on
@@ -1295,5 +1295,5 @@
     vpop {d8-d15}
     pop  {r4-r11}
     bx              lr
-    ENDP  ; |vpx_idct32x32_1024_add_neon|
+    ENDP  ; |aom_idct32x32_1024_add_neon|
     END
diff --git a/aom_dsp/arm/idct32x32_add_neon.c b/aom_dsp/arm/idct32x32_add_neon.c
index 644155c..e1a561b 100644
--- a/aom_dsp/arm/idct32x32_add_neon.c
+++ b/aom_dsp/arm/idct32x32_add_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 
 #define LOAD_FROM_TRANSPOSED(prev, first, second) \
@@ -427,7 +427,7 @@
   return;
 }
 
-void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
+void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
   int i, idct32_pass_loop;
   int16_t trans_buf[32 * 8];
   int16_t pass1[32 * 32];
diff --git a/aom_dsp/arm/idct4x4_1_add_neon.asm b/aom_dsp/arm/idct4x4_1_add_neon.asm
index adab715..1457527 100644
--- a/aom_dsp/arm/idct4x4_1_add_neon.asm
+++ b/aom_dsp/arm/idct4x4_1_add_neon.asm
@@ -8,21 +8,21 @@
 ;
 
 
-    EXPORT  |vpx_idct4x4_1_add_neon|
+    EXPORT  |aom_idct4x4_1_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-;void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest,
 ;                                  int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct4x4_1_add_neon| PROC
+|aom_idct4x4_1_add_neon| PROC
     ldrsh            r0, [r0]
 
     ; generate cospi_16_64 = 11585
@@ -63,6 +63,6 @@
     vst1.32          {d7[1]}, [r12]
 
     bx               lr
-    ENDP             ; |vpx_idct4x4_1_add_neon|
+    ENDP             ; |aom_idct4x4_1_add_neon|
 
     END
diff --git a/aom_dsp/arm/idct4x4_1_add_neon.c b/aom_dsp/arm/idct4x4_1_add_neon.c
index 0a2e827..23399fd 100644
--- a/aom_dsp/arm/idct4x4_1_add_neon.c
+++ b/aom_dsp/arm/idct4x4_1_add_neon.c
@@ -13,7 +13,7 @@
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
-void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x8_t d6u8;
   uint32x2_t d2u32 = vdup_n_u32(0);
   uint16x8_t q8u16;
diff --git a/aom_dsp/arm/idct4x4_add_neon.asm b/aom_dsp/arm/idct4x4_add_neon.asm
index 877fbd6..d240f33 100644
--- a/aom_dsp/arm/idct4x4_add_neon.asm
+++ b/aom_dsp/arm/idct4x4_add_neon.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_idct4x4_16_add_neon|
+    EXPORT  |aom_idct4x4_16_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -16,13 +16,13 @@
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
     AREA     Block, CODE, READONLY ; name this block of code
-;void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct4x4_16_add_neon| PROC
+|aom_idct4x4_16_add_neon| PROC
 
     ; The 2D transform is done with two passes which are actually pretty
     ; similar. We first transform the rows. This is done by transposing
@@ -185,6 +185,6 @@
     vst1.32 {d26[1]}, [r1], r2
     vst1.32 {d26[0]}, [r1]  ; no post-increment
     bx              lr
-    ENDP  ; |vpx_idct4x4_16_add_neon|
+    ENDP  ; |aom_idct4x4_16_add_neon|
 
     END
diff --git a/aom_dsp/arm/idct4x4_add_neon.c b/aom_dsp/arm/idct4x4_add_neon.c
index 3826269..5668beb 100644
--- a/aom_dsp/arm/idct4x4_add_neon.c
+++ b/aom_dsp/arm/idct4x4_add_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x8_t d26u8, d27u8;
   uint32x2_t d26u32, d27u32;
   uint16x8_t q8u16, q9u16;
diff --git a/aom_dsp/arm/idct8x8_1_add_neon.asm b/aom_dsp/arm/idct8x8_1_add_neon.asm
index dbbff36..d2b410d 100644
--- a/aom_dsp/arm/idct8x8_1_add_neon.asm
+++ b/aom_dsp/arm/idct8x8_1_add_neon.asm
@@ -8,21 +8,21 @@
 ;
 
 
-    EXPORT  |vpx_idct8x8_1_add_neon|
+    EXPORT  |aom_idct8x8_1_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-;void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
 ;                                  int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct8x8_1_add_neon| PROC
+|aom_idct8x8_1_add_neon| PROC
     ldrsh            r0, [r0]
 
     ; generate cospi_16_64 = 11585
@@ -83,6 +83,6 @@
     vst1.64          {d31}, [r12], r2
 
     bx               lr
-    ENDP             ; |vpx_idct8x8_1_add_neon|
+    ENDP             ; |aom_idct8x8_1_add_neon|
 
     END
diff --git a/aom_dsp/arm/idct8x8_1_add_neon.c b/aom_dsp/arm/idct8x8_1_add_neon.c
index bda5998..393341a 100644
--- a/aom_dsp/arm/idct8x8_1_add_neon.c
+++ b/aom_dsp/arm/idct8x8_1_add_neon.c
@@ -13,7 +13,7 @@
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
-void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x8_t d2u8, d3u8, d30u8, d31u8;
   uint64x1_t d2u64, d3u64, d4u64, d5u64;
   uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
diff --git a/aom_dsp/arm/idct8x8_add_neon.asm b/aom_dsp/arm/idct8x8_add_neon.asm
index 6ab59b4..a03c83d 100644
--- a/aom_dsp/arm/idct8x8_add_neon.asm
+++ b/aom_dsp/arm/idct8x8_add_neon.asm
@@ -8,8 +8,8 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_idct8x8_64_add_neon|
-    EXPORT  |vpx_idct8x8_12_add_neon|
+    EXPORT  |aom_idct8x8_64_add_neon|
+    EXPORT  |aom_idct8x8_12_add_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -198,13 +198,13 @@
     MEND
 
     AREA    Block, CODE, READONLY ; name this block of code
-;void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct8x8_64_add_neon| PROC
+|aom_idct8x8_64_add_neon| PROC
     push            {r4-r9}
     vpush           {d8-d15}
     vld1.s16        {q8,q9}, [r0]!
@@ -308,15 +308,15 @@
     vpop            {d8-d15}
     pop             {r4-r9}
     bx              lr
-    ENDP  ; |vpx_idct8x8_64_add_neon|
+    ENDP  ; |aom_idct8x8_64_add_neon|
 
-;void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
 ;
 ; r0  int16_t input
 ; r1  uint8_t *dest
 ; r2  int dest_stride)
 
-|vpx_idct8x8_12_add_neon| PROC
+|aom_idct8x8_12_add_neon| PROC
     push            {r4-r9}
     vpush           {d8-d15}
     vld1.s16        {q8,q9}, [r0]!
@@ -514,6 +514,6 @@
     vpop            {d8-d15}
     pop             {r4-r9}
     bx              lr
-    ENDP  ; |vpx_idct8x8_12_add_neon|
+    ENDP  ; |aom_idct8x8_12_add_neon|
 
     END
diff --git a/aom_dsp/arm/idct8x8_add_neon.c b/aom_dsp/arm/idct8x8_add_neon.c
index 124c317..bd01aab 100644
--- a/aom_dsp/arm/idct8x8_add_neon.c
+++ b/aom_dsp/arm/idct8x8_add_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 
 static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
@@ -228,7 +228,7 @@
   return;
 }
 
-void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8_t *d1, *d2;
   uint8x8_t d0u8, d1u8, d2u8, d3u8;
   uint64x1_t d0u64, d1u64, d2u64, d3u64;
@@ -330,7 +330,7 @@
   return;
 }
 
-void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8_t *d1, *d2;
   uint8x8_t d0u8, d1u8, d2u8, d3u8;
   int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16;
diff --git a/aom_dsp/arm/intrapred_neon.c b/aom_dsp/arm/intrapred_neon.c
index 3166a4e..f2a0476 100644
--- a/aom_dsp/arm/intrapred_neon.c
+++ b/aom_dsp/arm/intrapred_neon.c
@@ -10,9 +10,9 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 //------------------------------------------------------------------------------
 // DC 4x4
@@ -58,24 +58,24 @@
   }
 }
 
-void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   dc_4x4(dst, stride, above, left, 1, 1);
 }
 
-void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)above;
   dc_4x4(dst, stride, NULL, left, 0, 1);
 }
 
-void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)left;
   dc_4x4(dst, stride, above, NULL, 1, 0);
 }
 
-void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -128,24 +128,24 @@
   }
 }
 
-void vpx_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   dc_8x8(dst, stride, above, left, 1, 1);
 }
 
-void vpx_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)above;
   dc_8x8(dst, stride, NULL, left, 0, 1);
 }
 
-void vpx_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)left;
   dc_8x8(dst, stride, above, NULL, 1, 0);
 }
 
-void vpx_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -201,26 +201,26 @@
   }
 }
 
-void vpx_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   dc_16x16(dst, stride, above, left, 1, 1);
 }
 
-void vpx_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                       const uint8_t *above,
                                       const uint8_t *left) {
   (void)above;
   dc_16x16(dst, stride, NULL, left, 0, 1);
 }
 
-void vpx_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)left;
   dc_16x16(dst, stride, above, NULL, 1, 0);
 }
 
-void vpx_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)above;
@@ -284,26 +284,26 @@
   }
 }
 
-void vpx_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   dc_32x32(dst, stride, above, left, 1, 1);
 }
 
-void vpx_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                       const uint8_t *above,
                                       const uint8_t *left) {
   (void)above;
   dc_32x32(dst, stride, NULL, left, 0, 1);
 }
 
-void vpx_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)left;
   dc_32x32(dst, stride, above, NULL, 1, 0);
 }
 
-void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)above;
@@ -313,7 +313,7 @@
 
 // -----------------------------------------------------------------------------
 
-void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(above));  // top row
   const uint64x1_t A1 = vshr_n_u64(A0, 8);
@@ -336,7 +336,7 @@
   dst[3 * stride + 3] = above[7];
 }
 
-void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   static const uint8_t shuffle1[8] = { 1, 2, 3, 4, 5, 6, 7, 7 };
   static const uint8_t shuffle2[8] = { 2, 3, 4, 5, 6, 7, 7, 7 };
@@ -356,7 +356,7 @@
   vst1_u8(dst + i * stride, row);
 }
 
-void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
   const uint8x16_t A0 = vld1q_u8(above);  // top row
   const uint8x16_t above_right = vld1q_dup_u8(above + 15);
@@ -375,7 +375,7 @@
 
 // -----------------------------------------------------------------------------
 
-void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
   const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
@@ -405,7 +405,7 @@
 
 #if !HAVE_NEON_ASM
 
-void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   int i;
   uint32x2_t d0u32 = vdup_n_u32(0);
@@ -416,7 +416,7 @@
     vst1_lane_u32((uint32_t *)dst, d0u32, 0);
 }
 
-void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x8_t d0u8 = vdup_n_u8(0);
@@ -426,7 +426,7 @@
   for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8);
 }
 
-void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x16_t q0u8 = vdupq_n_u8(0);
@@ -436,7 +436,7 @@
   for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8);
 }
 
-void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x16_t q0u8 = vdupq_n_u8(0);
@@ -451,7 +451,7 @@
   }
 }
 
-void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   uint8x8_t d0u8 = vdup_n_u8(0);
   uint32x2_t d1u32 = vdup_n_u32(0);
@@ -472,7 +472,7 @@
   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
 }
 
-void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   uint8x8_t d0u8 = vdup_n_u8(0);
   uint64x1_t d1u64 = vdup_n_u64(0);
@@ -505,7 +505,7 @@
   vst1_u8(dst, d0u8);
 }
 
-void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int j;
   uint8x8_t d2u8 = vdup_n_u8(0);
@@ -543,7 +543,7 @@
   }
 }
 
-void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint8x8_t d2u8 = vdup_n_u8(0);
@@ -591,7 +591,7 @@
   }
 }
 
-void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int i;
   uint16x8_t q1u16, q3u16;
@@ -611,7 +611,7 @@
   }
 }
 
-void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int j;
   uint16x8_t q0u16, q3u16, q10u16;
@@ -653,7 +653,7 @@
   }
 }
 
-void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
@@ -716,7 +716,7 @@
   }
 }
 
-void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
diff --git a/aom_dsp/arm/intrapred_neon_asm.asm b/aom_dsp/arm/intrapred_neon_asm.asm
index 115790d..6014a09 100644
--- a/aom_dsp/arm/intrapred_neon_asm.asm
+++ b/aom_dsp/arm/intrapred_neon_asm.asm
@@ -8,25 +8,25 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_v_predictor_4x4_neon|
-    EXPORT  |vpx_v_predictor_8x8_neon|
-    EXPORT  |vpx_v_predictor_16x16_neon|
-    EXPORT  |vpx_v_predictor_32x32_neon|
-    EXPORT  |vpx_h_predictor_4x4_neon|
-    EXPORT  |vpx_h_predictor_8x8_neon|
-    EXPORT  |vpx_h_predictor_16x16_neon|
-    EXPORT  |vpx_h_predictor_32x32_neon|
-    EXPORT  |vpx_tm_predictor_4x4_neon|
-    EXPORT  |vpx_tm_predictor_8x8_neon|
-    EXPORT  |vpx_tm_predictor_16x16_neon|
-    EXPORT  |vpx_tm_predictor_32x32_neon|
+    EXPORT  |aom_v_predictor_4x4_neon|
+    EXPORT  |aom_v_predictor_8x8_neon|
+    EXPORT  |aom_v_predictor_16x16_neon|
+    EXPORT  |aom_v_predictor_32x32_neon|
+    EXPORT  |aom_h_predictor_4x4_neon|
+    EXPORT  |aom_h_predictor_8x8_neon|
+    EXPORT  |aom_h_predictor_16x16_neon|
+    EXPORT  |aom_h_predictor_32x32_neon|
+    EXPORT  |aom_tm_predictor_4x4_neon|
+    EXPORT  |aom_tm_predictor_8x8_neon|
+    EXPORT  |aom_tm_predictor_16x16_neon|
+    EXPORT  |aom_tm_predictor_32x32_neon|
     ARM
     REQUIRE8
     PRESERVE8
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-;void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                              const uint8_t *above,
 ;                              const uint8_t *left)
 ; r0  uint8_t *dst
@@ -34,16 +34,16 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_v_predictor_4x4_neon| PROC
+|aom_v_predictor_4x4_neon| PROC
     vld1.32             {d0[0]}, [r2]
     vst1.32             {d0[0]}, [r0], r1
     vst1.32             {d0[0]}, [r0], r1
     vst1.32             {d0[0]}, [r0], r1
     vst1.32             {d0[0]}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_v_predictor_4x4_neon|
+    ENDP                ; |aom_v_predictor_4x4_neon|
 
-;void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                              const uint8_t *above,
 ;                              const uint8_t *left)
 ; r0  uint8_t *dst
@@ -51,7 +51,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_v_predictor_8x8_neon| PROC
+|aom_v_predictor_8x8_neon| PROC
     vld1.8              {d0}, [r2]
     vst1.8              {d0}, [r0], r1
     vst1.8              {d0}, [r0], r1
@@ -62,9 +62,9 @@
     vst1.8              {d0}, [r0], r1
     vst1.8              {d0}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_v_predictor_8x8_neon|
+    ENDP                ; |aom_v_predictor_8x8_neon|
 
-;void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -72,7 +72,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_v_predictor_16x16_neon| PROC
+|aom_v_predictor_16x16_neon| PROC
     vld1.8              {q0}, [r2]
     vst1.8              {q0}, [r0], r1
     vst1.8              {q0}, [r0], r1
@@ -91,9 +91,9 @@
     vst1.8              {q0}, [r0], r1
     vst1.8              {q0}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_v_predictor_16x16_neon|
+    ENDP                ; |aom_v_predictor_16x16_neon|
 
-;void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -101,7 +101,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_v_predictor_32x32_neon| PROC
+|aom_v_predictor_32x32_neon| PROC
     vld1.8              {q0, q1}, [r2]
     mov                 r2, #2
 loop_v
@@ -124,9 +124,9 @@
     subs                r2, r2, #1
     bgt                 loop_v
     bx                  lr
-    ENDP                ; |vpx_v_predictor_32x32_neon|
+    ENDP                ; |aom_v_predictor_32x32_neon|
 
-;void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                              const uint8_t *above,
 ;                              const uint8_t *left)
 ; r0  uint8_t *dst
@@ -134,7 +134,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_h_predictor_4x4_neon| PROC
+|aom_h_predictor_4x4_neon| PROC
     vld1.32             {d1[0]}, [r3]
     vdup.8              d0, d1[0]
     vst1.32             {d0[0]}, [r0], r1
@@ -145,9 +145,9 @@
     vdup.8              d0, d1[3]
     vst1.32             {d0[0]}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_h_predictor_4x4_neon|
+    ENDP                ; |aom_h_predictor_4x4_neon|
 
-;void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                              const uint8_t *above,
 ;                              const uint8_t *left)
 ; r0  uint8_t *dst
@@ -155,7 +155,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_h_predictor_8x8_neon| PROC
+|aom_h_predictor_8x8_neon| PROC
     vld1.64             {d1}, [r3]
     vdup.8              d0, d1[0]
     vst1.64             {d0}, [r0], r1
@@ -174,9 +174,9 @@
     vdup.8              d0, d1[7]
     vst1.64             {d0}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_h_predictor_8x8_neon|
+    ENDP                ; |aom_h_predictor_8x8_neon|
 
-;void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -184,7 +184,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_h_predictor_16x16_neon| PROC
+|aom_h_predictor_16x16_neon| PROC
     vld1.8              {q1}, [r3]
     vdup.8              q0, d2[0]
     vst1.8              {q0}, [r0], r1
@@ -219,9 +219,9 @@
     vdup.8              q0, d3[7]
     vst1.8              {q0}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_h_predictor_16x16_neon|
+    ENDP                ; |aom_h_predictor_16x16_neon|
 
-;void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -229,7 +229,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_h_predictor_32x32_neon| PROC
+|aom_h_predictor_32x32_neon| PROC
     sub                 r1, r1, #16
     mov                 r2, #2
 loop_h
@@ -285,9 +285,9 @@
     subs                r2, r2, #1
     bgt                 loop_h
     bx                  lr
-    ENDP                ; |vpx_h_predictor_32x32_neon|
+    ENDP                ; |aom_h_predictor_32x32_neon|
 
-;void vpx_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -295,7 +295,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_tm_predictor_4x4_neon| PROC
+|aom_tm_predictor_4x4_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
     vld1.u8             {d0[]}, [r12]
@@ -331,9 +331,9 @@
     vst1.32             {d0[0]}, [r0], r1
     vst1.32             {d1[0]}, [r0], r1
     bx                  lr
-    ENDP                ; |vpx_tm_predictor_4x4_neon|
+    ENDP                ; |aom_tm_predictor_4x4_neon|
 
-;void vpx_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -341,7 +341,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_tm_predictor_8x8_neon| PROC
+|aom_tm_predictor_8x8_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
     vld1.8              {d0[]}, [r12]
@@ -403,9 +403,9 @@
     vst1.64             {d3}, [r0], r1
 
     bx                  lr
-    ENDP                ; |vpx_tm_predictor_8x8_neon|
+    ENDP                ; |aom_tm_predictor_8x8_neon|
 
-;void vpx_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
 ;                                const uint8_t *above,
 ;                                const uint8_t *left)
 ; r0  uint8_t *dst
@@ -413,7 +413,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_tm_predictor_16x16_neon| PROC
+|aom_tm_predictor_16x16_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
     vld1.8              {d0[]}, [r12]
@@ -496,9 +496,9 @@
     bgt                 loop_16x16_neon
 
     bx                  lr
-    ENDP                ; |vpx_tm_predictor_16x16_neon|
+    ENDP                ; |aom_tm_predictor_16x16_neon|
 
-;void vpx_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
 ;                                  const uint8_t *above,
 ;                                  const uint8_t *left)
 ; r0  uint8_t *dst
@@ -506,7 +506,7 @@
 ; r2  const uint8_t *above
 ; r3  const uint8_t *left
 
-|vpx_tm_predictor_32x32_neon| PROC
+|aom_tm_predictor_32x32_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
     vld1.8              {d0[]}, [r12]
@@ -625,6 +625,6 @@
     bgt                 loop_32x32_neon
 
     bx                  lr
-    ENDP                ; |vpx_tm_predictor_32x32_neon|
+    ENDP                ; |aom_tm_predictor_32x32_neon|
 
     END
diff --git a/aom_dsp/arm/loopfilter_16_neon.asm b/aom_dsp/arm/loopfilter_16_neon.asm
index 5a8fdd6..1f2fc41 100644
--- a/aom_dsp/arm/loopfilter_16_neon.asm
+++ b/aom_dsp/arm/loopfilter_16_neon.asm
@@ -8,12 +8,12 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_lpf_horizontal_4_dual_neon|
+    EXPORT  |aom_lpf_horizontal_4_dual_neon|
     ARM
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-;void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
+;void aom_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
 ;                                    const uint8_t *blimit0,
 ;                                    const uint8_t *limit0,
 ;                                    const uint8_t *thresh0,
@@ -29,7 +29,7 @@
 ; sp+8  const uint8_t *limit1,
 ; sp+12 const uint8_t *thresh1,
 
-|vpx_lpf_horizontal_4_dual_neon| PROC
+|aom_lpf_horizontal_4_dual_neon| PROC
     push        {lr}
 
     ldr         r12, [sp, #4]              ; load thresh0
@@ -66,7 +66,7 @@
     sub         r2, r2, r1, lsl #1
     sub         r3, r3, r1, lsl #1
 
-    bl          vpx_loop_filter_neon_16
+    bl          aom_loop_filter_neon_16
 
     vst1.u8     {q5}, [r2@64], r1          ; store op1
     vst1.u8     {q6}, [r3@64], r1          ; store op0
@@ -76,9 +76,9 @@
     vpop        {d8-d15}                   ; restore neon registers
 
     pop         {pc}
-    ENDP        ; |vpx_lpf_horizontal_4_dual_neon|
+    ENDP        ; |aom_lpf_horizontal_4_dual_neon|
 
-; void vpx_loop_filter_neon_16();
+; void aom_loop_filter_neon_16();
 ; This is a helper function for the loopfilters. The invidual functions do the
 ; necessary load, transpose (if necessary) and store. This function uses
 ; registers d8-d15, so the calling function must save those registers.
@@ -101,7 +101,7 @@
 ; q6    op0
 ; q7    oq0
 ; q8    oq1
-|vpx_loop_filter_neon_16| PROC
+|aom_loop_filter_neon_16| PROC
 
     ; filter_mask
     vabd.u8     q11, q3, q4                 ; m1 = abs(p3 - p2)
@@ -194,6 +194,6 @@
     veor        q8, q12, q10                ; *oq1 = u^0x80
 
     bx          lr
-    ENDP        ; |vpx_loop_filter_neon_16|
+    ENDP        ; |aom_loop_filter_neon_16|
 
     END
diff --git a/aom_dsp/arm/loopfilter_16_neon.c b/aom_dsp/arm/loopfilter_16_neon.c
index 70087f9..a6bc70d 100644
--- a/aom_dsp/arm/loopfilter_16_neon.c
+++ b/aom_dsp/arm/loopfilter_16_neon.c
@@ -10,9 +10,9 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 static INLINE void loop_filter_neon_16(uint8x16_t qblimit,  // blimit
                                        uint8x16_t qlimit,   // limit
@@ -122,7 +122,7 @@
   return;
 }
 
-void vpx_lpf_horizontal_4_dual_neon(
+void aom_lpf_horizontal_4_dual_neon(
     uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
     const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
     const uint8_t *limit1, const uint8_t *thresh1) {
diff --git a/aom_dsp/arm/loopfilter_4_neon.asm b/aom_dsp/arm/loopfilter_4_neon.asm
index 9371158..78be4b8 100644
--- a/aom_dsp/arm/loopfilter_4_neon.asm
+++ b/aom_dsp/arm/loopfilter_4_neon.asm
@@ -8,16 +8,16 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_lpf_horizontal_4_neon|
-    EXPORT  |vpx_lpf_vertical_4_neon|
+    EXPORT  |aom_lpf_horizontal_4_neon|
+    EXPORT  |aom_lpf_vertical_4_neon|
     ARM
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on iterations 8 at a time. The vp8 loop filter
 ; works on 16 iterations at a time.
 ;
-; void vpx_lpf_horizontal_4_neon(uint8_t *s,
+; void aom_lpf_horizontal_4_neon(uint8_t *s,
 ;                                int p /* pitch */,
 ;                                const uint8_t *blimit,
 ;                                const uint8_t *limit,
@@ -28,7 +28,7 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh,
-|vpx_lpf_horizontal_4_neon| PROC
+|aom_lpf_horizontal_4_neon| PROC
     push        {lr}
 
     vld1.8      {d0[]}, [r2]               ; duplicate *blimit
@@ -53,7 +53,7 @@
     sub         r2, r2, r1, lsl #1
     sub         r3, r3, r1, lsl #1
 
-    bl          vpx_loop_filter_neon
+    bl          aom_loop_filter_neon
 
     vst1.u8     {d4}, [r2@64], r1          ; store op1
     vst1.u8     {d5}, [r3@64], r1          ; store op0
@@ -61,12 +61,12 @@
     vst1.u8     {d7}, [r3@64], r1          ; store oq1
 
     pop         {pc}
-    ENDP        ; |vpx_lpf_horizontal_4_neon|
+    ENDP        ; |aom_lpf_horizontal_4_neon|
 
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on iterations 8 at a time. The vp8 loop filter
 ; works on 16 iterations at a time.
 ;
-; void vpx_lpf_vertical_4_neon(uint8_t *s,
+; void aom_lpf_vertical_4_neon(uint8_t *s,
 ;                              int p /* pitch */,
 ;                              const uint8_t *blimit,
 ;                              const uint8_t *limit,
@@ -77,7 +77,7 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh,
-|vpx_lpf_vertical_4_neon| PROC
+|aom_lpf_vertical_4_neon| PROC
     push        {lr}
 
     vld1.8      {d0[]}, [r2]              ; duplicate *blimit
@@ -113,7 +113,7 @@
     vtrn.8      d7, d16
     vtrn.8      d17, d18
 
-    bl          vpx_loop_filter_neon
+    bl          aom_loop_filter_neon
 
     sub         r0, r0, #2
 
@@ -128,9 +128,9 @@
     vst4.8      {d4[7], d5[7], d6[7], d7[7]}, [r0]
 
     pop         {pc}
-    ENDP        ; |vpx_lpf_vertical_4_neon|
+    ENDP        ; |aom_lpf_vertical_4_neon|
 
-; void vpx_loop_filter_neon();
+; void aom_loop_filter_neon();
 ; This is a helper function for the loopfilters. The invidual functions do the
 ; necessary load, transpose (if necessary) and store. The function does not use
 ; registers d8-d15.
@@ -154,7 +154,7 @@
 ; d5    op0
 ; d6    oq0
 ; d7    oq1
-|vpx_loop_filter_neon| PROC
+|aom_loop_filter_neon| PROC
     ; filter_mask
     vabd.u8     d19, d3, d4                 ; m1 = abs(p3 - p2)
     vabd.u8     d20, d4, d5                 ; m2 = abs(p2 - p1)
@@ -244,6 +244,6 @@
     veor        d7, d20, d18                ; *oq1 = u^0x80
 
     bx          lr
-    ENDP        ; |vpx_loop_filter_neon|
+    ENDP        ; |aom_loop_filter_neon|
 
     END
diff --git a/aom_dsp/arm/loopfilter_4_neon.c b/aom_dsp/arm/loopfilter_4_neon.c
index 1c1e80e..74e13bd 100644
--- a/aom_dsp/arm/loopfilter_4_neon.c
+++ b/aom_dsp/arm/loopfilter_4_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 static INLINE void loop_filter_neon(uint8x8_t dblimit,   // flimit
                                     uint8x8_t dlimit,    // limit
@@ -107,7 +107,7 @@
   return;
 }
 
-void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh) {
   int i;
   uint8_t *s, *psrc;
@@ -153,7 +153,7 @@
   return;
 }
 
-void vpx_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
                              const uint8_t *limit, const uint8_t *thresh) {
   int i, pitch8;
   uint8_t *s;
diff --git a/aom_dsp/arm/loopfilter_8_neon.asm b/aom_dsp/arm/loopfilter_8_neon.asm
index a2f20e1..4f6ede2 100644
--- a/aom_dsp/arm/loopfilter_8_neon.asm
+++ b/aom_dsp/arm/loopfilter_8_neon.asm
@@ -8,16 +8,16 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_lpf_horizontal_8_neon|
-    EXPORT  |vpx_lpf_vertical_8_neon|
+    EXPORT  |aom_lpf_horizontal_8_neon|
+    EXPORT  |aom_lpf_vertical_8_neon|
     ARM
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on iterations 8 at a time. The vp8 loop filter
 ; works on 16 iterations at a time.
 ;
-; void vpx_lpf_horizontal_8_neon(uint8_t *s, int p,
+; void aom_lpf_horizontal_8_neon(uint8_t *s, int p,
 ;                                const uint8_t *blimit,
 ;                                const uint8_t *limit,
 ;                                const uint8_t *thresh)
@@ -26,7 +26,7 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh,
-|vpx_lpf_horizontal_8_neon| PROC
+|aom_lpf_horizontal_8_neon| PROC
     push        {r4-r5, lr}
 
     vld1.8      {d0[]}, [r2]               ; duplicate *blimit
@@ -51,7 +51,7 @@
     sub         r3, r3, r1, lsl #1
     sub         r2, r2, r1, lsl #2
 
-    bl          vpx_mbloop_filter_neon
+    bl          aom_mbloop_filter_neon
 
     vst1.u8     {d0}, [r2@64], r1          ; store op2
     vst1.u8     {d1}, [r3@64], r1          ; store op1
@@ -62,9 +62,9 @@
 
     pop         {r4-r5, pc}
 
-    ENDP        ; |vpx_lpf_horizontal_8_neon|
+    ENDP        ; |aom_lpf_horizontal_8_neon|
 
-; void vpx_lpf_vertical_8_neon(uint8_t *s,
+; void aom_lpf_vertical_8_neon(uint8_t *s,
 ;                              int pitch,
 ;                              const uint8_t *blimit,
 ;                              const uint8_t *limit,
@@ -75,7 +75,7 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh,
-|vpx_lpf_vertical_8_neon| PROC
+|aom_lpf_vertical_8_neon| PROC
     push        {r4-r5, lr}
 
     vld1.8      {d0[]}, [r2]              ; duplicate *blimit
@@ -114,7 +114,7 @@
     sub         r2, r0, #3
     add         r3, r0, #1
 
-    bl          vpx_mbloop_filter_neon
+    bl          aom_mbloop_filter_neon
 
     ;store op2, op1, op0, oq0
     vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
@@ -137,9 +137,9 @@
     vst2.8      {d4[7], d5[7]}, [r3]
 
     pop         {r4-r5, pc}
-    ENDP        ; |vpx_lpf_vertical_8_neon|
+    ENDP        ; |aom_lpf_vertical_8_neon|
 
-; void vpx_mbloop_filter_neon();
+; void aom_mbloop_filter_neon();
 ; This is a helper function for the loopfilters. The invidual functions do the
 ; necessary load, transpose (if necessary) and store. The function does not use
 ; registers d8-d15.
@@ -165,7 +165,7 @@
 ; d3    oq0
 ; d4    oq1
 ; d5    oq2
-|vpx_mbloop_filter_neon| PROC
+|aom_mbloop_filter_neon| PROC
     ; filter_mask
     vabd.u8     d19, d3, d4                ; m1 = abs(p3 - p2)
     vabd.u8     d20, d4, d5                ; m2 = abs(p2 - p1)
@@ -420,6 +420,6 @@
 
     bx          lr
 
-    ENDP        ; |vpx_mbloop_filter_neon|
+    ENDP        ; |aom_mbloop_filter_neon|
 
     END
diff --git a/aom_dsp/arm/loopfilter_8_neon.c b/aom_dsp/arm/loopfilter_8_neon.c
index 854196f..54c1d22 100644
--- a/aom_dsp/arm/loopfilter_8_neon.c
+++ b/aom_dsp/arm/loopfilter_8_neon.c
@@ -10,7 +10,7 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 static INLINE void mbloop_filter_neon(uint8x8_t dblimit,   // mblimit
                                       uint8x8_t dlimit,    // limit
@@ -259,7 +259,7 @@
   return;
 }
 
-void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh) {
   int i;
   uint8_t *s, *psrc;
@@ -311,7 +311,7 @@
   return;
 }
 
-void vpx_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
                              const uint8_t *limit, const uint8_t *thresh) {
   int i;
   uint8_t *s;
diff --git a/aom_dsp/arm/loopfilter_mb_neon.asm b/aom_dsp/arm/loopfilter_mb_neon.asm
index d5da7a8..cb20a28 100644
--- a/aom_dsp/arm/loopfilter_mb_neon.asm
+++ b/aom_dsp/arm/loopfilter_mb_neon.asm
@@ -8,9 +8,9 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-    EXPORT  |vpx_lpf_horizontal_edge_8_neon|
-    EXPORT  |vpx_lpf_horizontal_edge_16_neon|
-    EXPORT  |vpx_lpf_vertical_16_neon|
+    EXPORT  |aom_lpf_horizontal_edge_8_neon|
+    EXPORT  |aom_lpf_horizontal_edge_16_neon|
+    EXPORT  |aom_lpf_vertical_16_neon|
     ARM
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
@@ -55,7 +55,7 @@
     vld1.u8     {d14}, [r8@64], r1         ; q6
     vld1.u8     {d15}, [r8@64], r1         ; q7
 
-    bl          vpx_wide_mbfilter_neon
+    bl          aom_wide_mbfilter_neon
 
     tst         r7, #1
     beq         h_mbfilter
@@ -118,7 +118,7 @@
 
     ENDP        ; |mb_lpf_horizontal_edge|
 
-; void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch,
+; void aom_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch,
 ;                                     const uint8_t *blimit,
 ;                                     const uint8_t *limit,
 ;                                     const uint8_t *thresh)
@@ -127,12 +127,12 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh
-|vpx_lpf_horizontal_edge_8_neon| PROC
+|aom_lpf_horizontal_edge_8_neon| PROC
     mov r12, #1
     b mb_lpf_horizontal_edge
-    ENDP        ; |vpx_lpf_horizontal_edge_8_neon|
+    ENDP        ; |aom_lpf_horizontal_edge_8_neon|
 
-; void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch,
+; void aom_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch,
 ;                                      const uint8_t *blimit,
 ;                                      const uint8_t *limit,
 ;                                      const uint8_t *thresh)
@@ -141,12 +141,12 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh
-|vpx_lpf_horizontal_edge_16_neon| PROC
+|aom_lpf_horizontal_edge_16_neon| PROC
     mov r12, #2
     b mb_lpf_horizontal_edge
-    ENDP        ; |vpx_lpf_horizontal_edge_16_neon|
+    ENDP        ; |aom_lpf_horizontal_edge_16_neon|
 
-; void vpx_lpf_vertical_16_neon(uint8_t *s, int p,
+; void aom_lpf_vertical_16_neon(uint8_t *s, int p,
 ;                               const uint8_t *blimit,
 ;                               const uint8_t *limit,
 ;                               const uint8_t *thresh)
@@ -155,7 +155,7 @@
 ; r2    const uint8_t *blimit,
 ; r3    const uint8_t *limit,
 ; sp    const uint8_t *thresh,
-|vpx_lpf_vertical_16_neon| PROC
+|aom_lpf_vertical_16_neon| PROC
     push        {r4-r8, lr}
     vpush       {d8-d15}
     ldr         r4, [sp, #88]              ; load thresh
@@ -205,7 +205,7 @@
     vtrn.8      d12, d13
     vtrn.8      d14, d15
 
-    bl          vpx_wide_mbfilter_neon
+    bl          aom_wide_mbfilter_neon
 
     tst         r7, #1
     beq         v_mbfilter
@@ -308,9 +308,9 @@
     vpop        {d8-d15}
     pop         {r4-r8, pc}
 
-    ENDP        ; |vpx_lpf_vertical_16_neon|
+    ENDP        ; |aom_lpf_vertical_16_neon|
 
-; void vpx_wide_mbfilter_neon();
+; void aom_wide_mbfilter_neon();
 ; This is a helper function for the loopfilters. The invidual functions do the
 ; necessary load, transpose (if necessary) and store.
 ;
@@ -334,7 +334,7 @@
 ; d13   q5
 ; d14   q6
 ; d15   q7
-|vpx_wide_mbfilter_neon| PROC
+|aom_wide_mbfilter_neon| PROC
     mov         r7, #0
 
     ; filter_mask
@@ -630,6 +630,6 @@
     vbif        d3, d14, d17               ; oq6 |= q6 & ~(f2 & f & m)
 
     bx          lr
-    ENDP        ; |vpx_wide_mbfilter_neon|
+    ENDP        ; |aom_wide_mbfilter_neon|
 
     END
diff --git a/aom_dsp/arm/loopfilter_neon.c b/aom_dsp/arm/loopfilter_neon.c
index 04c163a..da28e27 100644
--- a/aom_dsp/arm/loopfilter_neon.c
+++ b/aom_dsp/arm/loopfilter_neon.c
@@ -10,39 +10,39 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
-void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
                                   const uint8_t *limit0, const uint8_t *thresh0,
                                   const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
-  vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1);
+  aom_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0);
+  aom_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1);
 }
 
 #if HAVE_NEON_ASM
-void vpx_lpf_horizontal_8_dual_neon(
+void aom_lpf_horizontal_8_dual_neon(
     uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
     const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
     const uint8_t *limit1, const uint8_t *thresh1) {
-  vpx_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1);
+  aom_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0);
+  aom_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
                                   const uint8_t *limit0, const uint8_t *thresh0,
                                   const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
-  vpx_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1);
+  aom_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0);
+  aom_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit,
                                    const uint8_t *thresh) {
-  vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
-  vpx_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
+  aom_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
+  aom_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
 }
 #endif  // HAVE_NEON_ASM
diff --git a/aom_dsp/arm/sad4d_neon.c b/aom_dsp/arm/sad4d_neon.c
index 11f13be..e94029e 100644
--- a/aom_dsp/arm/sad4d_neon.c
+++ b/aom_dsp/arm/sad4d_neon.c
@@ -10,9 +10,9 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
                                                     const uint16x8_t vec_hi) {
@@ -78,7 +78,7 @@
                              vget_high_u8(vec_ref_16));
 }
 
-void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad64x64x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
@@ -124,7 +124,7 @@
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
 
-void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad32x32x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
@@ -168,7 +168,7 @@
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
 
-void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad16x16x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
diff --git a/aom_dsp/arm/sad_media.asm b/aom_dsp/arm/sad_media.asm
index aed1d3a..9d815a2 100644
--- a/aom_dsp/arm/sad_media.asm
+++ b/aom_dsp/arm/sad_media.asm
@@ -9,7 +9,7 @@
 ;
 
 
-    EXPORT  |vpx_sad16x16_media|
+    EXPORT  |aom_sad16x16_media|
 
     ARM
     REQUIRE8
@@ -21,7 +21,7 @@
 ; r1    int  src_stride
 ; r2    const unsigned char *ref_ptr
 ; r3    int  ref_stride
-|vpx_sad16x16_media| PROC
+|aom_sad16x16_media| PROC
     stmfd   sp!, {r4-r12, lr}
 
     pld     [r0, r1, lsl #0]
diff --git a/aom_dsp/arm/sad_neon.c b/aom_dsp/arm/sad_neon.c
index 19fa109..274b6d3 100644
--- a/aom_dsp/arm/sad_neon.c
+++ b/aom_dsp/arm/sad_neon.c
@@ -10,11 +10,11 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
-unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad8x16_neon(unsigned char *src_ptr, int src_stride,
                               unsigned char *ref_ptr, int ref_stride) {
   uint8x8_t d0, d8;
   uint16x8_t q12;
@@ -45,7 +45,7 @@
   return vget_lane_u32(d5, 0);
 }
 
-unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad4x4_neon(unsigned char *src_ptr, int src_stride,
                              unsigned char *ref_ptr, int ref_stride) {
   uint8x8_t d0, d8;
   uint16x8_t q12;
@@ -73,7 +73,7 @@
   return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
 }
 
-unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad16x8_neon(unsigned char *src_ptr, int src_stride,
                               unsigned char *ref_ptr, int ref_stride) {
   uint8x16_t q0, q4;
   uint16x8_t q12, q13;
@@ -127,7 +127,7 @@
   return vget_lane_u32(c, 0);
 }
 
-unsigned int vpx_sad64x64_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad64x64_neon(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride) {
   int i;
   uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -163,7 +163,7 @@
   return horizontal_long_add_16x8(vec_accum_lo, vec_accum_hi);
 }
 
-unsigned int vpx_sad32x32_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad32x32_neon(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride) {
   int i;
   uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -188,7 +188,7 @@
   return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
 }
 
-unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad16x16_neon(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride) {
   int i;
   uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -207,7 +207,7 @@
   return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
 }
 
-unsigned int vpx_sad8x8_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad8x8_neon(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride) {
   int i;
   uint16x8_t vec_accum = vdupq_n_u16(0);
diff --git a/aom_dsp/arm/save_reg_neon.asm b/aom_dsp/arm/save_reg_neon.asm
index c9ca108..b802792 100644
--- a/aom_dsp/arm/save_reg_neon.asm
+++ b/aom_dsp/arm/save_reg_neon.asm
@@ -9,8 +9,8 @@
 ;
 
 
-    EXPORT  |vpx_push_neon|
-    EXPORT  |vpx_pop_neon|
+    EXPORT  |aom_push_neon|
+    EXPORT  |aom_pop_neon|
 
     ARM
     REQUIRE8
@@ -18,14 +18,14 @@
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
-|vpx_push_neon| PROC
+|aom_push_neon| PROC
     vst1.i64            {d8, d9, d10, d11}, [r0]!
     vst1.i64            {d12, d13, d14, d15}, [r0]!
     bx              lr
 
     ENDP
 
-|vpx_pop_neon| PROC
+|aom_pop_neon| PROC
     vld1.i64            {d8, d9, d10, d11}, [r0]!
     vld1.i64            {d12, d13, d14, d15}, [r0]!
     bx              lr
diff --git a/aom_dsp/arm/subpel_variance_media.c b/aom_dsp/arm/subpel_variance_media.c
index 69b1b33..2704f5a 100644
--- a/aom_dsp/arm/subpel_variance_media.c
+++ b/aom_dsp/arm/subpel_variance_media.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_MEDIA
@@ -19,15 +19,15 @@
                                                       { 64, 64 }, { 48, 80 },
                                                       { 32, 96 }, { 16, 112 } };
 
-extern void vpx_filter_block2d_bil_first_pass_media(
+extern void aom_filter_block2d_bil_first_pass_media(
     const uint8_t *src_ptr, uint16_t *dst_ptr, uint32_t src_pitch,
     uint32_t height, uint32_t width, const int16_t *filter);
 
-extern void vpx_filter_block2d_bil_second_pass_media(
+extern void aom_filter_block2d_bil_second_pass_media(
     const uint16_t *src_ptr, uint8_t *dst_ptr, int32_t src_pitch,
     uint32_t height, uint32_t width, const int16_t *filter);
 
-unsigned int vpx_sub_pixel_variance8x8_media(
+unsigned int aom_sub_pixel_variance8x8_media(
     const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
     const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
   uint16_t first_pass[10 * 8];
@@ -37,16 +37,16 @@
   HFilter = bilinear_filters_media[xoffset];
   VFilter = bilinear_filters_media[yoffset];
 
-  vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
+  aom_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
                                           src_pixels_per_line, 9, 8, HFilter);
-  vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
+  aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
                                            VFilter);
 
-  return vpx_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
+  return aom_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
                                sse);
 }
 
-unsigned int vpx_sub_pixel_variance16x16_media(
+unsigned int aom_sub_pixel_variance16x16_media(
     const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
     const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
   uint16_t first_pass[36 * 16];
@@ -55,24 +55,24 @@
   unsigned int var;
 
   if (xoffset == 4 && yoffset == 0) {
-    var = vpx_variance_halfpixvar16x16_h_media(
+    var = aom_variance_halfpixvar16x16_h_media(
         src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == 0 && yoffset == 4) {
-    var = vpx_variance_halfpixvar16x16_v_media(
+    var = aom_variance_halfpixvar16x16_v_media(
         src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == 4 && yoffset == 4) {
-    var = vpx_variance_halfpixvar16x16_hv_media(
+    var = aom_variance_halfpixvar16x16_hv_media(
         src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else {
     HFilter = bilinear_filters_media[xoffset];
     VFilter = bilinear_filters_media[yoffset];
 
-    vpx_filter_block2d_bil_first_pass_media(
+    aom_filter_block2d_bil_first_pass_media(
         src_ptr, first_pass, src_pixels_per_line, 17, 16, HFilter);
-    vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
+    aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
                                              16, VFilter);
 
-    var = vpx_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
+    var = aom_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
                                   sse);
   }
   return var;
diff --git a/aom_dsp/arm/subpel_variance_neon.c b/aom_dsp/arm/subpel_variance_neon.c
index caa3f4a..f04235d 100644
--- a/aom_dsp/arm/subpel_variance_neon.c
+++ b/aom_dsp/arm/subpel_variance_neon.c
@@ -9,11 +9,11 @@
  */
 
 #include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
 #include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #include "aom_dsp/variance.h"
 
@@ -73,7 +73,7 @@
   }
 }
 
-unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
                                             int xoffset, int yoffset,
                                             const uint8_t *dst, int dst_stride,
                                             unsigned int *sse) {
@@ -84,10 +84,10 @@
                             bilinear_filters[xoffset]);
   var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, 8,
                             bilinear_filters[yoffset]);
-  return vpx_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
+  return aom_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
 }
 
-unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance16x16_neon(const uint8_t *src,
                                               int src_stride, int xoffset,
                                               int yoffset, const uint8_t *dst,
                                               int dst_stride,
@@ -99,10 +99,10 @@
                              bilinear_filters[xoffset]);
   var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16,
                              bilinear_filters[yoffset]);
-  return vpx_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
+  return aom_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
 }
 
-unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance32x32_neon(const uint8_t *src,
                                               int src_stride, int xoffset,
                                               int yoffset, const uint8_t *dst,
                                               int dst_stride,
@@ -114,10 +114,10 @@
                              bilinear_filters[xoffset]);
   var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32,
                              bilinear_filters[yoffset]);
-  return vpx_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
+  return aom_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
 }
 
-unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance64x64_neon(const uint8_t *src,
                                               int src_stride, int xoffset,
                                               int yoffset, const uint8_t *dst,
                                               int dst_stride,
@@ -129,5 +129,5 @@
                              bilinear_filters[xoffset]);
   var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64,
                              bilinear_filters[yoffset]);
-  return vpx_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
+  return aom_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
 }
diff --git a/aom_dsp/arm/subtract_neon.c b/aom_dsp/arm/subtract_neon.c
index ab7157c..27b37f3 100644
--- a/aom_dsp/arm/subtract_neon.c
+++ b/aom_dsp/arm/subtract_neon.c
@@ -10,10 +10,10 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
-void vpx_subtract_block_neon(int rows, int cols, int16_t *diff,
+void aom_subtract_block_neon(int rows, int cols, int16_t *diff,
                              ptrdiff_t diff_stride, const uint8_t *src,
                              ptrdiff_t src_stride, const uint8_t *pred,
                              ptrdiff_t pred_stride) {
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
index dab845a..52214f7 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
@@ -9,7 +9,7 @@
 ;
 
 
-    EXPORT  |vpx_variance_halfpixvar16x16_h_media|
+    EXPORT  |aom_variance_halfpixvar16x16_h_media|
 
     ARM
     REQUIRE8
@@ -22,7 +22,7 @@
 ; r2    unsigned char *ref_ptr
 ; r3    int  recon_stride
 ; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_h_media| PROC
+|aom_variance_halfpixvar16x16_h_media| PROC
 
     stmfd   sp!, {r4-r12, lr}
 
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
index 01953b7..a3f60fc 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
@@ -9,7 +9,7 @@
 ;
 
 
-    EXPORT  |vpx_variance_halfpixvar16x16_hv_media|
+    EXPORT  |aom_variance_halfpixvar16x16_hv_media|
 
     ARM
     REQUIRE8
@@ -22,7 +22,7 @@
 ; r2    unsigned char *ref_ptr
 ; r3    int  recon_stride
 ; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_hv_media| PROC
+|aom_variance_halfpixvar16x16_hv_media| PROC
 
     stmfd   sp!, {r4-r12, lr}
 
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
index 0d17acb..b8071be 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
@@ -9,7 +9,7 @@
 ;
 
 
-    EXPORT  |vpx_variance_halfpixvar16x16_v_media|
+    EXPORT  |aom_variance_halfpixvar16x16_v_media|
 
     ARM
     REQUIRE8
@@ -22,7 +22,7 @@
 ; r2    unsigned char *ref_ptr
 ; r3    int  recon_stride
 ; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_v_media| PROC
+|aom_variance_halfpixvar16x16_v_media| PROC
 
     stmfd   sp!, {r4-r12, lr}
 
diff --git a/aom_dsp/arm/variance_media.asm b/aom_dsp/arm/variance_media.asm
index f7f9e14..8a21fdc 100644
--- a/aom_dsp/arm/variance_media.asm
+++ b/aom_dsp/arm/variance_media.asm
@@ -9,9 +9,9 @@
 ;
 
 
-    EXPORT  |vpx_variance16x16_media|
-    EXPORT  |vpx_variance8x8_media|
-    EXPORT  |vpx_mse16x16_media|
+    EXPORT  |aom_variance16x16_media|
+    EXPORT  |aom_variance8x8_media|
+    EXPORT  |aom_mse16x16_media|
 
     ARM
     REQUIRE8
@@ -24,7 +24,7 @@
 ; r2    unsigned char *ref_ptr
 ; r3    int  recon_stride
 ; stack unsigned int *sse
-|vpx_variance16x16_media| PROC
+|aom_variance16x16_media| PROC
 
     stmfd   sp!, {r4-r12, lr}
 
@@ -157,7 +157,7 @@
 ; r2    unsigned char *ref_ptr
 ; r3    int  recon_stride
 ; stack unsigned int *sse
-|vpx_variance8x8_media| PROC
+|aom_variance8x8_media| PROC
 
     push    {r4-r10, lr}
 
@@ -241,10 +241,10 @@
 ; r3    int  recon_stride
 ; stack unsigned int *sse
 ;
-;note: Based on vpx_variance16x16_media. In this function, sum is never used.
+;note: Based on aom_variance16x16_media. In this function, sum is never used.
 ;      So, we can remove this part of calculation.
 
-|vpx_mse16x16_media| PROC
+|aom_mse16x16_media| PROC
 
     push    {r4-r9, lr}
 
diff --git a/aom_dsp/arm/variance_neon.c b/aom_dsp/arm/variance_neon.c
index fcf6e45..1fbf470 100644
--- a/aom_dsp/arm/variance_neon.c
+++ b/aom_dsp/arm/variance_neon.c
@@ -10,10 +10,10 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
@@ -60,17 +60,17 @@
   *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
 }
 
-void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                         int b_stride, unsigned int *sse, int *sum) {
   variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum);
 }
 
-void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                           int b_stride, unsigned int *sse, int *sum) {
   variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum);
 }
 
-unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance8x8_neon(const uint8_t *a, int a_stride,
                                   const uint8_t *b, int b_stride,
                                   unsigned int *sse) {
   int sum;
@@ -78,7 +78,7 @@
   return *sse - (((int64_t)sum * sum) >> 6);  //  >> 6 = / 8 * 8
 }
 
-unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance16x16_neon(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     unsigned int *sse) {
   int sum;
@@ -86,7 +86,7 @@
   return *sse - (((int64_t)sum * sum) >> 8);  //  >> 8 = / 16 * 16
 }
 
-unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance32x32_neon(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     unsigned int *sse) {
   int sum;
@@ -94,7 +94,7 @@
   return *sse - (((int64_t)sum * sum) >> 10);  // >> 10 = / 32 * 32
 }
 
-unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance32x64_neon(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     unsigned int *sse) {
   int sum1, sum2;
@@ -107,7 +107,7 @@
   return *sse - (((int64_t)sum1 * sum1) >> 11);  // >> 11 = / 32 * 64
 }
 
-unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance64x32_neon(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     unsigned int *sse) {
   int sum1, sum2;
@@ -120,7 +120,7 @@
   return *sse - (((int64_t)sum1 * sum1) >> 11);  // >> 11 = / 32 * 64
 }
 
-unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance64x64_neon(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     unsigned int *sse) {
   int sum1, sum2;
@@ -144,7 +144,7 @@
   return *sse - (((int64_t)sum1 * sum1) >> 12);  // >> 12 = / 64 * 64
 }
 
-unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr,
+unsigned int aom_variance16x8_neon(const unsigned char *src_ptr,
                                    int source_stride,
                                    const unsigned char *ref_ptr,
                                    int recon_stride, unsigned int *sse) {
@@ -220,7 +220,7 @@
   return vget_lane_u32(d0u32, 0);
 }
 
-unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr,
+unsigned int aom_variance8x16_neon(const unsigned char *src_ptr,
                                    int source_stride,
                                    const unsigned char *ref_ptr,
                                    int recon_stride, unsigned int *sse) {
@@ -282,7 +282,7 @@
   return vget_lane_u32(d0u32, 0);
 }
 
-unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
+unsigned int aom_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
                                const unsigned char *ref_ptr, int recon_stride,
                                unsigned int *sse) {
   int i;
@@ -345,7 +345,7 @@
   return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
 }
 
-unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
+unsigned int aom_get4x4sse_cs_neon(const unsigned char *src_ptr,
                                    int source_stride,
                                    const unsigned char *ref_ptr,
                                    int recon_stride) {
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index d3e4578..bf0bb5b 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -9,10 +9,10 @@
  */
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
-unsigned int vpx_avg_8x8_c(const uint8_t *src, int stride) {
+unsigned int aom_avg_8x8_c(const uint8_t *src, int stride) {
   int i, j;
   int sum = 0;
   for (i = 0; i < 8; ++i, src += stride)
@@ -22,7 +22,7 @@
   return ROUND_POWER_OF_TWO(sum, 6);
 }
 
-unsigned int vpx_avg_4x4_c(const uint8_t *src, int stride) {
+unsigned int aom_avg_4x4_c(const uint8_t *src, int stride) {
   int i, j;
   int sum = 0;
   for (i = 0; i < 4; ++i, src += stride)
@@ -66,7 +66,7 @@
 
 // The order of the output coeff of the hadamard is not important. For
 // optimization purposes the final transpose may be skipped.
-void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
+void aom_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
                         int16_t *coeff) {
   int idx;
   int16_t buffer[64];
@@ -89,14 +89,14 @@
 }
 
 // In place 16x16 2D Hadamard transform
-void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
+void aom_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
                           int16_t *coeff) {
   int idx;
   for (idx = 0; idx < 4; ++idx) {
     // src_diff: 9 bit, dynamic range [-255, 255]
     const int16_t *src_ptr =
         src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
-    vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
+    aom_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
   }
 
   // coeff: 15 bit, dynamic range [-16320, 16320]
@@ -122,7 +122,7 @@
 
 // coeff: 16 bits, dynamic range [-32640, 32640].
 // length: value range {16, 64, 256, 1024}.
-int vpx_satd_c(const int16_t *coeff, int length) {
+int aom_satd_c(const int16_t *coeff, int length) {
   int i;
   int satd = 0;
   for (i = 0; i < length; ++i) satd += abs(coeff[i]);
@@ -133,7 +133,7 @@
 
 // Integer projection onto row vectors.
 // height: value range {16, 32, 64}.
-void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
+void aom_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
                        const int ref_stride, const int height) {
   int idx;
   const int norm_factor = height >> 1;
@@ -149,7 +149,7 @@
 }
 
 // width: value range {16, 32, 64}.
-int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
+int16_t aom_int_pro_col_c(const uint8_t *ref, const int width) {
   int idx;
   int16_t sum = 0;
   // sum: 14 bit, dynamic range [0, 16320]
@@ -160,7 +160,7 @@
 // ref: [0 - 510]
 // src: [0 - 510]
 // bwl: {2, 3, 4}
-int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
+int aom_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
   int i;
   int width = 4 << bwl;
   int sse = 0, mean = 0, var;
@@ -176,7 +176,7 @@
   return var;
 }
 
-void vpx_minmax_8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref,
+void aom_minmax_8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref,
                       int ref_stride, int *min, int *max) {
   int i, j;
   *min = 255;
@@ -190,8 +190,8 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t *src, int stride) {
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int aom_highbd_avg_8x8_c(const uint8_t *src, int stride) {
   int i, j;
   int sum = 0;
   const uint16_t *s = CONVERT_TO_SHORTPTR(src);
@@ -202,7 +202,7 @@
   return ROUND_POWER_OF_TWO(sum, 6);
 }
 
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t *src, int stride) {
+unsigned int aom_highbd_avg_4x4_c(const uint8_t *src, int stride) {
   int i, j;
   int sum = 0;
   const uint16_t *s = CONVERT_TO_SHORTPTR(src);
@@ -213,7 +213,7 @@
   return ROUND_POWER_OF_TWO(sum, 4);
 }
 
-void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
+void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                              int dp, int *min, int *max) {
   int i, j;
   const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
@@ -228,4 +228,4 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/bitreader.c b/aom_dsp/bitreader.c
index 0942005..60c546d 100644
--- a/aom_dsp/bitreader.c
+++ b/aom_dsp/bitreader.c
@@ -9,17 +9,17 @@
  */
 #include <stdlib.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "aom_dsp/bitreader.h"
 #include "aom_dsp/prob.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_util/endian_inl.h"
 
-int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
-                    vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
+int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size,
+                    aom_decrypt_cb decrypt_cb, void *decrypt_state) {
   if (size && !buffer) {
     return 1;
   } else {
@@ -30,12 +30,12 @@
     r->range = 255;
     r->decrypt_cb = decrypt_cb;
     r->decrypt_state = decrypt_state;
-    vpx_reader_fill(r);
-    return vpx_read_bit(r) != 0;  // marker bit
+    aom_reader_fill(r);
+    return aom_read_bit(r) != 0;  // marker bit
   }
 }
 
-void vpx_reader_fill(vpx_reader *r) {
+void aom_reader_fill(aom_reader *r) {
   const uint8_t *const buffer_end = r->buffer_end;
   const uint8_t *buffer = r->buffer;
   const uint8_t *buffer_start = buffer;
@@ -46,7 +46,7 @@
   int shift = BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT);
 
   if (r->decrypt_cb) {
-    size_t n = VPXMIN(sizeof(r->clear_buffer), bytes_left);
+    size_t n = AOMMIN(sizeof(r->clear_buffer), bytes_left);
     r->decrypt_cb(r->decrypt_state, buffer, r->clear_buffer, (int)n);
     buffer = r->clear_buffer;
     buffer_start = r->clear_buffer;
@@ -90,7 +90,7 @@
   r->count = count;
 }
 
-const uint8_t *vpx_reader_find_end(vpx_reader *r) {
+const uint8_t *aom_reader_find_end(aom_reader *r) {
   // Find the end of the coded buffer
   while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) {
     r->count -= CHAR_BIT;
diff --git a/aom_dsp/bitreader.h b/aom_dsp/bitreader.h
index d211511..402461d 100644
--- a/aom_dsp/bitreader.h
+++ b/aom_dsp/bitreader.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_BITREADER_H_
-#define VPX_DSP_BITREADER_H_
+#ifndef AOM_DSP_BITREADER_H_
+#define AOM_DSP_BITREADER_H_
 
 #include <limits.h>
 #include <stddef.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #if CONFIG_BITSTREAM_DEBUG
 #include <assert.h>
@@ -22,8 +22,8 @@
 #endif  // CONFIG_BITSTREAM_DEBUG
 
 #include "aom_ports/mem.h"
-#include "aom/vp8dx.h"
-#include "aom/vpx_integer.h"
+#include "aom/aomdx.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
 #include "aom_util/debug_util.h"
 
@@ -47,19 +47,19 @@
   int count;
   const uint8_t *buffer_end;
   const uint8_t *buffer;
-  vpx_decrypt_cb decrypt_cb;
+  aom_decrypt_cb decrypt_cb;
   void *decrypt_state;
   uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
-} vpx_reader;
+} aom_reader;
 
-int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
-                    vpx_decrypt_cb decrypt_cb, void *decrypt_state);
+int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size,
+                    aom_decrypt_cb decrypt_cb, void *decrypt_state);
 
-void vpx_reader_fill(vpx_reader *r);
+void aom_reader_fill(aom_reader *r);
 
-const uint8_t *vpx_reader_find_end(vpx_reader *r);
+const uint8_t *aom_reader_find_end(aom_reader *r);
 
-static INLINE int vpx_reader_has_error(vpx_reader *r) {
+static INLINE int aom_reader_has_error(aom_reader *r) {
   // Check if we have reached the end of the buffer.
   //
   // Variable 'count' stores the number of bits in the 'value' buffer, minus
@@ -77,7 +77,7 @@
   return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS;
 }
 
-static INLINE int vpx_read(vpx_reader *r, int prob) {
+static INLINE int aom_read(aom_reader *r, int prob) {
   unsigned int bit = 0;
   BD_VALUE value;
   BD_VALUE bigsplit;
@@ -85,7 +85,7 @@
   unsigned int range;
   unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT;
 
-  if (r->count < 0) vpx_reader_fill(r);
+  if (r->count < 0) aom_reader_fill(r);
 
   value = r->value;
   count = r->count;
@@ -101,7 +101,7 @@
   }
 
   {
-    register int shift = vpx_norm[range];
+    register int shift = aom_norm[range];
     range <<= shift;
     value <<= shift;
     count -= shift;
@@ -129,23 +129,23 @@
   return bit;
 }
 
-static INLINE int vpx_read_bit(vpx_reader *r) {
-  return vpx_read(r, 128);  // vpx_prob_half
+static INLINE int aom_read_bit(aom_reader *r) {
+  return aom_read(r, 128);  // aom_prob_half
 }
 
-static INLINE int vpx_read_literal(vpx_reader *r, int bits) {
+static INLINE int aom_read_literal(aom_reader *r, int bits) {
   int literal = 0, bit;
 
-  for (bit = bits - 1; bit >= 0; bit--) literal |= vpx_read_bit(r) << bit;
+  for (bit = bits - 1; bit >= 0; bit--) literal |= aom_read_bit(r) << bit;
 
   return literal;
 }
 
-static INLINE int vpx_read_tree(vpx_reader *r, const vpx_tree_index *tree,
-                                const vpx_prob *probs) {
-  vpx_tree_index i = 0;
+static INLINE int aom_read_tree(aom_reader *r, const aom_tree_index *tree,
+                                const aom_prob *probs) {
+  aom_tree_index i = 0;
 
-  while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) continue;
+  while ((i = tree[i + aom_read(r, probs[i >> 1])]) > 0) continue;
 
   return -i;
 }
@@ -154,4 +154,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_BITREADER_H_
+#endif  // AOM_DSP_BITREADER_H_
diff --git a/aom_dsp/bitreader_buffer.c b/aom_dsp/bitreader_buffer.c
index bf88119..cf505e8 100644
--- a/aom_dsp/bitreader_buffer.c
+++ b/aom_dsp/bitreader_buffer.c
@@ -7,14 +7,14 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "./bitreader_buffer.h"
 
-size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb) {
+size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb) {
   return (rb->bit_offset + 7) >> 3;
 }
 
-int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) {
+int aom_rb_read_bit(struct aom_read_bit_buffer *rb) {
   const size_t off = rb->bit_offset;
   const size_t p = off >> 3;
   const int q = 7 - (int)(off & 0x7);
@@ -28,19 +28,19 @@
   }
 }
 
-int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits) {
+int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits) {
   int value = 0, bit;
-  for (bit = bits - 1; bit >= 0; bit--) value |= vpx_rb_read_bit(rb) << bit;
+  for (bit = bits - 1; bit >= 0; bit--) value |= aom_rb_read_bit(rb) << bit;
   return value;
 }
 
-int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
-  const int value = vpx_rb_read_literal(rb, bits);
-  return vpx_rb_read_bit(rb) ? -value : value;
+int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits) {
+  const int value = aom_rb_read_literal(rb, bits);
+  return aom_rb_read_bit(rb) ? -value : value;
 }
 
-int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
+int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits) {
   const int nbits = sizeof(unsigned) * 8 - bits - 1;
-  const unsigned value = (unsigned)vpx_rb_read_literal(rb, bits + 1) << nbits;
+  const unsigned value = (unsigned)aom_rb_read_literal(rb, bits + 1) << nbits;
   return ((int)value) >> nbits;
 }
diff --git a/aom_dsp/bitreader_buffer.h b/aom_dsp/bitreader_buffer.h
index 5e557ea..2f68664 100644
--- a/aom_dsp/bitreader_buffer.h
+++ b/aom_dsp/bitreader_buffer.h
@@ -8,40 +8,40 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_BITREADER_BUFFER_H_
-#define VPX_DSP_BITREADER_BUFFER_H_
+#ifndef AOM_DSP_BITREADER_BUFFER_H_
+#define AOM_DSP_BITREADER_BUFFER_H_
 
 #include <limits.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef void (*vpx_rb_error_handler)(void *data);
+typedef void (*aom_rb_error_handler)(void *data);
 
-struct vpx_read_bit_buffer {
+struct aom_read_bit_buffer {
   const uint8_t *bit_buffer;
   const uint8_t *bit_buffer_end;
   size_t bit_offset;
 
   void *error_handler_data;
-  vpx_rb_error_handler error_handler;
+  aom_rb_error_handler error_handler;
 };
 
-size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb);
+size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb);
 
-int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb);
+int aom_rb_read_bit(struct aom_read_bit_buffer *rb);
 
-int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits);
 
-int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits);
 
-int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_BITREADER_BUFFER_H_
+#endif  // AOM_DSP_BITREADER_BUFFER_H_
diff --git a/aom_dsp/bitwriter.c b/aom_dsp/bitwriter.c
index 0abe351..9009a44 100644
--- a/aom_dsp/bitwriter.c
+++ b/aom_dsp/bitwriter.c
@@ -12,23 +12,23 @@
 
 #include "./bitwriter.h"
 
-void vpx_start_encode(vpx_writer *br, uint8_t *source) {
+void aom_start_encode(aom_writer *br, uint8_t *source) {
   br->lowvalue = 0;
   br->range = 255;
   br->count = -24;
   br->buffer = source;
   br->pos = 0;
-  vpx_write_bit(br, 0);
+  aom_write_bit(br, 0);
 }
 
-void vpx_stop_encode(vpx_writer *br) {
+void aom_stop_encode(aom_writer *br) {
   int i;
 
 #if CONFIG_BITSTREAM_DEBUG
   bitstream_queue_set_skip_write(1);
 #endif  // CONFIG_BITSTREAM_DEBUG
 
-  for (i = 0; i < 32; i++) vpx_write_bit(br, 0);
+  for (i = 0; i < 32; i++) aom_write_bit(br, 0);
 
 #if CONFIG_BITSTREAM_DEBUG
   bitstream_queue_set_skip_write(0);
diff --git a/aom_dsp/bitwriter.h b/aom_dsp/bitwriter.h
index 5b3634a..c6bc99b 100644
--- a/aom_dsp/bitwriter.h
+++ b/aom_dsp/bitwriter.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_BITWRITER_H_
-#define VPX_DSP_BITWRITER_H_
+#ifndef AOM_DSP_BITWRITER_H_
+#define AOM_DSP_BITWRITER_H_
 
 #include "aom_ports/mem.h"
 #include "aom_dsp/prob.h"
@@ -19,18 +19,18 @@
 extern "C" {
 #endif
 
-typedef struct vpx_writer {
+typedef struct aom_writer {
   unsigned int lowvalue;
   unsigned int range;
   int count;
   unsigned int pos;
   uint8_t *buffer;
-} vpx_writer;
+} aom_writer;
 
-void vpx_start_encode(vpx_writer *bc, uint8_t *buffer);
-void vpx_stop_encode(vpx_writer *bc);
+void aom_start_encode(aom_writer *bc, uint8_t *buffer);
+void aom_stop_encode(aom_writer *bc);
 
-static INLINE void vpx_write(vpx_writer *br, int bit, int probability) {
+static INLINE void aom_write(aom_writer *br, int bit, int probability) {
   unsigned int split;
   int count = br->count;
   unsigned int range = br->range;
@@ -50,7 +50,7 @@
     range = br->range - split;
   }
 
-  shift = vpx_norm[range];
+  shift = aom_norm[range];
 
   range <<= shift;
   count += shift;
@@ -82,20 +82,20 @@
   br->range = range;
 }
 
-static INLINE void vpx_write_bit(vpx_writer *w, int bit) {
-  vpx_write(w, bit, 128);  // vpx_prob_half
+static INLINE void aom_write_bit(aom_writer *w, int bit) {
+  aom_write(w, bit, 128);  // aom_prob_half
 }
 
-static INLINE void vpx_write_literal(vpx_writer *w, int data, int bits) {
+static INLINE void aom_write_literal(aom_writer *w, int data, int bits) {
   int bit;
 
-  for (bit = bits - 1; bit >= 0; bit--) vpx_write_bit(w, 1 & (data >> bit));
+  for (bit = bits - 1; bit >= 0; bit--) aom_write_bit(w, 1 & (data >> bit));
 }
 
-#define vpx_write_prob(w, v) vpx_write_literal((w), (v), 8)
+#define aom_write_prob(w, v) aom_write_literal((w), (v), 8)
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_BITWRITER_H_
+#endif  // AOM_DSP_BITWRITER_H_
diff --git a/aom_dsp/bitwriter_buffer.c b/aom_dsp/bitwriter_buffer.c
index 0638622..3f9a875 100644
--- a/aom_dsp/bitwriter_buffer.c
+++ b/aom_dsp/bitwriter_buffer.c
@@ -11,14 +11,14 @@
 #include <limits.h>
 #include <stdlib.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "./bitwriter_buffer.h"
 
-size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb) {
+size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb) {
   return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0);
 }
 
-void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) {
+void aom_wb_write_bit(struct aom_write_bit_buffer *wb, int bit) {
   const int off = (int)wb->bit_offset;
   const int p = off / CHAR_BIT;
   const int q = CHAR_BIT - 1 - off % CHAR_BIT;
@@ -31,12 +31,12 @@
   wb->bit_offset = off + 1;
 }
 
-void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits) {
+void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits) {
   int bit;
-  for (bit = bits - 1; bit >= 0; bit--) vpx_wb_write_bit(wb, (data >> bit) & 1);
+  for (bit = bits - 1; bit >= 0; bit--) aom_wb_write_bit(wb, (data >> bit) & 1);
 }
 
-void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data,
+void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data,
                                      int bits) {
-  vpx_wb_write_literal(wb, data, bits + 1);
+  aom_wb_write_literal(wb, data, bits + 1);
 }
diff --git a/aom_dsp/bitwriter_buffer.h b/aom_dsp/bitwriter_buffer.h
index 2406abd..4ca942d 100644
--- a/aom_dsp/bitwriter_buffer.h
+++ b/aom_dsp/bitwriter_buffer.h
@@ -8,31 +8,31 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_BITWRITER_BUFFER_H_
-#define VPX_DSP_BITWRITER_BUFFER_H_
+#ifndef AOM_DSP_BITWRITER_BUFFER_H_
+#define AOM_DSP_BITWRITER_BUFFER_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct vpx_write_bit_buffer {
+struct aom_write_bit_buffer {
   uint8_t *bit_buffer;
   size_t bit_offset;
 };
 
-size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb);
+size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb);
 
-void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit);
+void aom_wb_write_bit(struct aom_write_bit_buffer *wb, int bit);
 
-void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits);
+void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits);
 
-void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data,
+void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data,
                                      int bits);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_BITWRITER_BUFFER_H_
+#endif  // AOM_DSP_BITWRITER_BUFFER_H_
diff --git a/aom_dsp/blend.h b/aom_dsp/blend.h
index e43149d..2dcaa1f 100644
--- a/aom_dsp/blend.h
+++ b/aom_dsp/blend.h
@@ -8,33 +8,33 @@
 *  be found in the AUTHORS file in the root of the source tree.
 */
 
-#ifndef VPX_DSP_BLEND_H_
-#define VPX_DSP_BLEND_H_
+#ifndef AOM_DSP_BLEND_H_
+#define AOM_DSP_BLEND_H_
 
 #include "aom_ports/mem.h"
 
 // Various blending functions and macros.
-// See also the vpx_blend_* functions in vpx_dsp_rtcd.h
+// See also the aom_blend_* functions in aom_dsp_rtcd.h
 
 // Alpha blending with alpha values from the range [0, 64], where 64
 // means use the first input and 0 means use the second input.
-#define VPX_BLEND_A64_ROUND_BITS 6
-#define VPX_BLEND_A64_MAX_ALPHA (1 << VPX_BLEND_A64_ROUND_BITS)  // 64
+#define AOM_BLEND_A64_ROUND_BITS 6
+#define AOM_BLEND_A64_MAX_ALPHA (1 << AOM_BLEND_A64_ROUND_BITS)  // 64
 
-#define VPX_BLEND_A64(a, v0, v1)                                          \
-  ROUND_POWER_OF_TWO((a) * (v0) + (VPX_BLEND_A64_MAX_ALPHA - (a)) * (v1), \
-                     VPX_BLEND_A64_ROUND_BITS)
+#define AOM_BLEND_A64(a, v0, v1)                                          \
+  ROUND_POWER_OF_TWO((a) * (v0) + (AOM_BLEND_A64_MAX_ALPHA - (a)) * (v1), \
+                     AOM_BLEND_A64_ROUND_BITS)
 
 // Alpha blending with alpha values from the range [0, 256], where 256
 // means use the first input and 0 means use the second input.
-#define VPX_BLEND_A256_ROUND_BITS 8
-#define VPX_BLEND_A256_MAX_ALPHA (1 << VPX_BLEND_A256_ROUND_BITS)  // 256
+#define AOM_BLEND_A256_ROUND_BITS 8
+#define AOM_BLEND_A256_MAX_ALPHA (1 << AOM_BLEND_A256_ROUND_BITS)  // 256
 
-#define VPX_BLEND_A256(a, v0, v1)                                          \
-  ROUND_POWER_OF_TWO((a) * (v0) + (VPX_BLEND_A256_MAX_ALPHA - (a)) * (v1), \
-                     VPX_BLEND_A256_ROUND_BITS)
+#define AOM_BLEND_A256(a, v0, v1)                                          \
+  ROUND_POWER_OF_TWO((a) * (v0) + (AOM_BLEND_A256_MAX_ALPHA - (a)) * (v1), \
+                     AOM_BLEND_A256_ROUND_BITS)
 
 // Blending by averaging.
-#define VPX_BLEND_AVG(v0, v1) ROUND_POWER_OF_TWO((v0) + (v1), 1)
+#define AOM_BLEND_AVG(v0, v1) ROUND_POWER_OF_TWO((v0) + (v1), 1)
 
-#endif  // VPX_DSP_BLEND_H_
+#endif  // AOM_DSP_BLEND_H_
diff --git a/aom_dsp/blend_a64_hmask.c b/aom_dsp/blend_a64_hmask.c
index cce5d88..4bb7ae6 100644
--- a/aom_dsp/blend_a64_hmask.c
+++ b/aom_dsp/blend_a64_hmask.c
@@ -10,14 +10,14 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/blend.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-void vpx_blend_a64_hmask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_hmask_c(uint8_t *dst, uint32_t dst_stride,
                            const uint8_t *src0, uint32_t src0_stride,
                            const uint8_t *src1, uint32_t src1_stride,
                            const uint8_t *mask, int h, int w) {
@@ -33,14 +33,14 @@
 
   for (i = 0; i < h; ++i) {
     for (j = 0; j < w; ++j) {
-      dst[i * dst_stride + j] = VPX_BLEND_A64(
+      dst[i * dst_stride + j] = AOM_BLEND_A64(
           mask[j], src0[i * src0_stride + j], src1[i * src1_stride + j]);
     }
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_hmask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_hmask_c(uint8_t *dst_8, uint32_t dst_stride,
                                   const uint8_t *src0_8, uint32_t src0_stride,
                                   const uint8_t *src1_8, uint32_t src1_stride,
                                   const uint8_t *mask, int h, int w, int bd) {
@@ -61,9 +61,9 @@
 
   for (i = 0; i < h; ++i) {
     for (j = 0; j < w; ++j) {
-      dst[i * dst_stride + j] = VPX_BLEND_A64(
+      dst[i * dst_stride + j] = AOM_BLEND_A64(
           mask[j], src0[i * src0_stride + j], src1[i * src1_stride + j]);
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/blend_a64_mask.c b/aom_dsp/blend_a64_mask.c
index 939e729..bb7a088 100644
--- a/aom_dsp/blend_a64_mask.c
+++ b/aom_dsp/blend_a64_mask.c
@@ -10,18 +10,18 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 #include "aom_dsp/blend.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 // Blending with alpha mask. Mask values come from the range [0, 64],
-// as described for VPX_BLEND_A64 in aom_dsp/blned.h. src0 or src1 can
+// as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can
 // be the same as dst, or dst can be different from both sources.
 
-void vpx_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
                           const uint8_t *src0, uint32_t src0_stride,
                           const uint8_t *src1, uint32_t src1_stride,
                           const uint8_t *mask, uint32_t mask_stride, int h,
@@ -40,7 +40,7 @@
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
         const int m = mask[i * mask_stride + j];
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
@@ -53,33 +53,33 @@
                 mask[(2 * i) * mask_stride + (2 * j + 1)] +
                 mask[(2 * i + 1) * mask_stride + (2 * j + 1)],
             2);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   } else if (subw == 1 && subh == 0) {
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
-        const int m = VPX_BLEND_AVG(mask[i * mask_stride + (2 * j)],
+        const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)],
                                     mask[i * mask_stride + (2 * j + 1)]);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   } else {
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
-        const int m = VPX_BLEND_AVG(mask[(2 * i) * mask_stride + j],
+        const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j],
                                     mask[(2 * i + 1) * mask_stride + j]);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
                                  const uint8_t *src0_8, uint32_t src0_stride,
                                  const uint8_t *src1_8, uint32_t src1_stride,
                                  const uint8_t *mask, uint32_t mask_stride,
@@ -103,7 +103,7 @@
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
         const int m = mask[i * mask_stride + j];
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
@@ -116,28 +116,28 @@
                 mask[(2 * i) * mask_stride + (2 * j + 1)] +
                 mask[(2 * i + 1) * mask_stride + (2 * j + 1)],
             2);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   } else if (subw == 1 && subh == 0) {
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
-        const int m = VPX_BLEND_AVG(mask[i * mask_stride + (2 * j)],
+        const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)],
                                     mask[i * mask_stride + (2 * j + 1)]);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   } else {
     for (i = 0; i < h; ++i) {
       for (j = 0; j < w; ++j) {
-        const int m = VPX_BLEND_AVG(mask[(2 * i) * mask_stride + j],
+        const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j],
                                     mask[(2 * i + 1) * mask_stride + j]);
-        dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+        dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                                 src1[i * src1_stride + j]);
       }
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/blend_a64_vmask.c b/aom_dsp/blend_a64_vmask.c
index b22dcd1..05745dc 100644
--- a/aom_dsp/blend_a64_vmask.c
+++ b/aom_dsp/blend_a64_vmask.c
@@ -10,14 +10,14 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/blend.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-void vpx_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride,
                            const uint8_t *src0, uint32_t src0_stride,
                            const uint8_t *src1, uint32_t src1_stride,
                            const uint8_t *mask, int h, int w) {
@@ -34,14 +34,14 @@
   for (i = 0; i < h; ++i) {
     const int m = mask[i];
     for (j = 0; j < w; ++j) {
-      dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+      dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                               src1[i * src1_stride + j]);
     }
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_vmask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_vmask_c(uint8_t *dst_8, uint32_t dst_stride,
                                   const uint8_t *src0_8, uint32_t src0_stride,
                                   const uint8_t *src1_8, uint32_t src1_stride,
                                   const uint8_t *mask, int h, int w, int bd) {
@@ -63,9 +63,9 @@
   for (i = 0; i < h; ++i) {
     const int m = mask[i];
     for (j = 0; j < w; ++j) {
-      dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+      dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
                                               src1[i * src1_stride + j]);
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/deblock.c b/aom_dsp/deblock.c
index 66995c1..2027054 100644
--- a/aom_dsp/deblock.c
+++ b/aom_dsp/deblock.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 #include <stdlib.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
-const int16_t vpx_rv[] = {
+const int16_t aom_rv[] = {
   8,  5,  2,  2,  8,  12, 4,  9,  8,  3,  0,  3,  9,  0,  0,  0,  8,  3,  14,
   4,  10, 1,  11, 14, 1,  14, 9,  6,  12, 11, 8,  6,  10, 0,  0,  8,  9,  0,
   3,  14, 8,  11, 13, 4,  2,  9,  0,  3,  9,  6,  1,  2,  3,  14, 13, 1,  8,
@@ -37,7 +37,7 @@
   9,  10, 13,
 };
 
-void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
+void aom_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
                                             unsigned char *dst_ptr,
                                             int src_pixels_per_line,
                                             int dst_pixels_per_line, int cols,
@@ -109,7 +109,7 @@
   }
 }
 
-void vpx_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows,
+void aom_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows,
                                  int cols, int flimit) {
   int r, c, i;
 
@@ -153,10 +153,10 @@
   }
 }
 
-void vpx_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols,
+void aom_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols,
                             int flimit) {
   int r, c, i;
-  const int16_t *rv3 = &vpx_rv[63 & rand()];
+  const int16_t *rv3 = &aom_rv[63 & rand()];
 
   for (c = 0; c < cols; c++) {
     unsigned char *s = &dst[c];
diff --git a/aom_dsp/fastssim.c b/aom_dsp/fastssim.c
index 1bdec95..d9236c0 100644
--- a/aom_dsp/fastssim.c
+++ b/aom_dsp/fastssim.c
@@ -14,8 +14,8 @@
 #include <math.h>
 #include <stdlib.h>
 #include <string.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/ssim.h"
 #include "aom_ports/system_state.h"
 
@@ -24,7 +24,7 @@
 
 #define SSIM_C1 (255 * 255 * 0.01 * 0.01)
 #define SSIM_C2 (255 * 255 * 0.03 * 0.03)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define SSIM_C1_10 (1023 * 1023 * 0.01 * 0.01)
 #define SSIM_C1_12 (4095 * 4095 * 0.01 * 0.01)
 #define SSIM_C2_10 (1023 * 1023 * 0.03 * 0.03)
@@ -197,7 +197,7 @@
   int i;
   int j;
   double ssim_c1 = SSIM_C1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (bit_depth == 10) ssim_c1 = SSIM_C1_10;
   if (bit_depth == 12) ssim_c1 = SSIM_C1_12;
 #else
@@ -321,7 +321,7 @@
   int i;
   int j;
   double ssim_c2 = SSIM_C2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (bit_depth == 10) ssim_c2 = SSIM_C2_10;
   if (bit_depth == 12) ssim_c2 = SSIM_C2_12;
 #else
@@ -465,13 +465,13 @@
   return ret;
 }
 
-double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source,
                          const YV12_BUFFER_CONFIG *dest, double *ssim_y,
                          double *ssim_u, double *ssim_v, uint32_t bd,
                          uint32_t in_bd) {
   double ssimv;
   uint32_t bd_shift = 0;
-  vpx_clear_system_state();
+  aom_clear_system_state();
   assert(bd >= in_bd);
   bd_shift = bd - in_bd;
 
diff --git a/aom_dsp/fwd_txfm.c b/aom_dsp/fwd_txfm.c
index aecaa93..16db08c 100644
--- a/aom_dsp/fwd_txfm.c
+++ b/aom_dsp/fwd_txfm.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/fwd_txfm.h"
 
-void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
   }
 }
 
-void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
@@ -86,7 +86,7 @@
   output[0] = sum << 1;
 }
 
-void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
+void aom_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
   int i, j;
   tran_low_t intermediate[64];
   int pass;
@@ -171,7 +171,7 @@
   }
 }
 
-void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
@@ -180,7 +180,7 @@
   output[0] = sum;
 }
 
-void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -360,7 +360,7 @@
   }
 }
 
-void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   int sum = 0;
   for (r = 0; r < 16; ++r)
@@ -382,7 +382,7 @@
   return rv;
 }
 
-void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   tran_high_t step[32];
   // Stage 1
   step[0] = input[0] + input[(32 - 1)];
@@ -705,7 +705,7 @@
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
 }
 
-void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void aom_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -713,7 +713,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vpx_fdct32(temp_in, temp_out, 0);
+    aom_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
@@ -722,7 +722,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vpx_fdct32(temp_in, temp_out, 0);
+    aom_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -732,7 +732,7 @@
 // Note that although we use dct_32_round in dct32 computation flow,
 // this 2d fdct32x32 for rate-distortion optimization loop is operating
 // within 16 bits precision.
-void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void aom_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -740,11 +740,11 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vpx_fdct32(temp_in, temp_out, 0);
+    aom_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
       //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
-      //           PS: also change code in aom_dsp/x86/vpx_dct_sse2.c
+      //           PS: also change code in aom_dsp/x86/aom_dct_sse2.c
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
 
@@ -752,12 +752,12 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vpx_fdct32(temp_in, temp_out, 1);
+    aom_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
-void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   int sum = 0;
   for (r = 0; r < 32; ++r)
@@ -766,43 +766,43 @@
   output[0] = (tran_low_t)(sum >> 3);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
                           int stride) {
-  vpx_fdct4x4_c(input, output, stride);
+  aom_fdct4x4_c(input, output, stride);
 }
 
-void vpx_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void aom_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
                           int stride) {
-  vpx_fdct8x8_c(input, final_output, stride);
+  aom_fdct8x8_c(input, final_output, stride);
 }
 
-void vpx_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+void aom_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
                             int stride) {
-  vpx_fdct8x8_1_c(input, final_output, stride);
+  aom_fdct8x8_1_c(input, final_output, stride);
 }
 
-void vpx_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+void aom_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
                             int stride) {
-  vpx_fdct16x16_c(input, output, stride);
+  aom_fdct16x16_c(input, output, stride);
 }
 
-void vpx_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void aom_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
                               int stride) {
-  vpx_fdct16x16_1_c(input, output, stride);
+  aom_fdct16x16_1_c(input, output, stride);
 }
 
-void vpx_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
-  vpx_fdct32x32_c(input, out, stride);
+void aom_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+  aom_fdct32x32_c(input, out, stride);
 }
 
-void vpx_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+void aom_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
                                int stride) {
-  vpx_fdct32x32_rd_c(input, out, stride);
+  aom_fdct32x32_rd_c(input, out, stride);
 }
 
-void vpx_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+void aom_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
                               int stride) {
-  vpx_fdct32x32_1_c(input, out, stride);
+  aom_fdct32x32_1_c(input, out, stride);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/fwd_txfm.h b/aom_dsp/fwd_txfm.h
index b874dd4..168962a 100644
--- a/aom_dsp/fwd_txfm.h
+++ b/aom_dsp/fwd_txfm.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_FWD_TXFM_H_
-#define VPX_DSP_FWD_TXFM_H_
+#ifndef AOM_DSP_FWD_TXFM_H_
+#define AOM_DSP_FWD_TXFM_H_
 
 #include "aom_dsp/txfm_common.h"
 
@@ -21,5 +21,5 @@
   return rv;
 }
 
-void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif  // VPX_DSP_FWD_TXFM_H_
+void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif  // AOM_DSP_FWD_TXFM_H_
diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c
index b57ba71..df4ec80 100644
--- a/aom_dsp/intrapred.c
+++ b/aom_dsp/intrapred.c
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 
 #define DST(x, y) dst[(x) + (y)*stride]
 #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
@@ -289,7 +289,7 @@
   }
 }
 
-void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above, const uint8_t *left) {
   const int H = above[-1];
   const int I = left[0];
@@ -303,7 +303,7 @@
   memset(dst + stride * 3, AVG3(K, L, L), 4);
 }
 
-void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above, const uint8_t *left) {
   const int H = above[-1];
   const int I = above[0];
@@ -322,7 +322,7 @@
   memcpy(dst + stride * 3, dst, 4);
 }
 
-void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int I = left[0];
   const int J = left[1];
@@ -338,7 +338,7 @@
   DST(3, 2) = DST(2, 2) = DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
 }
 
-void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
   const int A = above[0];
   const int B = above[1];
@@ -361,7 +361,7 @@
   DST(3, 3) = AVG3(E, F, G);  // differs from vp8
 }
 
-void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int A = above[0];
   const int B = above[1];
@@ -385,7 +385,7 @@
   DST(3, 3) = AVG3(F, G, H);
 }
 
-void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
   const int A = above[0];
   const int B = above[1];
@@ -406,7 +406,7 @@
   DST(3, 3) = H;  // differs from vp8
 }
 
-void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int A = above[0];
   const int B = above[1];
@@ -427,7 +427,7 @@
   DST(3, 3) = AVG3(G, H, H);
 }
 
-void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int I = left[0];
   const int J = left[1];
@@ -450,7 +450,7 @@
   DST(3, 1) = AVG3(B, C, D);
 }
 
-void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int I = left[0];
   const int J = left[1];
@@ -471,7 +471,7 @@
   DST(3, 0) = AVG3(D, C, B);
 }
 
-void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   const int I = left[0];
   const int J = left[1];
@@ -495,7 +495,7 @@
   DST(1, 3) = AVG3(L, K, J);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
@@ -683,7 +683,7 @@
   (void)above;
   (void)bd;
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, left[r], bs);
+    aom_memset16(dst, left[r], bs);
     dst += stride;
   }
 }
@@ -710,7 +710,7 @@
   (void)left;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, 128 << (bd - 8), bs);
+    aom_memset16(dst, 128 << (bd - 8), bs);
     dst += stride;
   }
 }
@@ -726,7 +726,7 @@
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, expected_dc, bs);
+    aom_memset16(dst, expected_dc, bs);
     dst += stride;
   }
 }
@@ -742,7 +742,7 @@
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, expected_dc, bs);
+    aom_memset16(dst, expected_dc, bs);
     dst += stride;
   }
 }
@@ -762,25 +762,25 @@
   expected_dc = (sum + (count >> 1)) / count;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, expected_dc, bs);
+    aom_memset16(dst, expected_dc, bs);
     dst += stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // This serves as a wrapper function, so that all the prediction functions
 // can be unified and accessed as a pointer array. Note that the boundary
 // above and left are not necessarily used all the time.
 #define intra_pred_sized(type, size)                        \
-  void vpx_##type##_predictor_##size##x##size##_c(          \
+  void aom_##type##_predictor_##size##x##size##_c(          \
       uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \
       const uint8_t *left) {                                \
     type##_predictor(dst, stride, size, above, left);       \
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define intra_pred_highbd_sized(type, size)                        \
-  void vpx_highbd_##type##_predictor_##size##x##size##_c(          \
+  void aom_highbd_##type##_predictor_##size##x##size##_c(          \
       uint16_t *dst, ptrdiff_t stride, const uint16_t *above,      \
       const uint16_t *left, int bd) {                              \
     highbd_##type##_predictor(dst, stride, size, above, left, bd); \
@@ -817,7 +817,7 @@
   intra_pred_sized(type, 8) \
   intra_pred_sized(type, 16) \
   intra_pred_sized(type, 32)
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 intra_pred_no_4x4(d207)
 intra_pred_no_4x4(d63)
diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c
index 6f92bee..3bb6c09 100644
--- a/aom_dsp/inv_txfm.c
+++ b/aom_dsp/inv_txfm.c
@@ -11,10 +11,10 @@
 #include <math.h>
 #include <string.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/inv_txfm.h"
 
-void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -66,7 +66,7 @@
   }
 }
 
-void vpx_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
+void aom_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
@@ -112,7 +112,7 @@
   output[3] = WRAPLOW(step[0] - step[3]);
 }
 
-void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -136,7 +136,7 @@
   }
 }
 
-void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
                          int dest_stride) {
   int i;
   tran_high_t a1;
@@ -207,7 +207,7 @@
   output[7] = WRAPLOW(step1[0] - step1[7]);
 }
 
-void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -231,7 +231,7 @@
   }
 }
 
-void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -357,7 +357,7 @@
   output[7] = WRAPLOW(-x1);
 }
 
-void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -547,7 +547,7 @@
   output[15] = WRAPLOW(step2[0] - step2[15]);
 }
 
-void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -743,7 +743,7 @@
   output[15] = WRAPLOW(-x1);
 }
 
-void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -769,7 +769,7 @@
   }
 }
 
-void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -1148,7 +1148,7 @@
   output[31] = WRAPLOW(step1[0] - step1[31]);
 }
 
-void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
                               int stride) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
@@ -1185,7 +1185,7 @@
   }
 }
 
-void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
@@ -1211,7 +1211,7 @@
   }
 }
 
-void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
@@ -1237,7 +1237,7 @@
   }
 }
 
-void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
 
@@ -1251,8 +1251,8 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
@@ -1310,7 +1310,7 @@
   }
 }
 
-void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+void aom_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
                                 int dest_stride, int bd) {
   int i;
   tran_high_t a1, e1;
@@ -1343,7 +1343,7 @@
   }
 }
 
-void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1364,7 +1364,7 @@
   output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
 }
 
-void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
@@ -1374,7 +1374,7 @@
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vpx_highbd_idct4_c(input, outptr, bd);
+    aom_highbd_idct4_c(input, outptr, bd);
     input += 4;
     outptr += 4;
   }
@@ -1382,7 +1382,7 @@
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vpx_highbd_idct4_c(temp_in, temp_out, bd);
+    aom_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1390,7 +1390,7 @@
   }
 }
 
-void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int dest_stride, int bd) {
   int i;
   tran_high_t a1;
@@ -1410,7 +1410,7 @@
   }
 }
 
-void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -1428,7 +1428,7 @@
   step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd);
 
   // stage 2 & stage 3 - even half
-  vpx_highbd_idct4_c(step1, step1, bd);
+  aom_highbd_idct4_c(step1, step1, bd);
 
   // stage 2 - odd half
   step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1455,7 +1455,7 @@
   output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
 }
 
-void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -1465,7 +1465,7 @@
 
   // First transform rows.
   for (i = 0; i < 8; ++i) {
-    vpx_highbd_idct8_c(input, outptr, bd);
+    aom_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
@@ -1473,7 +1473,7 @@
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vpx_highbd_idct8_c(temp_in, temp_out, bd);
+    aom_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1481,7 +1481,7 @@
   }
 }
 
-void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int bd) {
   int i, j;
   tran_high_t a1;
@@ -1496,7 +1496,7 @@
   }
 }
 
-void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -1534,7 +1534,7 @@
   output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd);
 }
 
-void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[7];
@@ -1611,7 +1611,7 @@
   output[7] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
@@ -1622,14 +1622,14 @@
   // First transform rows.
   // Only first 4 row has non-zero coefs.
   for (i = 0; i < 4; ++i) {
-    vpx_highbd_idct8_c(input, outptr, bd);
+    aom_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vpx_highbd_idct8_c(temp_in, temp_out, bd);
+    aom_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1637,7 +1637,7 @@
   }
 }
 
-void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1803,7 +1803,7 @@
   output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
 }
 
-void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -1813,7 +1813,7 @@
 
   // First transform rows.
   for (i = 0; i < 16; ++i) {
-    vpx_highbd_idct16_c(input, outptr, bd);
+    aom_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1821,7 +1821,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vpx_highbd_idct16_c(temp_in, temp_out, bd);
+    aom_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1829,7 +1829,7 @@
   }
 }
 
-void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -1999,7 +1999,7 @@
   output[15] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -2010,7 +2010,7 @@
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vpx_highbd_idct16_c(input, outptr, bd);
+    aom_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -2018,7 +2018,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vpx_highbd_idct16_c(temp_in, temp_out, bd);
+    aom_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2026,7 +2026,7 @@
   }
 }
 
-void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   int i, j;
   tran_high_t a1;
@@ -2042,7 +2042,7 @@
   }
 }
 
-void vpx_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -2410,7 +2410,7 @@
   output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
 }
 
-void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
                                      int stride, int bd) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
@@ -2430,7 +2430,7 @@
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
 
     if (zero_coeff[0] | zero_coeff[1])
-      vpx_highbd_idct32_c(input, outptr, bd);
+      aom_highbd_idct32_c(input, outptr, bd);
     else
       memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
@@ -2440,7 +2440,7 @@
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vpx_highbd_idct32_c(temp_in, temp_out, bd);
+    aom_highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2448,7 +2448,7 @@
   }
 }
 
-void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
@@ -2459,14 +2459,14 @@
   // Rows
   // Only upper-left 8x8 has non-zero coeff.
   for (i = 0; i < 8; ++i) {
-    vpx_highbd_idct32_c(input, outptr, bd);
+    aom_highbd_idct32_c(input, outptr, bd);
     input += 32;
     outptr += 32;
   }
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vpx_highbd_idct32_c(temp_in, temp_out, bd);
+    aom_highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2474,7 +2474,7 @@
   }
 }
 
-void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   int i, j;
   int a1;
@@ -2490,4 +2490,4 @@
     dest += stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/inv_txfm.h b/aom_dsp/inv_txfm.h
index 6865eaf..211ac63 100644
--- a/aom_dsp/inv_txfm.h
+++ b/aom_dsp/inv_txfm.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_INV_TXFM_H_
-#define VPX_DSP_INV_TXFM_H_
+#ifndef AOM_DSP_INV_TXFM_H_
+#define AOM_DSP_INV_TXFM_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_ports/mem.h"
 
@@ -23,9 +23,9 @@
 
 static INLINE tran_high_t check_range(tran_high_t input) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid VP9 input streams, intermediate stage coefficients should always
+  // For valid AV1 input streams, intermediate stage coefficients should always
   // stay within the range of a signed 16 bit integer. Coefficients can go out
-  // of this range for invalid/corrupt VP9 streams. However, strictly checking
+  // of this range for invalid/corrupt AV1 streams. However, strictly checking
   // this range for every intermediate coefficient can burdensome for a decoder,
   // therefore the following assertion is only enabled when configured with
   // --enable-coefficient-range-checking.
@@ -40,10 +40,10 @@
   return rv;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid highbitdepth VP9 streams, intermediate stage coefficients will
+  // For valid highbitdepth AV1 streams, intermediate stage coefficients will
   // stay within the ranges:
   // - 8 bit: signed 16 bit integer
   // - 10 bit: signed 18 bit integer
@@ -62,7 +62,7 @@
   tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
   return rv;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -83,17 +83,17 @@
 // bd of x uses trans_low with 8+x bits, need to remove 24-x bits
 
 #define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) \
   ((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #else  // CONFIG_EMULATE_HARDWARE
 
 #define WRAPLOW(x) ((int32_t)check_range(x))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EMULATE_HARDWARE
 
 void idct4_c(const tran_low_t *input, tran_low_t *output);
@@ -104,15 +104,15 @@
 void iadst8_c(const tran_low_t *input, tran_low_t *output);
 void iadst16_c(const tran_low_t *input, tran_low_t *output);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd);
 
-void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
 static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
                                              int bd) {
@@ -129,4 +129,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_INV_TXFM_H_
+#endif  // AOM_DSP_INV_TXFM_H_
diff --git a/aom_dsp/loopfilter.c b/aom_dsp/loopfilter.c
index 53d028c..1387495 100644
--- a/aom_dsp/loopfilter.c
+++ b/aom_dsp/loopfilter.c
@@ -10,16 +10,16 @@
 
 #include <stdlib.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 static INLINE int8_t signed_char_clamp(int t) {
   return (int8_t)clamp(t, -128, 127);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int16_t signed_char_clamp_high(int t, int bd) {
   switch (bd) {
     case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1);
@@ -109,7 +109,7 @@
   *op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
 }
 
-void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
+void aom_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
                             const uint8_t *blimit, const uint8_t *limit,
                             const uint8_t *thresh) {
   int i;
@@ -126,15 +126,15 @@
   }
 }
 
-void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
                                  const uint8_t *limit0, const uint8_t *thresh0,
                                  const uint8_t *blimit1, const uint8_t *limit1,
                                  const uint8_t *thresh1) {
-  vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1);
+  aom_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0);
+  aom_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh) {
   int i;
 
@@ -150,12 +150,12 @@
   }
 }
 
-void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
                                const uint8_t *limit0, const uint8_t *thresh0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1) {
-  vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
+  aom_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0);
+  aom_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
 }
 
 static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
@@ -178,7 +178,7 @@
   }
 }
 
-void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
                             const uint8_t *limit, const uint8_t *thresh) {
   int i;
 
@@ -197,15 +197,15 @@
   }
 }
 
-void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
                                  const uint8_t *limit0, const uint8_t *thresh0,
                                  const uint8_t *blimit1, const uint8_t *limit1,
                                  const uint8_t *thresh1) {
-  vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1);
+  aom_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0);
+  aom_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh) {
   int i;
 
@@ -221,12 +221,12 @@
   }
 }
 
-void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
                                const uint8_t *limit0, const uint8_t *thresh0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1) {
-  vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
+  aom_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0);
+  aom_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
 }
 
 static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat,
@@ -308,12 +308,12 @@
   }
 }
 
-void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit,
                                  const uint8_t *limit, const uint8_t *thresh) {
   mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1);
 }
 
-void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit,
                                   const uint8_t *limit, const uint8_t *thresh) {
   mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2);
 }
@@ -339,17 +339,17 @@
   }
 }
 
-void vpx_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
                            const uint8_t *limit, const uint8_t *thresh) {
   mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8);
 }
 
-void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
                                 const uint8_t *limit, const uint8_t *thresh) {
   mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Should we apply any filter at all: 11111111 yes, 00000000 no ?
 static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
                                         uint16_t p3, uint16_t p2, uint16_t p1,
@@ -440,7 +440,7 @@
   *op1 = signed_char_clamp_high(ps1 + filter, bd) + (0x80 << shift);
 }
 
-void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
+void aom_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
                                    const uint8_t *blimit, const uint8_t *limit,
                                    const uint8_t *thresh, int bd) {
   int i;
@@ -463,15 +463,15 @@
   }
 }
 
-void vpx_highbd_lpf_horizontal_4_dual_c(
+void aom_highbd_lpf_horizontal_4_dual_c(
     uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
-  vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd);
-  vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd);
+  aom_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd);
+  aom_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd);
 }
 
-void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
                                  const uint8_t *limit, const uint8_t *thresh,
                                  int bd) {
   int i;
@@ -488,12 +488,12 @@
   }
 }
 
-void vpx_highbd_lpf_vertical_4_dual_c(
+void aom_highbd_lpf_vertical_4_dual_c(
     uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
-  vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, bd);
-  vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
+  aom_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, bd);
+  aom_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
                               bd);
 }
 
@@ -517,7 +517,7 @@
   }
 }
 
-void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
   int i;
@@ -538,15 +538,15 @@
   }
 }
 
-void vpx_highbd_lpf_horizontal_8_dual_c(
+void aom_highbd_lpf_horizontal_8_dual_c(
     uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
-  vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd);
-  vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd);
+  aom_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd);
+  aom_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd);
 }
 
-void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
                                  const uint8_t *limit, const uint8_t *thresh,
                                  int bd) {
   int i;
@@ -564,12 +564,12 @@
   }
 }
 
-void vpx_highbd_lpf_vertical_8_dual_c(
+void aom_highbd_lpf_vertical_8_dual_c(
     uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
-  vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, bd);
-  vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
+  aom_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, bd);
+  aom_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
                               bd);
 }
 
@@ -673,14 +673,14 @@
   }
 }
 
-void vpx_highbd_lpf_horizontal_edge_8_c(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_8_c(uint16_t *s, int p,
                                         const uint8_t *blimit,
                                         const uint8_t *limit,
                                         const uint8_t *thresh, int bd) {
   highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1, bd);
 }
 
-void vpx_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p,
                                          const uint8_t *blimit,
                                          const uint8_t *limit,
                                          const uint8_t *thresh, int bd) {
@@ -717,16 +717,16 @@
   }
 }
 
-void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
                                   const uint8_t *limit, const uint8_t *thresh,
                                   int bd) {
   highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd);
 }
 
-void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
+void aom_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
                                        const uint8_t *blimit,
                                        const uint8_t *limit,
                                        const uint8_t *thresh, int bd) {
   highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/mips/add_noise_msa.c b/aom_dsp/mips/add_noise_msa.c
index 366770c..fe3510d 100644
--- a/aom_dsp/mips/add_noise_msa.c
+++ b/aom_dsp/mips/add_noise_msa.c
@@ -11,7 +11,7 @@
 #include <stdlib.h>
 #include "./macros_msa.h"
 
-void vpx_plane_add_noise_msa(uint8_t *start_ptr, char *noise,
+void aom_plane_add_noise_msa(uint8_t *start_ptr, char *noise,
                              char blackclamp[16], char whiteclamp[16],
                              char bothclamp[16], uint32_t width,
                              uint32_t height, int32_t pitch) {
diff --git a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
index 300656c..86f3e95 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
                                               int32_t src_stride, uint8_t *dst,
@@ -631,7 +631,7 @@
   }
 }
 
-void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4, int w,
@@ -668,7 +668,7 @@
                                           (int32_t)dst_stride, &filt_hor[3], h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                                   x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
@@ -695,7 +695,7 @@
                                           (int32_t)dst_stride, filt_hor, h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                                   x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve8_avg_msa.c b/aom_dsp/mips/aom_convolve8_avg_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_avg_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_msa.c
index 8037661..2e66449 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 static void common_hv_8ht_8vt_and_aver_dst_4w_msa(
     const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
@@ -514,7 +514,7 @@
   }
 }
 
-void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y, int y_step_q4, int w,
@@ -560,13 +560,13 @@
                                                &filt_hor[3], &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
                             x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else if (((const int32_t *)filter_x)[0] == 0 ||
              ((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+    aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                         filter_y, y_step_q4, w, h);
   } else {
     switch (w) {
@@ -596,7 +596,7 @@
                                                filt_ver, h);
         break;
       default:
-        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
                             x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_avg_vert_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_vert_msa.c
index b3c9b6b..f6e966e 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
@@ -603,7 +603,7 @@
   }
 }
 
-void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int x_step_q4,
                                 const int16_t *filter_y, int y_step_q4, int w,
@@ -640,7 +640,7 @@
                                           (int32_t)dst_stride, &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                                  x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
@@ -668,7 +668,7 @@
                                           (int32_t)dst_stride, filt_ver, h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                                  x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve8_horiz_msa.c b/aom_dsp/mips/aom_convolve8_horiz_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_horiz_msa.c
rename to aom_dsp/mips/aom_convolve8_horiz_msa.c
index 256abd5..7416482 100644
--- a/aom_dsp/mips/vpx_convolve8_horiz_msa.c
+++ b/aom_dsp/mips/aom_convolve8_horiz_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride,
                                  uint8_t *dst, int32_t dst_stride,
@@ -619,7 +619,7 @@
   }
 }
 
-void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4, int w,
@@ -656,7 +656,7 @@
                              &filt_hor[3], h);
         break;
       default:
-        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                               x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
@@ -683,7 +683,7 @@
                              filt_hor, h);
         break;
       default:
-        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                               x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve8_msa.c b/aom_dsp/mips/aom_convolve8_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_msa.c
rename to aom_dsp/mips/aom_convolve8_msa.c
index 81d4f14..45761d3 100644
--- a/aom_dsp/mips/vpx_convolve8_msa.c
+++ b/aom_dsp/mips/aom_convolve8_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 const uint8_t mc_filt_mask_arr[16 * 3] = {
   /* 8 width cases */
@@ -540,7 +540,7 @@
   }
 }
 
-void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                        ptrdiff_t dst_stride, const int16_t *filter_x,
                        int32_t x_step_q4, const int16_t *filter_y,
                        int32_t y_step_q4, int32_t w, int32_t h) {
@@ -585,13 +585,13 @@
                                   &filt_ver[3], (int32_t)h);
         break;
       default:
-        vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+        aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                         filter_y, y_step_q4, w, h);
         break;
     }
   } else if (((const int32_t *)filter_x)[0] == 0 ||
              ((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+    aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                     filter_y, y_step_q4, w, h);
   } else {
     switch (w) {
@@ -621,7 +621,7 @@
                                   (int32_t)h);
         break;
       default:
-        vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+        aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                         filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve8_vert_msa.c b/aom_dsp/mips/aom_convolve8_vert_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_vert_msa.c
rename to aom_dsp/mips/aom_convolve8_vert_msa.c
index 0404575..4d634c4 100644
--- a/aom_dsp/mips/vpx_convolve8_vert_msa.c
+++ b/aom_dsp/mips/aom_convolve8_vert_msa.c
@@ -9,8 +9,8 @@
  */
 
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
 
 static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride,
                                 uint8_t *dst, int32_t dst_stride,
@@ -626,7 +626,7 @@
   }
 }
 
-void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4, int w,
@@ -663,7 +663,7 @@
                              &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
@@ -690,7 +690,7 @@
                              filt_ver, h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/vpx_convolve_avg_msa.c b/aom_dsp/mips/aom_convolve_avg_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve_avg_msa.c
rename to aom_dsp/mips/aom_convolve_avg_msa.c
index 313223b..f6d9c09 100644
--- a/aom_dsp/mips/vpx_convolve_avg_msa.c
+++ b/aom_dsp/mips/aom_convolve_avg_msa.c
@@ -186,7 +186,7 @@
   }
 }
 
-void vpx_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int32_t filter_x_stride,
                           const int16_t *filter_y, int32_t filter_y_stride,
diff --git a/aom_dsp/mips/vpx_convolve_copy_msa.c b/aom_dsp/mips/aom_convolve_copy_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve_copy_msa.c
rename to aom_dsp/mips/aom_convolve_copy_msa.c
index 520a706..8151609 100644
--- a/aom_dsp/mips/vpx_convolve_copy_msa.c
+++ b/aom_dsp/mips/aom_convolve_copy_msa.c
@@ -196,7 +196,7 @@
   copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
 }
 
-void vpx_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int32_t filter_x_stride,
                            const int16_t *filter_y, int32_t filter_y_stride,
diff --git a/aom_dsp/mips/vpx_convolve_msa.h b/aom_dsp/mips/aom_convolve_msa.h
similarity index 97%
rename from aom_dsp/mips/vpx_convolve_msa.h
rename to aom_dsp/mips/aom_convolve_msa.h
index 6b48879..fc2748f 100644
--- a/aom_dsp/mips/vpx_convolve_msa.h
+++ b/aom_dsp/mips/aom_convolve_msa.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_
-#define VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_
+#ifndef AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
+#define AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
 
 #include "aom_dsp/mips/macros_msa.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 extern const uint8_t mc_filt_mask_arr[16 * 3];
 
@@ -121,4 +121,4 @@
     AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m);             \
     ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride);                                \
   }
-#endif /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */
+#endif /* AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_ */
diff --git a/aom_dsp/mips/avg_msa.c b/aom_dsp/mips/avg_msa.c
index 5896708..0dae2ed 100644
--- a/aom_dsp/mips/avg_msa.c
+++ b/aom_dsp/mips/avg_msa.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
-uint32_t vpx_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
+uint32_t aom_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
   uint32_t sum_out;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
   v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
@@ -33,7 +33,7 @@
   return sum_out;
 }
 
-uint32_t vpx_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
+uint32_t aom_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
   uint32_t sum_out;
   uint32_t src0, src1, src2, src3;
   v16u8 vec = { 0 };
diff --git a/aom_dsp/mips/common_dspr2.c b/aom_dsp/mips/common_dspr2.c
index 268bbcd..537a92f 100644
--- a/aom_dsp/mips/common_dspr2.c
+++ b/aom_dsp/mips/common_dspr2.c
@@ -11,20 +11,20 @@
 #include "aom_dsp/mips/common_dspr2.h"
 
 #if HAVE_DSPR2
-uint8_t vpx_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
-uint8_t *vpx_ff_cropTbl;
+uint8_t aom_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
+uint8_t *aom_ff_cropTbl;
 
-void vpx_dsputil_static_init(void) {
+void aom_dsputil_static_init(void) {
   int i;
 
-  for (i = 0; i < 256; i++) vpx_ff_cropTbl_a[i + CROP_WIDTH] = i;
+  for (i = 0; i < 256; i++) aom_ff_cropTbl_a[i + CROP_WIDTH] = i;
 
   for (i = 0; i < CROP_WIDTH; i++) {
-    vpx_ff_cropTbl_a[i] = 0;
-    vpx_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
+    aom_ff_cropTbl_a[i] = 0;
+    aom_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
   }
 
-  vpx_ff_cropTbl = &vpx_ff_cropTbl_a[CROP_WIDTH];
+  aom_ff_cropTbl = &aom_ff_cropTbl_a[CROP_WIDTH];
 }
 
 #endif
diff --git a/aom_dsp/mips/common_dspr2.h b/aom_dsp/mips/common_dspr2.h
index 1da490a..2c508ad 100644
--- a/aom_dsp/mips/common_dspr2.h
+++ b/aom_dsp/mips/common_dspr2.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_COMMON_MIPS_DSPR2_H_
-#define VPX_COMMON_MIPS_DSPR2_H_
+#ifndef AOM_COMMON_MIPS_DSPR2_H_
+#define AOM_COMMON_MIPS_DSPR2_H_
 
 #include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -21,7 +21,7 @@
 #if HAVE_DSPR2
 #define CROP_WIDTH 512
 
-extern uint8_t *vpx_ff_cropTbl;  // From "aom_dsp/mips/intrapred4_dspr2.c"
+extern uint8_t *aom_ff_cropTbl;  // From "aom_dsp/mips/intrapred4_dspr2.c"
 
 static INLINE void prefetch_load(const unsigned char *src) {
   __asm__ __volatile__("pref   0,  0(%[src])   \n\t" : : [src] "r"(src));
@@ -45,4 +45,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_COMMON_MIPS_DSPR2_H_
+#endif  // AOM_COMMON_MIPS_DSPR2_H_
diff --git a/aom_dsp/mips/convolve2_avg_dspr2.c b/aom_dsp/mips/convolve2_avg_dspr2.c
index b73eba2..1cbfe74 100644
--- a/aom_dsp/mips/convolve2_avg_dspr2.c
+++ b/aom_dsp/mips/convolve2_avg_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -25,7 +25,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2;
   uint32_t p1, p2;
@@ -124,7 +124,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2;
   uint32_t p1, p2;
@@ -217,7 +217,7 @@
   }
 }
 
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4, int w,
@@ -247,7 +247,7 @@
                                     h);
       break;
     default:
-      vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+      aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                                x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
diff --git a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
index 765c902..d8639b7 100644
--- a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -23,7 +23,7 @@
                                           int32_t dst_stride,
                                           const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
   uint32_t tp1, tp2;
@@ -114,7 +114,7 @@
                                           int32_t dst_stride,
                                           const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t tp1, tp2, tp3, tp4;
@@ -261,7 +261,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2, qload3;
@@ -508,7 +508,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2, qload3;
@@ -749,7 +749,7 @@
   }
 }
 
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
                                    const int16_t *filter_x, int x_step_q4,
                                    const int16_t *filter_y, int y_step_q4,
@@ -793,7 +793,7 @@
                                      h);
       break;
     default:
-      vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+      aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                                 x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
diff --git a/aom_dsp/mips/convolve2_dspr2.c b/aom_dsp/mips/convolve2_dspr2.c
index 78ee6e0..ee9da6d 100644
--- a/aom_dsp/mips/convolve2_dspr2.c
+++ b/aom_dsp/mips/convolve2_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -22,7 +22,7 @@
     const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
     const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint8_t *dst_ptr;
   int32_t Temp1, Temp2;
   uint32_t vector4a = 64;
@@ -106,7 +106,7 @@
     const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
     const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint8_t *dst_ptr;
   uint32_t vector4a = 64;
   int32_t Temp1, Temp2, Temp3;
@@ -242,7 +242,7 @@
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2;
@@ -607,7 +607,7 @@
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2;
@@ -987,7 +987,7 @@
   }
 }
 
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter, int w,
                          int h) {
   uint32_t pos = 38;
diff --git a/aom_dsp/mips/convolve2_horiz_dspr2.c b/aom_dsp/mips/convolve2_horiz_dspr2.c
index 0d6ebea..275f859 100644
--- a/aom_dsp/mips/convolve2_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve2_horiz_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -22,7 +22,7 @@
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
   uint32_t tp1, tp2;
@@ -99,7 +99,7 @@
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t tp1, tp2, tp3;
@@ -223,7 +223,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2, qload3;
@@ -427,7 +427,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2, qload3;
@@ -626,7 +626,7 @@
   }
 }
 
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4, int w,
@@ -672,7 +672,7 @@
                                  (int32_t)dst_stride, filter_x, (int32_t)h);
       break;
     default:
-      vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+      aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                             x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
diff --git a/aom_dsp/mips/convolve2_vert_dspr2.c b/aom_dsp/mips/convolve2_vert_dspr2.c
index a9d0cbf..0af7b6a 100644
--- a/aom_dsp/mips/convolve2_vert_dspr2.c
+++ b/aom_dsp/mips/convolve2_vert_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -25,7 +25,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2;
   uint32_t p1, p2;
@@ -115,7 +115,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2;
   uint32_t p1, p2;
@@ -199,7 +199,7 @@
   }
 }
 
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4, int w,
@@ -228,7 +228,7 @@
       convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
       break;
     default:
-      vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+      aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
                            x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
diff --git a/aom_dsp/mips/convolve8_avg_dspr2.c b/aom_dsp/mips/convolve8_avg_dspr2.c
index 8baf33a..774616a 100644
--- a/aom_dsp/mips/convolve8_avg_dspr2.c
+++ b/aom_dsp/mips/convolve8_avg_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -25,7 +25,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2, load3, load4;
   uint32_t p1, p2;
@@ -181,7 +181,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2, load3, load4;
   uint32_t p1, p2;
@@ -332,7 +332,7 @@
   }
 }
 
-void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4, int w,
@@ -341,7 +341,7 @@
   assert(((const int32_t *)filter_y)[1] != 0x800000);
 
   if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+    aom_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                  x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
@@ -367,14 +367,14 @@
                                    h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                                  x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
 }
 
-void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4, int w,
@@ -390,14 +390,14 @@
 
   if (intermediate_height < h) intermediate_height = h;
 
-  vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
+  aom_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
                       x_step_q4, filter_y, y_step_q4, w, intermediate_height);
 
-  vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+  aom_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
                          x_step_q4, filter_y, y_step_q4, w, h);
 }
 
-void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int filter_x_stride,
                             const int16_t *filter_y, int filter_y_stride, int w,
diff --git a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
index d732d2e..3267446 100644
--- a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -22,7 +22,7 @@
                                        uint8_t *dst, int32_t dst_stride,
                                        const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   int32_t vector1b, vector2b, vector3b, vector4b;
   int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
@@ -139,7 +139,7 @@
                                        uint8_t *dst, int32_t dst_stride,
                                        const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   int32_t vector1b, vector2b, vector3b, vector4b;
   int32_t Temp1, Temp2, Temp3;
@@ -325,7 +325,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -633,7 +633,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -936,7 +936,7 @@
   }
 }
 
-void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
                                    const int16_t *filter_x, int x_step_q4,
                                    const int16_t *filter_y, int y_step_q4,
@@ -945,7 +945,7 @@
   assert(((const int32_t *)filter_x)[1] != 0x800000);
 
   if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+    aom_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                   x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
@@ -987,7 +987,7 @@
                                     h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
+        aom_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
                                   filter_x, x_step_q4, filter_y, y_step_q4, w,
                                   h);
         break;
diff --git a/aom_dsp/mips/convolve8_dspr2.c b/aom_dsp/mips/convolve8_dspr2.c
index 09a9083..db2e6ef 100644
--- a/aom_dsp/mips/convolve8_dspr2.c
+++ b/aom_dsp/mips/convolve8_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -24,7 +24,7 @@
                                               const int16_t *filter_x0,
                                               int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint8_t *dst_ptr;
   int32_t vector1b, vector2b, vector3b, vector4b;
   int32_t Temp1, Temp2, Temp3, Temp4;
@@ -138,7 +138,7 @@
                                               const int16_t *filter_x0,
                                               int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint8_t *dst_ptr;
   uint32_t vector4a = 64;
   int32_t vector1b, vector2b, vector3b, vector4b;
@@ -311,7 +311,7 @@
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -789,7 +789,7 @@
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -1295,7 +1295,7 @@
   }
 }
 
-void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
@@ -1320,7 +1320,7 @@
     copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
                           intermediate_height, w, intermediate_height);
   } else if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
+    aom_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
                         intermediate_height, filter_x, w, intermediate_height);
   } else {
     src -= (src_stride * 3 + 3);
@@ -1363,7 +1363,7 @@
   if (filter_y[3] == 0x80) {
     copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
   } else if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
+    aom_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
                         filter_y, h, w);
   } else {
     switch (h) {
@@ -1392,7 +1392,7 @@
   }
 }
 
-void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int filter_x_stride,
                              const int16_t *filter_y, int filter_y_stride,
diff --git a/aom_dsp/mips/convolve8_horiz_dspr2.c b/aom_dsp/mips/convolve8_horiz_dspr2.c
index 66692be..cbd9b5a 100644
--- a/aom_dsp/mips/convolve8_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve8_horiz_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -22,7 +22,7 @@
                                    uint8_t *dst, int32_t dst_stride,
                                    const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   int32_t vector1b, vector2b, vector3b, vector4b;
   int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
@@ -128,7 +128,7 @@
                                    uint8_t *dst, int32_t dst_stride,
                                    const int16_t *filter_x0, int32_t h) {
   int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   int32_t vector1b, vector2b, vector3b, vector4b;
   int32_t Temp1, Temp2, Temp3;
@@ -290,7 +290,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -555,7 +555,7 @@
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector_64 = 64;
   int32_t filter12, filter34, filter56, filter78;
   int32_t Temp1, Temp2, Temp3;
@@ -816,7 +816,7 @@
   }
 }
 
-void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4, int w,
@@ -825,7 +825,7 @@
   assert(((const int32_t *)filter_x)[1] != 0x800000);
 
   if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+    aom_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
                               x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
@@ -868,7 +868,7 @@
                                 (int32_t)dst_stride, filter_x, (int32_t)h);
         break;
       default:
-        vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
                               x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/convolve8_vert_dspr2.c b/aom_dsp/mips/convolve8_vert_dspr2.c
index 1594f10..7aee3b7 100644
--- a/aom_dsp/mips/convolve8_vert_dspr2.c
+++ b/aom_dsp/mips/convolve8_vert_dspr2.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
@@ -25,7 +25,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2, load3, load4;
   uint32_t p1, p2;
@@ -173,7 +173,7 @@
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
   uint32_t vector4a = 64;
   uint32_t load1, load2, load3, load4;
   uint32_t p1, p2;
@@ -316,7 +316,7 @@
   }
 }
 
-void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4, int w,
@@ -325,7 +325,7 @@
   assert(((const int32_t *)filter_y)[1] != 0x800000);
 
   if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+    aom_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
                              x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
@@ -349,7 +349,7 @@
         convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
diff --git a/aom_dsp/mips/convolve_common_dspr2.h b/aom_dsp/mips/convolve_common_dspr2.h
index b650019..e9dbf2d 100644
--- a/aom_dsp/mips/convolve_common_dspr2.h
+++ b/aom_dsp/mips/convolve_common_dspr2.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
-#define VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
+#ifndef AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
+#define AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/mips/common_dspr2.h"
 
 #ifdef __cplusplus
@@ -22,29 +22,29 @@
 #endif
 
 #if HAVE_DSPR2
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4, int w,
                                int h);
 
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
                                    const int16_t *filter_x, int x_step_q4,
                                    const int16_t *filter_y, int y_step_q4,
                                    int w, int h);
 
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4, int w,
                                   int h);
 
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter, int w,
                          int h);
 
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4, int w,
@@ -55,4 +55,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
+#endif  // AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
diff --git a/aom_dsp/mips/deblock_msa.c b/aom_dsp/mips/deblock_msa.c
index 402d7ed..37e3e4a 100644
--- a/aom_dsp/mips/deblock_msa.c
+++ b/aom_dsp/mips/deblock_msa.c
@@ -11,9 +11,9 @@
 #include <stdlib.h>
 #include "./macros_msa.h"
 
-extern const int16_t vpx_rv[];
+extern const int16_t aom_rv[];
 
-#define VPX_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0,  \
+#define AOM_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0,  \
                                 out1, out2, out3, out4, out5, out6, out7,      \
                                 out8, out9, out10, out11, out12, out13, out14, \
                                 out15)                                         \
@@ -47,7 +47,7 @@
     out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6);                      \
   }
 
-#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
+#define AOM_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
                            ref, out)                                           \
   {                                                                            \
     v16u8 temp0, temp1;                                                        \
@@ -109,7 +109,7 @@
     in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2);                   \
   }
 
-#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
+#define AOM_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
                                 in9, in10, in11)                             \
   {                                                                          \
     v8i16 temp0, temp1, temp2, temp3;                                        \
@@ -159,21 +159,21 @@
     LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
     src = LD_UB(p_src);
     LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
     above2 = LD_UB(p_src + 3 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
     above1 = LD_UB(p_src + 4 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
     src = LD_UB(p_src + 5 * src_stride);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
     below1 = LD_UB(p_src + 6 * src_stride);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
     below2 = LD_UB(p_src + 7 * src_stride);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
     above2 = LD_UB(p_src + 8 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
     above1 = LD_UB(p_src + 9 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
     ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
            p_dst, dst_stride);
 
@@ -187,21 +187,21 @@
     LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
     src = LD_UB(p_src);
     LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
     above2 = LD_UB(p_src + 3 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
     above1 = LD_UB(p_src + 4 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
     src = LD_UB(p_src + 5 * src_stride);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
     below1 = LD_UB(p_src + 6 * src_stride);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
     below2 = LD_UB(p_src + 7 * src_stride);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
     above2 = LD_UB(p_src + 8 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
     above1 = LD_UB(p_src + 9 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
     out0 = __msa_copy_u_d((v2i64)inter0, 0);
     out1 = __msa_copy_u_d((v2i64)inter1, 0);
     out2 = __msa_copy_u_d((v2i64)inter2, 0);
@@ -223,7 +223,7 @@
   for (col = 0; col < (cols / 8); ++col) {
     ref = LD_UB(f);
     f += 8;
-    VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
+    AOM_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
                             inter6, inter7, inter8, inter9, inter10, inter11);
     if (0 == col) {
       above2 = inter2;
@@ -236,36 +236,36 @@
     below1 = inter3;
     below2 = inter4;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
     above2 = inter5;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
     above1 = inter6;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
     src = inter7;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
     below1 = inter8;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
     below2 = inter9;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
     if (col == (cols / 8 - 1)) {
       above2 = inter9;
     } else {
       above2 = inter10;
     }
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
     if (col == (cols / 8 - 1)) {
       above1 = inter9;
     } else {
       above1 = inter11;
     }
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
     TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8,
                        inter9, inter2, inter3, inter4, inter5, inter6, inter7,
                        inter8, inter9);
@@ -306,37 +306,37 @@
     LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
     src = LD_UB(p_src);
     LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
     above2 = LD_UB(p_src + 3 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
     above1 = LD_UB(p_src + 4 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
     src = LD_UB(p_src + 5 * src_stride);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
     below1 = LD_UB(p_src + 6 * src_stride);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
     below2 = LD_UB(p_src + 7 * src_stride);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
     above2 = LD_UB(p_src + 8 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
     above1 = LD_UB(p_src + 9 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
     src = LD_UB(p_src + 10 * src_stride);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
     below1 = LD_UB(p_src + 11 * src_stride);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
     below2 = LD_UB(p_src + 12 * src_stride);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
     above2 = LD_UB(p_src + 13 * src_stride);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
     above1 = LD_UB(p_src + 14 * src_stride);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
     src = LD_UB(p_src + 15 * src_stride);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
     below1 = LD_UB(p_src + 16 * src_stride);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
     below2 = LD_UB(p_src + 17 * src_stride);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
     ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
            p_dst, dst_stride);
     ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14, inter15,
@@ -371,37 +371,37 @@
     below1 = inter3;
     below2 = inter4;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
     above2 = inter5;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
     above1 = inter6;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
     src = inter7;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
-    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
+    AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
     below1 = inter8;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
-    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
+    AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
     below2 = inter9;
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
-    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
+    AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
     if (col == (cols / 8 - 1)) {
       above2 = inter9;
     } else {
       above2 = inter10;
     }
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
-    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
+    AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
     if (col == (cols / 8 - 1)) {
       above1 = inter9;
     } else {
       above1 = inter11;
     }
     ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
-    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
-    VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
+    AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
+    AOM_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
                             inter8, inter9, inter2, inter3, inter4, inter5,
                             inter6, inter7, inter8, inter9, inter10, inter11,
                             inter12, inter13, inter14, inter15, above2, above1);
@@ -435,7 +435,7 @@
   }
 }
 
-void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
+void aom_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
                                               int32_t src_stride,
                                               int32_t dst_stride, int32_t cols,
                                               uint8_t *f, int32_t size) {
@@ -446,7 +446,7 @@
   }
 }
 
-void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
+void aom_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
                                    int32_t rows, int32_t cols, int32_t flimit) {
   int32_t row, col, cnt;
   uint8_t *src_dup = src_ptr;
@@ -571,10 +571,10 @@
   }
 }
 
-void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
+void aom_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
                               int32_t cols, int32_t flimit) {
   int32_t row, col, cnt, i;
-  const int16_t *rv3 = &vpx_rv[63 & rand()];
+  const int16_t *rv3 = &aom_rv[63 & rand()];
   v4i32 flimit_vec;
   v16u8 dst7, dst8, dst_r_b, dst_l_b;
   v16i8 mask;
diff --git a/aom_dsp/mips/fwd_dct32x32_msa.c b/aom_dsp/mips/fwd_dct32x32_msa.c
index 9dcde12..fd24501 100644
--- a/aom_dsp/mips/fwd_dct32x32_msa.c
+++ b/aom_dsp/mips/fwd_dct32x32_msa.c
@@ -670,7 +670,7 @@
   fdct8x32_1d_row_transpose_store(tmp_buf, output);
 }
 
-void vpx_fdct32x32_msa(const int16_t *input, int16_t *output,
+void aom_fdct32x32_msa(const int16_t *input, int16_t *output,
                        int32_t src_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
@@ -907,7 +907,7 @@
   fdct8x32_1d_row_transpose_store(tmp_buf, output);
 }
 
-void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
+void aom_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
                           int32_t src_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
@@ -926,7 +926,7 @@
   }
 }
 
-void vpx_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
   int sum = LD_HADD(input, stride);
   sum += LD_HADD(input + 8, stride);
   sum += LD_HADD(input + 16, stride);
diff --git a/aom_dsp/mips/fwd_txfm_msa.c b/aom_dsp/mips/fwd_txfm_msa.c
index c95c1d0..53c0bd0 100644
--- a/aom_dsp/mips/fwd_txfm_msa.c
+++ b/aom_dsp/mips/fwd_txfm_msa.c
@@ -166,7 +166,7 @@
   ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);
 }
 
-void vpx_fdct4x4_msa(const int16_t *input, int16_t *output,
+void aom_fdct4x4_msa(const int16_t *input, int16_t *output,
                      int32_t src_stride) {
   v8i16 in0, in1, in2, in3;
 
@@ -186,9 +186,9 @@
     in0 += vec;
   }
 
-  VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+  AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+  AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
   ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
   SRA_4V(in0, in1, in2, in3, 2);
@@ -196,18 +196,18 @@
   ST_SH2(in0, in2, output, 8);
 }
 
-void vpx_fdct8x8_msa(const int16_t *input, int16_t *output,
+void aom_fdct8x8_msa(const int16_t *input, int16_t *output,
                      int32_t src_stride) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
   SLLI_4V(in0, in1, in2, in3, 2);
   SLLI_4V(in4, in5, in6, in7, 2);
-  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+  AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
             in5, in6, in7);
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
-  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+  AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
             in5, in6, in7);
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
@@ -215,12 +215,12 @@
   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
 }
 
-void vpx_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
   out[0] = LD_HADD(input, stride);
   out[1] = 0;
 }
 
-void vpx_fdct16x16_msa(const int16_t *input, int16_t *output,
+void aom_fdct16x16_msa(const int16_t *input, int16_t *output,
                        int32_t src_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, tmp_buf[16 * 16]);
@@ -236,7 +236,7 @@
   }
 }
 
-void vpx_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
   int sum = LD_HADD(input, stride);
   sum += LD_HADD(input + 8, stride);
   sum += LD_HADD(input + 16 * 8, stride);
diff --git a/aom_dsp/mips/fwd_txfm_msa.h b/aom_dsp/mips/fwd_txfm_msa.h
index 0911c3e..5251d0d 100644
--- a/aom_dsp/mips/fwd_txfm_msa.h
+++ b/aom_dsp/mips/fwd_txfm_msa.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_FWD_TXFM_MSA_H_
-#define VPX_DSP_MIPS_FWD_TXFM_MSA_H_
+#ifndef AOM_DSP_MIPS_FWD_TXFM_MSA_H_
+#define AOM_DSP_MIPS_FWD_TXFM_MSA_H_
 
 #include "aom_dsp/mips/txfm_macros_msa.h"
 #include "aom_dsp/txfm_common.h"
@@ -30,7 +30,7 @@
     HADD_SW_S32(vec_w_m);                                                      \
   })
 
-#define VPX_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3)                  \
+#define AOM_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3)                  \
   {                                                                            \
     v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m;                                  \
     v8i16 vec0_m, vec1_m, vec2_m, vec3_m;                                      \
@@ -71,7 +71,7 @@
                in6, in7);                                                    \
   }
 
-#define VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+#define AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
                   out3, out4, out5, out6, out7)                              \
   {                                                                          \
     v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m;                          \
@@ -377,4 +377,4 @@
 void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
                         int32_t src_stride);
 void fdct16x8_1d_row(int16_t *input, int16_t *output);
-#endif  // VPX_DSP_MIPS_FWD_TXFM_MSA_H_
+#endif  // AOM_DSP_MIPS_FWD_TXFM_MSA_H_
diff --git a/aom_dsp/mips/idct16x16_msa.c b/aom_dsp/mips/idct16x16_msa.c
index 977d794..258847e 100644
--- a/aom_dsp/mips/idct16x16_msa.c
+++ b/aom_dsp/mips/idct16x16_msa.c
@@ -10,7 +10,7 @@
 
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
   v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
@@ -103,7 +103,7 @@
   ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);
 }
 
-void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
@@ -189,19 +189,19 @@
   reg3 = tmp7;
 
   SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
 }
 
-void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
@@ -210,25 +210,25 @@
   /* transform rows */
   for (i = 0; i < 2; ++i) {
     /* process 16 * 8 block */
-    vpx_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
+    aom_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
   }
 
   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+    aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                      dst_stride);
   }
 }
 
-void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
                               int32_t dst_stride) {
   uint8_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
   int16_t *out = out_arr;
 
   /* process 16 * 8 block */
-  vpx_idct16_1d_rows_msa(input, out);
+  aom_idct16_1d_rows_msa(input, out);
 
   /* short case just considers top 4 rows as valid output */
   out += 4 * 16;
@@ -254,12 +254,12 @@
   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+    aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                      dst_stride);
   }
 }
 
-void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
   uint8_t i;
   int16_t out;
@@ -289,7 +289,7 @@
   }
 }
 
-void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
   v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
 
@@ -302,7 +302,7 @@
                      l12, l13, l14, l15);
 
   /* ADST in horizontal */
-  VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+  AV1_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
                    l14, l15, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,
                    r12, r13, r14, r15);
 
@@ -319,7 +319,7 @@
   ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
 }
 
-void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride) {
   v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
   v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
@@ -343,20 +343,20 @@
   r15 = LD_SH(input + 15 * 16);
 
   /* stage 1 */
-  k0 = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
   MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
-  k0 = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
   MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
   BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
-  k0 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k2 = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k2 = AOM_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
   MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
 
   r1 = LD_SH(input + 1 * 16);
@@ -368,15 +368,15 @@
   r13 = LD_SH(input + 13 * 16);
   r14 = LD_SH(input + 14 * 16);
 
-  k0 = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
   MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
-  k0 = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
   MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
   BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
   BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
@@ -391,9 +391,9 @@
   ST8x1_UB(res0, dst);
   ST8x1_UB(res1, dst + 15 * dst_stride);
 
-  k0 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
-  k1 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  k1 = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
   MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
   BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
   out8 = -out8;
@@ -408,9 +408,9 @@
   ST8x1_UB(res8, dst + dst_stride);
   ST8x1_UB(res9, dst + 14 * dst_stride);
 
-  k0 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k2 = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k2 = AOM_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
   MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
   out4 = -out4;
   SRARI_H2_SH(out4, out5, 6);
@@ -435,8 +435,8 @@
   ST8x1_UB(res12, dst + 2 * dst_stride);
   ST8x1_UB(res13, dst + 13 * dst_stride);
 
-  k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k3 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k3 = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
   MADD_SHORT(out6, out7, k0, k3, out6, out7);
   SRARI_H2_SH(out6, out7, 6);
   dst6 = LD_UB(dst + 4 * dst_stride);
@@ -459,8 +459,8 @@
   ST8x1_UB(res10, dst + 6 * dst_stride);
   ST8x1_UB(res11, dst + 9 * dst_stride);
 
-  k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k1 = AOM_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
   MADD_SHORT(h10, h11, k1, k2, out2, out3);
   SRARI_H2_SH(out2, out3, 6);
   dst2 = LD_UB(dst + 7 * dst_stride);
diff --git a/aom_dsp/mips/idct32x32_msa.c b/aom_dsp/mips/idct32x32_msa.c
index e090c62..47fad35 100644
--- a/aom_dsp/mips/idct32x32_msa.c
+++ b/aom_dsp/mips/idct32x32_msa.c
@@ -553,11 +553,11 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
   SRARI_H4_SH(m0, m2, m4, m6, 6);
-  VPX_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
+  AOM_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
   SRARI_H4_SH(m0, m2, m4, m6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
+  AOM_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
                       m6);
 
   /* Load 8 & Store 8 */
@@ -572,11 +572,11 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
+  AOM_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
+  AOM_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
                       m7);
 
   /* Load 8 & Store 8 */
@@ -591,11 +591,11 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
+  AOM_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
+  AOM_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
                       n6);
 
   /* Load 8 & Store 8 */
@@ -610,11 +610,11 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
+  AOM_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
+  AOM_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
                       n7);
 }
 
@@ -629,7 +629,7 @@
                                    dst_stride);
 }
 
-void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
                                 int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
@@ -649,7 +649,7 @@
   }
 }
 
-void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
                               int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
@@ -693,7 +693,7 @@
   }
 }
 
-void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
   int32_t i;
   int16_t out;
diff --git a/aom_dsp/mips/idct4x4_msa.c b/aom_dsp/mips/idct4x4_msa.c
index 956b5f5..446f402 100644
--- a/aom_dsp/mips/idct4x4_msa.c
+++ b/aom_dsp/mips/idct4x4_msa.c
@@ -10,7 +10,7 @@
 
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void aom_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
   v8i16 in0, in1, in2, in3;
   v4i32 in0_r, in1_r, in2_r, in3_r, in4_r;
@@ -47,7 +47,7 @@
   ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
 }
 
-void vpx_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
   int16_t a1, e1;
   v8i16 in1, in0 = { 0 };
@@ -67,7 +67,7 @@
   ADDBLK_ST4x4_UB(in0, in1, in1, in1, dst, dst_stride);
 }
 
-void vpx_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
   v8i16 in0, in1, in2, in3;
 
@@ -75,16 +75,16 @@
   LD4x4_SH(input, in0, in1, in2, in3);
   /* rows */
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
   /* columns */
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
   /* rounding (add 2^3, divide by 2^4) */
   SRARI_H4_SH(in0, in1, in2, in3, 4);
   ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
 }
 
-void vpx_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
   int16_t out;
   v8i16 vec;
diff --git a/aom_dsp/mips/idct8x8_msa.c b/aom_dsp/mips/idct8x8_msa.c
index 420433f..8366ef8 100644
--- a/aom_dsp/mips/idct8x8_msa.c
+++ b/aom_dsp/mips/idct8x8_msa.c
@@ -10,7 +10,7 @@
 
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
@@ -21,24 +21,24 @@
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+  AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                  in4, in5, in6, in7);
   /* columns transform */
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+  AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                  in4, in5, in6, in7);
   /* final rounding (add 2^4, divide by 2^5) and shift */
   SRARI_H4_SH(in0, in1, in2, in3, 5);
   SRARI_H4_SH(in4, in5, in6, in7, 5);
   /* add block and store 8x8 */
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
-void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
   v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
@@ -51,10 +51,10 @@
 
   /* stage1 */
   ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
-  k0 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k2 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k2 = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -63,10 +63,10 @@
 
   /* stage2 */
   ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
-  k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k1 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
-  k2 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k3 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  k0 = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k1 = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k2 = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k3 = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -76,7 +76,7 @@
   /* stage3 */
   s0 = __msa_ilvr_h(s6, s5);
 
-  k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  k1 = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
   DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
   SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
@@ -86,7 +86,7 @@
               in7);
   TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+  AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                  in4, in5, in6, in7);
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
@@ -94,12 +94,12 @@
   SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
-void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
   int16_t out;
   int32_t val;
@@ -110,7 +110,7 @@
   val = ROUND_POWER_OF_TWO(out, 5);
   vec = __msa_fill_h(val);
 
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
   dst += (4 * dst_stride);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
 }
diff --git a/aom_dsp/mips/intrapred16_dspr2.c b/aom_dsp/mips/intrapred16_dspr2.c
index b9bb55c..a5c0a64 100644
--- a/aom_dsp/mips/intrapred16_dspr2.c
+++ b/aom_dsp/mips/intrapred16_dspr2.c
@@ -11,7 +11,7 @@
 #include "aom_dsp/mips/common_dspr2.h"
 
 #if HAVE_DSPR2
-void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
   int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
@@ -155,7 +155,7 @@
       : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
-void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
   int32_t expected_dc;
   int32_t average;
diff --git a/aom_dsp/mips/intrapred4_dspr2.c b/aom_dsp/mips/intrapred4_dspr2.c
index 4494bc8..c26d5d3 100644
--- a/aom_dsp/mips/intrapred4_dspr2.c
+++ b/aom_dsp/mips/intrapred4_dspr2.c
@@ -11,7 +11,7 @@
 #include "aom_dsp/mips/common_dspr2.h"
 
 #if HAVE_DSPR2
-void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int32_t tmp1, tmp2, tmp3, tmp4;
 
@@ -37,7 +37,7 @@
       : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
-void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int32_t expected_dc;
   int32_t average;
@@ -78,7 +78,7 @@
         [stride] "r"(stride));
 }
 
-void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int32_t abovel, abover;
   int32_t left0, left1, left2, left3;
@@ -86,7 +86,7 @@
   int32_t resl;
   int32_t resr;
   int32_t top_left;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
   __asm__ __volatile__(
       "ulw             %[resl],       (%[above])                         \n\t"
diff --git a/aom_dsp/mips/intrapred8_dspr2.c b/aom_dsp/mips/intrapred8_dspr2.c
index f85209b..fe0d339 100644
--- a/aom_dsp/mips/intrapred8_dspr2.c
+++ b/aom_dsp/mips/intrapred8_dspr2.c
@@ -11,7 +11,7 @@
 #include "aom_dsp/mips/common_dspr2.h"
 
 #if HAVE_DSPR2
-void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
 
@@ -64,7 +64,7 @@
       : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
-void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int32_t expected_dc;
   int32_t average;
@@ -146,7 +146,7 @@
         [stride] "r"(stride));
 }
 
-void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int32_t abovel, abover;
   int32_t abovel_1, abover_1;
@@ -154,7 +154,7 @@
   int32_t res0, res1, res2, res3;
   int32_t reshw;
   int32_t top_left;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
   __asm__ __volatile__(
       "ulw             %[reshw],       (%[above])                         \n\t"
diff --git a/aom_dsp/mips/intrapred_msa.c b/aom_dsp/mips/intrapred_msa.c
index 17dd57b..3d868cd 100644
--- a/aom_dsp/mips/intrapred_msa.c
+++ b/aom_dsp/mips/intrapred_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 #define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \
@@ -551,125 +551,125 @@
   }
 }
 
-void vpx_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                              const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_vert_4x4_msa(above, dst, y_stride);
 }
 
-void vpx_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                              const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_vert_8x8_msa(above, dst, y_stride);
 }
 
-void vpx_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_vert_16x16_msa(above, dst, y_stride);
 }
 
-void vpx_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_vert_32x32_msa(above, dst, y_stride);
 }
 
-void vpx_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                              const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_horiz_4x4_msa(left, dst, y_stride);
 }
 
-void vpx_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                              const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_horiz_8x8_msa(left, dst, y_stride);
 }
 
-void vpx_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_horiz_16x16_msa(left, dst, y_stride);
 }
 
-void vpx_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_horiz_32x32_msa(left, dst, y_stride);
 }
 
-void vpx_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                               const uint8_t *above, const uint8_t *left) {
   intra_predict_dc_4x4_msa(above, left, dst, y_stride);
 }
 
-void vpx_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                               const uint8_t *above, const uint8_t *left) {
   intra_predict_dc_8x8_msa(above, left, dst, y_stride);
 }
 
-void vpx_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
   intra_predict_dc_16x16_msa(above, left, dst, y_stride);
 }
 
-void vpx_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
   intra_predict_dc_32x32_msa(above, left, dst, y_stride);
 }
 
-void vpx_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                                   const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_dc_tl_4x4_msa(above, dst, y_stride);
 }
 
-void vpx_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                                   const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_dc_tl_8x8_msa(above, dst, y_stride);
 }
 
-void vpx_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_dc_tl_16x16_msa(above, dst, y_stride);
 }
 
-void vpx_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)left;
 
   intra_predict_dc_tl_32x32_msa(above, dst, y_stride);
 }
 
-void vpx_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_dc_tl_4x4_msa(left, dst, y_stride);
 }
 
-void vpx_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                                    const uint8_t *above, const uint8_t *left) {
   (void)above;
 
   intra_predict_dc_tl_8x8_msa(left, dst, y_stride);
 }
 
-void vpx_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)above;
@@ -677,7 +677,7 @@
   intra_predict_dc_tl_16x16_msa(left, dst, y_stride);
 }
 
-void vpx_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   (void)above;
@@ -685,7 +685,7 @@
   intra_predict_dc_tl_32x32_msa(left, dst, y_stride);
 }
 
-void vpx_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                                   const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -693,7 +693,7 @@
   intra_predict_128dc_4x4_msa(dst, y_stride);
 }
 
-void vpx_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                                   const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -701,7 +701,7 @@
   intra_predict_128dc_8x8_msa(dst, y_stride);
 }
 
-void vpx_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -709,7 +709,7 @@
   intra_predict_128dc_16x16_msa(dst, y_stride);
 }
 
-void vpx_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                     const uint8_t *above, const uint8_t *left) {
   (void)above;
   (void)left;
@@ -717,22 +717,22 @@
   intra_predict_128dc_32x32_msa(dst, y_stride);
 }
 
-void vpx_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
                               const uint8_t *above, const uint8_t *left) {
   intra_predict_tm_4x4_msa(above, left, dst, y_stride);
 }
 
-void vpx_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
                               const uint8_t *above, const uint8_t *left) {
   intra_predict_tm_8x8_msa(above, left, dst, y_stride);
 }
 
-void vpx_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
   intra_predict_tm_16x16_msa(above, left, dst, y_stride);
 }
 
-void vpx_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
   intra_predict_tm_32x32_msa(above, left, dst, y_stride);
 }
diff --git a/aom_dsp/mips/inv_txfm_dspr2.h b/aom_dsp/mips/inv_txfm_dspr2.h
index 5c9a020..1642c11 100644
--- a/aom_dsp/mips/inv_txfm_dspr2.h
+++ b/aom_dsp/mips/inv_txfm_dspr2.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
-#define VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
+#ifndef AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
+#define AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_dsp/mips/common_dspr2.h"
 
@@ -57,10 +57,10 @@
     out;                                                                       \
   })
 
-void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                    int dest_stride);
-void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output);
-void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output);
+void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                      int dest_stride);
 void iadst4_dspr2(const int16_t *input, int16_t *output);
 void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows);
@@ -76,4 +76,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
+#endif  // AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
diff --git a/aom_dsp/mips/inv_txfm_msa.h b/aom_dsp/mips/inv_txfm_msa.h
index 14d38b8..d9478c9 100644
--- a/aom_dsp/mips/inv_txfm_msa.h
+++ b/aom_dsp/mips/inv_txfm_msa.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_INV_TXFM_MSA_H_
-#define VPX_DSP_MIPS_INV_TXFM_MSA_H_
+#ifndef AOM_DSP_MIPS_INV_TXFM_MSA_H_
+#define AOM_DSP_MIPS_INV_TXFM_MSA_H_
 
 #include "aom_dsp/mips/macros_msa.h"
 #include "aom_dsp/mips/txfm_macros_msa.h"
 #include "aom_dsp/txfm_common.h"
 
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+#define AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
                   out3, out4, out5, out6, out7)                              \
   {                                                                          \
     v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                       \
@@ -79,7 +79,7 @@
     out5 = -out5;                                                            \
   }
 
-#define VPX_SET_COSPI_PAIR(c0_h, c1_h)  \
+#define AOM_SET_COSPI_PAIR(c0_h, c1_h)  \
   ({                                    \
     v8i16 out0_m, r0_m, r1_m;           \
                                         \
@@ -90,7 +90,7 @@
     out0_m;                             \
   })
 
-#define VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)               \
+#define AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)               \
   {                                                                            \
     uint8_t *dst_m = (uint8_t *)(dst);                                         \
     v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                                      \
@@ -108,19 +108,19 @@
     ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                               \
   }
 
-#define VPX_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3)             \
+#define AOM_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3)             \
   {                                                                         \
     v8i16 c0_m, c1_m, c2_m, c3_m;                                           \
     v8i16 step0_m, step1_m;                                                 \
     v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
                                                                             \
-    c0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                    \
-    c1_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                   \
+    c0_m = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                    \
+    c1_m = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                   \
     step0_m = __msa_ilvr_h(in2, in0);                                       \
     DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);              \
                                                                             \
-    c2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                    \
-    c3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                     \
+    c2_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                    \
+    c3_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                     \
     step1_m = __msa_ilvr_h(in3, in1);                                       \
     DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);              \
     SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);            \
@@ -131,7 +131,7 @@
                 out0, out1, out2, out3);                                    \
   }
 
-#define VPX_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3)       \
+#define AOM_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3)       \
   {                                                                    \
     v8i16 res0_m, res1_m, c0_m, c1_m;                                  \
     v8i16 k1_m, k2_m, k3_m, k4_m;                                      \
@@ -181,7 +181,7 @@
     PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);           \
   }
 
-#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)    \
+#define AV1_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)    \
   ({                                                  \
     v8i16 c0_m, c1_m;                                 \
                                                       \
@@ -192,7 +192,7 @@
   })
 
 /* multiply and add macro */
-#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1,  \
+#define AV1_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1,  \
                  out2, out3)                                                  \
   {                                                                           \
     v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                         \
@@ -211,7 +211,7 @@
   }
 
 /* idct 8x8 macro */
-#define VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,    \
+#define AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,    \
                        out2, out3, out4, out5, out6, out7)                    \
   {                                                                           \
     v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;             \
@@ -220,13 +220,13 @@
     v8i16 mask_m = { cospi_28_64, cospi_4_64,  cospi_20_64,  cospi_12_64,     \
                      cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };  \
                                                                               \
-    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                  \
-    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                  \
-    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                  \
-    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                  \
-    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
+    k0_m = AV1_SET_CONST_PAIR(mask_m, 0, 5);                                  \
+    k1_m = AV1_SET_CONST_PAIR(mask_m, 1, 0);                                  \
+    k2_m = AV1_SET_CONST_PAIR(mask_m, 6, 3);                                  \
+    k3_m = AV1_SET_CONST_PAIR(mask_m, 3, 2);                                  \
+    AV1_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
     SUB2(in1, in3, in7, in5, res0_m, res1_m);                                 \
-    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                  \
+    k0_m = AV1_SET_CONST_PAIR(mask_m, 4, 7);                                  \
     k1_m = __msa_splati_h(mask_m, 4);                                         \
                                                                               \
     ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                              \
@@ -236,15 +236,15 @@
     tp4_m = in1 + in3;                                                        \
     PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                \
     tp7_m = in7 + in5;                                                        \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                      \
-    k3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                       \
-    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                      \
+    k3_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                       \
+    AV1_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
     BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);              \
     BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, out0, \
                 out1, out2, out3, out4, out5, out6, out7);                    \
   }
 
-#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,   \
+#define AV1_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,   \
                         out2, out3, out4, out5, out6, out7)                   \
   {                                                                           \
     v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                     \
@@ -258,13 +258,13 @@
       -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0         \
     };                                                                        \
                                                                               \
-    k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask1_m, 0, 1);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask1_m, 1, 2);                                 \
     ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                      \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
                 r1_m, r2_m, r3_m);                                            \
-    k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask1_m, 6, 7);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask2_m, 0, 1);                                 \
     ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                      \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m,     \
                 r5_m, r6_m, r7_m);                                            \
@@ -276,13 +276,13 @@
          m3_m);                                                               \
     SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
     PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                          \
-    k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask1_m, 3, 4);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask1_m, 4, 5);                                 \
     ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                      \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
                 r1_m, r2_m, r3_m);                                            \
-    k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask2_m, 2, 3);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask2_m, 3, 4);                                 \
     ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                      \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m,     \
                 r5_m, r6_m, r7_m);                                            \
@@ -296,12 +296,12 @@
     PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                          \
     ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                      \
     BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);         \
-    k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask2_m, 5, 6);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask2_m, 6, 7);                                 \
     ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                    \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
                 r1_m, r2_m, r3_m);                                            \
-    k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask3_m, 0, 1);                                 \
     DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, r4_m, r5_m,   \
                 r6_m, r7_m);                                                  \
     ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m,    \
@@ -312,8 +312,8 @@
          m3_m);                                                               \
     SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
     PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                            \
-    k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                                 \
-    k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                                 \
+    k0_m = AV1_SET_CONST_PAIR(mask3_m, 2, 2);                                 \
+    k1_m = AV1_SET_CONST_PAIR(mask3_m, 2, 3);                                 \
     ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                      \
     DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, m0_m,     \
                 m1_m, m2_m, m3_m);                                            \
@@ -331,7 +331,7 @@
     out7 = -in7;                                                              \
   }
 
-#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,     \
+#define AV1_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,     \
                          r12, r13, r14, r15, out0, out1, out2, out3, out4,     \
                          out5, out6, out7, out8, out9, out10, out11, out12,    \
                          out13, out14, out15)                                  \
@@ -343,38 +343,38 @@
     v8i16 k0_m, k1_m, k2_m, k3_m;                                              \
                                                                                \
     /* stage 1 */                                                              \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);                        \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);                       \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);                       \
-    k3_m = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);                      \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);                        \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);                       \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);                       \
+    k3_m = AOM_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);                      \
     MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, g0_m, g1_m, g2_m, g3_m);  \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);                        \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);                       \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);                       \
-    k3_m = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);                      \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);                        \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);                       \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);                       \
+    k3_m = AOM_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);                      \
     MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, g4_m, g5_m, g6_m, g7_m); \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);                        \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);                       \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);                        \
-    k3_m = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);                       \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);                        \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);                       \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);                        \
+    k3_m = AOM_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);                       \
     MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, g8_m, g9_m, g10_m,       \
             g11_m);                                                            \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);                       \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);                      \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);                        \
-    k3_m = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);                       \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);                       \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);                      \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);                        \
+    k3_m = AOM_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);                       \
     MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, g12_m, g13_m, g14_m,      \
             g15_m);                                                            \
                                                                                \
     /* stage 2 */                                                              \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);                        \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);                       \
-    k2_m = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);                       \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);                        \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);                       \
+    k2_m = AOM_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);                       \
     MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, h0_m, h1_m, h2_m, \
             h3_m);                                                             \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);                       \
-    k1_m = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);                      \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);                      \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);                       \
+    k1_m = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);                      \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);                      \
     MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, h4_m, h5_m,      \
             h6_m, h7_m);                                                       \
     BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);             \
@@ -383,29 +383,29 @@
                                                                                \
     /* stage 3 */                                                              \
     BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);           \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
-    k1_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
-    k2_m = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);                       \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
+    k1_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
+    k2_m = AOM_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);                       \
     MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, out4, out6, out5,  \
             out7);                                                             \
     MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, out12, out14,      \
             out13, out15);                                                     \
                                                                                \
     /* stage 4 */                                                              \
-    k0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                       \
-    k1_m = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);                     \
-    k2_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                      \
-    k3_m = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);                      \
+    k0_m = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                       \
+    k1_m = AOM_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);                     \
+    k2_m = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                      \
+    k3_m = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);                      \
     MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);                          \
     MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);                            \
     MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);                        \
     MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);                        \
   }
 
-void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride);
-void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output);
-void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output);
+void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride);
-void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output);
-#endif  // VPX_DSP_MIPS_INV_TXFM_MSA_H_
+void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output);
+#endif  // AOM_DSP_MIPS_INV_TXFM_MSA_H_
diff --git a/aom_dsp/mips/itrans16_dspr2.c b/aom_dsp/mips/itrans16_dspr2.c
index 8d184cb..cb3659e 100644
--- a/aom_dsp/mips/itrans16_dspr2.c
+++ b/aom_dsp/mips/itrans16_dspr2.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
 #include "aom_dsp/txfm_common.h"
 
@@ -401,17 +401,17 @@
   int result1, result2, result3, result4;
   const int const_2_power_13 = 8192;
   uint8_t *dest_pix;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
-  /* prefetch vpx_ff_cropTbl */
-  prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl + 32);
-  prefetch_load(vpx_ff_cropTbl + 64);
-  prefetch_load(vpx_ff_cropTbl + 96);
-  prefetch_load(vpx_ff_cropTbl + 128);
-  prefetch_load(vpx_ff_cropTbl + 160);
-  prefetch_load(vpx_ff_cropTbl + 192);
-  prefetch_load(vpx_ff_cropTbl + 224);
+  /* prefetch aom_ff_cropTbl */
+  prefetch_load(aom_ff_cropTbl);
+  prefetch_load(aom_ff_cropTbl + 32);
+  prefetch_load(aom_ff_cropTbl + 64);
+  prefetch_load(aom_ff_cropTbl + 96);
+  prefetch_load(aom_ff_cropTbl + 128);
+  prefetch_load(aom_ff_cropTbl + 160);
+  prefetch_load(aom_ff_cropTbl + 192);
+  prefetch_load(aom_ff_cropTbl + 224);
 
   for (i = 0; i < 16; ++i) {
     dest_pix = (dest + i);
@@ -868,7 +868,7 @@
   }
 }
 
-void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
                                  int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   uint32_t pos = 45;
@@ -883,7 +883,7 @@
   idct16_cols_add_blk_dspr2(out, dest, dest_stride);
 }
 
-void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
                                 int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *outptr = out;
@@ -927,7 +927,7 @@
   idct16_cols_add_blk_dspr2(out, dest, dest_stride);
 }
 
-void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
                                int dest_stride) {
   uint32_t pos = 45;
   int32_t out;
diff --git a/aom_dsp/mips/itrans32_cols_dspr2.c b/aom_dsp/mips/itrans32_cols_dspr2.c
index 7997131..8918bd5 100644
--- a/aom_dsp/mips/itrans32_cols_dspr2.c
+++ b/aom_dsp/mips/itrans32_cols_dspr2.c
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
 #include "aom_dsp/txfm_common.h"
 
 #if HAVE_DSPR2
-void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                    int dest_stride) {
   int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6;
   int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13;
@@ -35,17 +35,17 @@
   int i, temp21;
   uint8_t *dest_pix, *dest_pix1;
   const int const_2_power_13 = 8192;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
-  /* prefetch vpx_ff_cropTbl */
-  prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl + 32);
-  prefetch_load(vpx_ff_cropTbl + 64);
-  prefetch_load(vpx_ff_cropTbl + 96);
-  prefetch_load(vpx_ff_cropTbl + 128);
-  prefetch_load(vpx_ff_cropTbl + 160);
-  prefetch_load(vpx_ff_cropTbl + 192);
-  prefetch_load(vpx_ff_cropTbl + 224);
+  /* prefetch aom_ff_cropTbl */
+  prefetch_load(aom_ff_cropTbl);
+  prefetch_load(aom_ff_cropTbl + 32);
+  prefetch_load(aom_ff_cropTbl + 64);
+  prefetch_load(aom_ff_cropTbl + 96);
+  prefetch_load(aom_ff_cropTbl + 128);
+  prefetch_load(aom_ff_cropTbl + 160);
+  prefetch_load(aom_ff_cropTbl + 192);
+  prefetch_load(aom_ff_cropTbl + 224);
 
   for (i = 0; i < 32; ++i) {
     dest_pix = dest + i;
diff --git a/aom_dsp/mips/itrans32_dspr2.c b/aom_dsp/mips/itrans32_dspr2.c
index 74248b3..5ce34d1 100644
--- a/aom_dsp/mips/itrans32_dspr2.c
+++ b/aom_dsp/mips/itrans32_dspr2.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
 #include "aom_dsp/txfm_common.h"
 
@@ -835,7 +835,7 @@
   }
 }
 
-void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
                                   int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
   int16_t *outptr = out;
@@ -850,10 +850,10 @@
   idct32_rows_dspr2(input, outptr, 32);
 
   // Columns
-  vpx_idct32_cols_add_blk_dspr2(out, dest, dest_stride);
+  aom_idct32_cols_add_blk_dspr2(out, dest, dest_stride);
 }
 
-void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
                                 int stride) {
   DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
   int16_t *outptr = out;
@@ -908,10 +908,10 @@
   }
 
   // Columns
-  vpx_idct32_cols_add_blk_dspr2(out, dest, stride);
+  aom_idct32_cols_add_blk_dspr2(out, dest, stride);
 }
 
-void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
                                int stride) {
   int r, out;
   int32_t a1, absa1;
diff --git a/aom_dsp/mips/itrans4_dspr2.c b/aom_dsp/mips/itrans4_dspr2.c
index d6ea667..9453e95 100644
--- a/aom_dsp/mips/itrans4_dspr2.c
+++ b/aom_dsp/mips/itrans4_dspr2.c
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
 #include "aom_dsp/txfm_common.h"
 
 #if HAVE_DSPR2
-void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
+void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
   int16_t step_0, step_1, step_2, step_3;
   int Temp0, Temp1, Temp2, Temp3;
   const int const_2_power_13 = 8192;
@@ -95,24 +95,24 @@
   }
 }
 
-void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                      int dest_stride) {
   int16_t step_0, step_1, step_2, step_3;
   int Temp0, Temp1, Temp2, Temp3;
   const int const_2_power_13 = 8192;
   int i;
   uint8_t *dest_pix;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
-  /* prefetch vpx_ff_cropTbl */
-  prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl + 32);
-  prefetch_load(vpx_ff_cropTbl + 64);
-  prefetch_load(vpx_ff_cropTbl + 96);
-  prefetch_load(vpx_ff_cropTbl + 128);
-  prefetch_load(vpx_ff_cropTbl + 160);
-  prefetch_load(vpx_ff_cropTbl + 192);
-  prefetch_load(vpx_ff_cropTbl + 224);
+  /* prefetch aom_ff_cropTbl */
+  prefetch_load(aom_ff_cropTbl);
+  prefetch_load(aom_ff_cropTbl + 32);
+  prefetch_load(aom_ff_cropTbl + 64);
+  prefetch_load(aom_ff_cropTbl + 96);
+  prefetch_load(aom_ff_cropTbl + 128);
+  prefetch_load(aom_ff_cropTbl + 160);
+  prefetch_load(aom_ff_cropTbl + 192);
+  prefetch_load(aom_ff_cropTbl + 224);
 
   for (i = 0; i < 4; ++i) {
     dest_pix = (dest + i);
@@ -215,7 +215,7 @@
   }
 }
 
-void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
                               int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
   int16_t *outptr = out;
@@ -227,13 +227,13 @@
                        : [pos] "r"(pos));
 
   // Rows
-  vpx_idct4_rows_dspr2(input, outptr);
+  aom_idct4_rows_dspr2(input, outptr);
 
   // Columns
-  vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+  aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
 }
 
-void vpx_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
                              int dest_stride) {
   int a1, absa1;
   int r;
diff --git a/aom_dsp/mips/itrans8_dspr2.c b/aom_dsp/mips/itrans8_dspr2.c
index 4cee3d0..c1d1141 100644
--- a/aom_dsp/mips/itrans8_dspr2.c
+++ b/aom_dsp/mips/itrans8_dspr2.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
 #include "aom_dsp/txfm_common.h"
 
@@ -199,17 +199,17 @@
   int i;
   const int const_2_power_13 = 8192;
   uint8_t *dest_pix;
-  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *cm = aom_ff_cropTbl;
 
-  /* prefetch vpx_ff_cropTbl */
-  prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl + 32);
-  prefetch_load(vpx_ff_cropTbl + 64);
-  prefetch_load(vpx_ff_cropTbl + 96);
-  prefetch_load(vpx_ff_cropTbl + 128);
-  prefetch_load(vpx_ff_cropTbl + 160);
-  prefetch_load(vpx_ff_cropTbl + 192);
-  prefetch_load(vpx_ff_cropTbl + 224);
+  /* prefetch aom_ff_cropTbl */
+  prefetch_load(aom_ff_cropTbl);
+  prefetch_load(aom_ff_cropTbl + 32);
+  prefetch_load(aom_ff_cropTbl + 64);
+  prefetch_load(aom_ff_cropTbl + 96);
+  prefetch_load(aom_ff_cropTbl + 128);
+  prefetch_load(aom_ff_cropTbl + 160);
+  prefetch_load(aom_ff_cropTbl + 192);
+  prefetch_load(aom_ff_cropTbl + 224);
 
   for (i = 0; i < 8; ++i) {
     dest_pix = (dest + i);
@@ -438,7 +438,7 @@
   }
 }
 
-void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
                               int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
   int16_t *outptr = out;
@@ -454,7 +454,7 @@
   idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
 }
 
-void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
                               int dest_stride) {
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
   int16_t *outptr = out;
@@ -493,7 +493,7 @@
   idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
 }
 
-void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
                              int dest_stride) {
   uint32_t pos = 45;
   int32_t out;
diff --git a/aom_dsp/mips/loopfilter_16_msa.c b/aom_dsp/mips/loopfilter_16_msa.c
index ff7dc05..de7f754 100644
--- a/aom_dsp/mips/loopfilter_16_msa.c
+++ b/aom_dsp/mips/loopfilter_16_msa.c
@@ -11,7 +11,7 @@
 #include "aom_ports/mem.h"
 #include "aom_dsp/mips/loopfilter_msa.h"
 
-int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
+int32_t aom_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
                                  const uint8_t *b_limit_ptr,
                                  const uint8_t *limit_ptr,
                                  const uint8_t *thresh_ptr) {
@@ -34,8 +34,8 @@
   /* mask and hev */
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
@@ -44,12 +44,12 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -77,7 +77,7 @@
   }
 }
 
-void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
+void aom_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
   v16u8 flat, flat2, filter8;
   v16i8 zero = { 0 };
   v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
@@ -92,7 +92,7 @@
 
   LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
-  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     LD_UB4(filter48, 16, p2, p1, p0, q0);
@@ -403,7 +403,7 @@
   }
 }
 
-void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
                                     const uint8_t *b_limit_ptr,
                                     const uint8_t *limit_ptr,
                                     const uint8_t *thresh_ptr, int32_t count) {
@@ -412,11 +412,11 @@
 
   (void)count;
 
-  early_exit = vpx_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr,
+  early_exit = aom_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr,
                                         limit_ptr, thresh_ptr);
 
   if (0 == early_exit) {
-    vpx_hz_lpf_t16_16w(src, pitch, filter48);
+    aom_hz_lpf_t16_16w(src, pitch, filter48);
   }
 }
 
@@ -447,8 +447,8 @@
 
     LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                  mask, flat);
-    VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-    VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+    AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+    AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                        q1_out);
 
     flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -464,7 +464,7 @@
       ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                  zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                  q3_r);
-      VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+      AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                   p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
       /* convert 16 bit output data into 8 bit */
@@ -484,7 +484,7 @@
       LD_UB4((src - 8 * pitch), pitch, p7, p6, p5, p4);
       LD_UB4(src + (4 * pitch), pitch, q4, q5, q6, q7);
 
-      VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+      AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
       if (__msa_test_bz_v(flat2)) {
         p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
@@ -638,19 +638,19 @@
       }
     }
   } else {
-    vpx_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr,
+    aom_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr,
                                    thresh_ptr, count);
   }
 }
 
-void vpx_lpf_horizontal_edge_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_edge_8_msa(uint8_t *src, int32_t pitch,
                                    const uint8_t *b_limit_ptr,
                                    const uint8_t *limit_ptr,
                                    const uint8_t *thresh_ptr) {
   mb_lpf_horizontal_edge(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, 1);
 }
 
-void vpx_lpf_horizontal_edge_16_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_edge_16_msa(uint8_t *src, int32_t pitch,
                                     const uint8_t *b_limit_ptr,
                                     const uint8_t *limit_ptr,
                                     const uint8_t *thresh_ptr) {
@@ -752,7 +752,7 @@
   ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
 }
 
-int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
+int32_t aom_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
                                 uint8_t *src_org, int32_t pitch_org,
                                 const uint8_t *b_limit_ptr,
                                 const uint8_t *limit_ptr,
@@ -776,9 +776,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
   /* flat4 */
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -790,7 +790,7 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     /* convert 16 bit output data into 8 bit */
@@ -819,7 +819,7 @@
   }
 }
 
-int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
+int32_t aom_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
                           uint8_t *filter48) {
   v16i8 zero = { 0 };
   v16u8 filter8, flat, flat2;
@@ -834,7 +834,7 @@
   LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
 
-  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     v8i16 vec0, vec1, vec2, vec3, vec4;
@@ -1039,7 +1039,7 @@
   }
 }
 
-void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
                              const uint8_t *b_limit_ptr,
                              const uint8_t *limit_ptr,
                              const uint8_t *thresh_ptr) {
@@ -1050,11 +1050,11 @@
   transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16);
 
   early_exit =
-      vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
+      aom_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
                               pitch, b_limit_ptr, limit_ptr, thresh_ptr);
 
   if (0 == early_exit) {
-    early_exit = vpx_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
+    early_exit = aom_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
                                    &filter48[0]);
 
     if (0 == early_exit) {
@@ -1063,7 +1063,7 @@
   }
 }
 
-int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
+int32_t aom_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
                                  uint8_t *src_org, int32_t pitch,
                                  const uint8_t *b_limit_ptr,
                                  const uint8_t *limit_ptr,
@@ -1089,9 +1089,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
   /* flat4 */
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -1108,11 +1108,11 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -1140,7 +1140,7 @@
   }
 }
 
-int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
+int32_t aom_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
                            uint8_t *filter48) {
   v16u8 flat, flat2, filter8;
   v16i8 zero = { 0 };
@@ -1157,7 +1157,7 @@
   LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
 
-  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
@@ -1461,7 +1461,7 @@
   }
 }
 
-void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
                                   const uint8_t *b_limit_ptr,
                                   const uint8_t *limit_ptr,
                                   const uint8_t *thresh_ptr) {
@@ -1472,11 +1472,11 @@
   transpose_16x16((src - 8), pitch, &transposed_input[0], 16);
 
   early_exit =
-      vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
+      aom_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
                                pitch, b_limit_ptr, limit_ptr, thresh_ptr);
 
   if (0 == early_exit) {
-    early_exit = vpx_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
+    early_exit = aom_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
                                     &filter48[0]);
 
     if (0 == early_exit) {
diff --git a/aom_dsp/mips/loopfilter_4_msa.c b/aom_dsp/mips/loopfilter_4_msa.c
index 9411d96..6e95b53 100644
--- a/aom_dsp/mips/loopfilter_4_msa.c
+++ b/aom_dsp/mips/loopfilter_4_msa.c
@@ -10,7 +10,7 @@
 
 #include "aom_dsp/mips/loopfilter_msa.h"
 
-void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
                               const uint8_t *b_limit_ptr,
                               const uint8_t *limit_ptr,
                               const uint8_t *thresh_ptr) {
@@ -27,7 +27,7 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
-  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
   p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
@@ -36,7 +36,7 @@
   SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
 }
 
-void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
                                    const uint8_t *b_limit0_ptr,
                                    const uint8_t *limit0_ptr,
                                    const uint8_t *thresh0_ptr,
@@ -63,12 +63,12 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
                mask, flat);
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
 
   ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
 }
 
-void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
                             const uint8_t *b_limit_ptr,
                             const uint8_t *limit_ptr,
                             const uint8_t *thresh_ptr) {
@@ -86,7 +86,7 @@
                      q3);
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
-  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
   ILVRL_H2_SH(vec1, vec0, vec2, vec3);
 
@@ -96,7 +96,7 @@
   ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
 }
 
-void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
                                  const uint8_t *b_limit0_ptr,
                                  const uint8_t *limit0_ptr,
                                  const uint8_t *thresh0_ptr,
@@ -132,7 +132,7 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
                mask, flat);
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
   ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
   ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
diff --git a/aom_dsp/mips/loopfilter_8_msa.c b/aom_dsp/mips/loopfilter_8_msa.c
index 4f745da..0208c69 100644
--- a/aom_dsp/mips/loopfilter_8_msa.c
+++ b/aom_dsp/mips/loopfilter_8_msa.c
@@ -10,7 +10,7 @@
 
 #include "aom_dsp/mips/loopfilter_msa.h"
 
-void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
                               const uint8_t *b_limit_ptr,
                               const uint8_t *limit_ptr,
                               const uint8_t *thresh_ptr) {
@@ -31,8 +31,8 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -45,7 +45,7 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                 p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
     /* convert 16 bit output data into 8 bit */
@@ -78,7 +78,7 @@
   }
 }
 
-void vpx_lpf_horizontal_8_dual_msa(
+void aom_lpf_horizontal_8_dual_msa(
     uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1,
     const uint8_t *thresh1) {
@@ -109,20 +109,20 @@
   /* mask and hev */
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -149,7 +149,7 @@
   }
 }
 
-void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
                             const uint8_t *b_limit_ptr,
                             const uint8_t *limit_ptr,
                             const uint8_t *thresh_ptr) {
@@ -175,9 +175,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
   /* flat4 */
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -193,7 +193,7 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     /* convert 16 bit output data into 8 bit */
     PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
@@ -224,7 +224,7 @@
   }
 }
 
-void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
                                  const uint8_t *b_limit0, const uint8_t *limit0,
                                  const uint8_t *thresh0,
                                  const uint8_t *b_limit1, const uint8_t *limit1,
@@ -268,9 +268,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
                mask, flat);
   /* flat4 */
-  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -285,14 +285,14 @@
   } else {
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
                q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
 
     /* filter8 */
-    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.c b/aom_dsp/mips/loopfilter_filters_dspr2.c
index dc59838..d6f6213 100644
--- a/aom_dsp/mips/loopfilter_filters_dspr2.c
+++ b/aom_dsp/mips/loopfilter_filters_dspr2.c
@@ -10,16 +10,16 @@
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/mips/common_dspr2.h"
 #include "aom_dsp/mips/loopfilter_filters_dspr2.h"
 #include "aom_dsp/mips/loopfilter_macros_dspr2.h"
 #include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
                                 const uint8_t *blimit, const uint8_t *limit,
                                 const uint8_t *thresh) {
   uint8_t i;
@@ -104,7 +104,7 @@
   }
 }
 
-void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
+void aom_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
                               const uint8_t *blimit, const uint8_t *limit,
                               const uint8_t *thresh) {
   uint8_t i;
@@ -281,46 +281,46 @@
   }
 }
 
-void vpx_lpf_horizontal_4_dual_dspr2(
+void aom_lpf_horizontal_4_dual_dspr2(
     uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
     const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
     const uint8_t *limit1, const uint8_t *thresh1) {
-  vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1);
+  aom_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0);
+  aom_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_horizontal_8_dual_dspr2(
+void aom_lpf_horizontal_8_dual_dspr2(
     uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
     const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
     const uint8_t *limit1, const uint8_t *thresh1) {
-  vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1);
+  aom_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0);
+  aom_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
                                    const uint8_t *limit0,
                                    const uint8_t *thresh0,
                                    const uint8_t *blimit1,
                                    const uint8_t *limit1,
                                    const uint8_t *thresh1) {
-  vpx_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
+  aom_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0);
+  aom_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
                                    const uint8_t *limit0,
                                    const uint8_t *thresh0,
                                    const uint8_t *blimit1,
                                    const uint8_t *limit1,
                                    const uint8_t *thresh1) {
-  vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0);
-  vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
+  aom_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0);
+  aom_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
 }
 
-void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
                                     const uint8_t *limit,
                                     const uint8_t *thresh) {
-  vpx_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
-  vpx_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh);
+  aom_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
+  aom_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh);
 }
 #endif  // #if HAVE_DSPR2
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.h b/aom_dsp/mips/loopfilter_filters_dspr2.h
index 919618c..a511202 100644
--- a/aom_dsp/mips/loopfilter_filters_dspr2.h
+++ b/aom_dsp/mips/loopfilter_filters_dspr2.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -26,7 +26,7 @@
 /* inputs & outputs are quad-byte vectors */
 static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1,
                                 uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) {
-  int32_t vpx_filter_l, vpx_filter_r;
+  int32_t aom_filter_l, aom_filter_r;
   int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
   int32_t subr_r, subr_l;
   uint32_t t1, t2, HWM, t3;
@@ -72,33 +72,33 @@
   hev_r = hev_r & HWM;
 
   __asm__ __volatile__(
-      /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
-      "subq_s.ph    %[vpx_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
-      "subq_s.ph    %[vpx_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
+      /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */
+      "subq_s.ph    %[aom_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
+      "subq_s.ph    %[aom_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
 
       /* qs0 - ps0 */
       "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
       "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
 
-      /* vpx_filter &= hev; */
-      "and          %[vpx_filter_l], %[vpx_filter_l], %[hev_l]        \n\t"
-      "and          %[vpx_filter_r], %[vpx_filter_r], %[hev_r]        \n\t"
+      /* aom_filter &= hev; */
+      "and          %[aom_filter_l], %[aom_filter_l], %[hev_l]        \n\t"
+      "and          %[aom_filter_r], %[aom_filter_r], %[hev_r]        \n\t"
 
-      /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      /* aom_filter = vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
       "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
       "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
 
-      /* vpx_filter &= mask; */
-      "and          %[vpx_filter_l], %[vpx_filter_l], %[mask_l]       \n\t"
-      "and          %[vpx_filter_r], %[vpx_filter_r], %[mask_r]       \n\t"
+      /* aom_filter &= mask; */
+      "and          %[aom_filter_l], %[aom_filter_l], %[mask_l]       \n\t"
+      "and          %[aom_filter_r], %[aom_filter_r], %[mask_r]       \n\t"
 
-      : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+      : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
         [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
         [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
       : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
@@ -109,13 +109,13 @@
 
   /* save bottom 3 bits so that we round one side +4 and the other +3 */
   __asm__ __volatile__(
-      /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
-      "addq_s.ph    %[Filter1_l],    %[vpx_filter_l], %[t2]           \n\t"
-      "addq_s.ph    %[Filter1_r],    %[vpx_filter_r], %[t2]           \n\t"
+      /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */
+      "addq_s.ph    %[Filter1_l],    %[aom_filter_l], %[t2]           \n\t"
+      "addq_s.ph    %[Filter1_r],    %[aom_filter_r], %[t2]           \n\t"
 
-      /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */
-      "addq_s.ph    %[Filter2_l],    %[vpx_filter_l], %[t1]           \n\t"
-      "addq_s.ph    %[Filter2_r],    %[vpx_filter_r], %[t1]           \n\t"
+      /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */
+      "addq_s.ph    %[Filter2_l],    %[aom_filter_l], %[t1]           \n\t"
+      "addq_s.ph    %[Filter2_r],    %[aom_filter_r], %[t1]           \n\t"
       "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
       "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
 
@@ -138,22 +138,22 @@
         [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
         [vqs0_r] "+r"(vqs0_r)
       : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
-        [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
+        [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
 
   __asm__ __volatile__(
-      /* (vpx_filter += 1) >>= 1 */
+      /* (aom_filter += 1) >>= 1 */
       "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
       "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
 
-      /* vpx_filter &= ~hev; */
+      /* aom_filter &= ~hev; */
       "and          %[Filter1_l],    %[Filter1_l],    %[invhev_l]     \n\t"
       "and          %[Filter1_r],    %[Filter1_r],    %[invhev_r]     \n\t"
 
-      /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */
+      /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */
       "addq_s.ph    %[vps1_l],       %[vps1_l],       %[Filter1_l]    \n\t"
       "addq_s.ph    %[vps1_r],       %[vps1_r],       %[Filter1_r]    \n\t"
 
-      /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */
+      /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */
       "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
 
@@ -193,7 +193,7 @@
                                  uint32_t ps0, uint32_t qs0, uint32_t qs1,
                                  uint32_t *p1_f0, uint32_t *p0_f0,
                                  uint32_t *q0_f0, uint32_t *q1_f0) {
-  int32_t vpx_filter_l, vpx_filter_r;
+  int32_t aom_filter_l, aom_filter_r;
   int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
   int32_t subr_r, subr_l;
   uint32_t t1, t2, HWM, t3;
@@ -239,33 +239,33 @@
   hev_r = hev_r & HWM;
 
   __asm__ __volatile__(
-      /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
-      "subq_s.ph    %[vpx_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
-      "subq_s.ph    %[vpx_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
+      /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */
+      "subq_s.ph    %[aom_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
+      "subq_s.ph    %[aom_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
 
       /* qs0 - ps0 */
       "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
       "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
 
-      /* vpx_filter &= hev; */
-      "and          %[vpx_filter_l], %[vpx_filter_l], %[hev_l]        \n\t"
-      "and          %[vpx_filter_r], %[vpx_filter_r], %[hev_r]        \n\t"
+      /* aom_filter &= hev; */
+      "and          %[aom_filter_l], %[aom_filter_l], %[hev_l]        \n\t"
+      "and          %[aom_filter_r], %[aom_filter_r], %[hev_r]        \n\t"
 
-      /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      /* aom_filter = vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
       "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
       "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
-      "addq_s.ph    %[vpx_filter_l], %[vpx_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[vpx_filter_r], %[vpx_filter_r], %[subr_r]       \n\t"
+      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
+      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
 
-      /* vpx_filter &= mask; */
-      "and          %[vpx_filter_l], %[vpx_filter_l], %[mask_l]       \n\t"
-      "and          %[vpx_filter_r], %[vpx_filter_r], %[mask_r]       \n\t"
+      /* aom_filter &= mask; */
+      "and          %[aom_filter_l], %[aom_filter_l], %[mask_l]       \n\t"
+      "and          %[aom_filter_r], %[aom_filter_r], %[mask_r]       \n\t"
 
-      : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+      : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
         [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
         [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
       : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
@@ -276,13 +276,13 @@
 
   /* save bottom 3 bits so that we round one side +4 and the other +3 */
   __asm__ __volatile__(
-      /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
-      "addq_s.ph    %[Filter1_l],    %[vpx_filter_l], %[t2]           \n\t"
-      "addq_s.ph    %[Filter1_r],    %[vpx_filter_r], %[t2]           \n\t"
+      /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */
+      "addq_s.ph    %[Filter1_l],    %[aom_filter_l], %[t2]           \n\t"
+      "addq_s.ph    %[Filter1_r],    %[aom_filter_r], %[t2]           \n\t"
 
-      /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */
-      "addq_s.ph    %[Filter2_l],    %[vpx_filter_l], %[t1]           \n\t"
-      "addq_s.ph    %[Filter2_r],    %[vpx_filter_r], %[t1]           \n\t"
+      /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */
+      "addq_s.ph    %[Filter2_l],    %[aom_filter_l], %[t1]           \n\t"
+      "addq_s.ph    %[Filter2_r],    %[aom_filter_r], %[t1]           \n\t"
       "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
       "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
 
@@ -305,22 +305,22 @@
         [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
         [vqs0_r] "+r"(vqs0_r)
       : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
-        [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
+        [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
 
   __asm__ __volatile__(
-      /* (vpx_filter += 1) >>= 1 */
+      /* (aom_filter += 1) >>= 1 */
       "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
       "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
 
-      /* vpx_filter &= ~hev; */
+      /* aom_filter &= ~hev; */
       "and          %[Filter1_l],    %[Filter1_l],    %[invhev_l]     \n\t"
       "and          %[Filter1_r],    %[Filter1_r],    %[invhev_r]     \n\t"
 
-      /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */
+      /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */
       "addq_s.ph    %[vps1_l],       %[vps1_l],       %[Filter1_l]    \n\t"
       "addq_s.ph    %[vps1_r],       %[vps1_r],       %[Filter1_r]    \n\t"
 
-      /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */
+      /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */
       "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
 
@@ -731,4 +731,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#endif  // AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
diff --git a/aom_dsp/mips/loopfilter_macros_dspr2.h b/aom_dsp/mips/loopfilter_macros_dspr2.h
index 3928263..6db867e 100644
--- a/aom_dsp/mips/loopfilter_macros_dspr2.h
+++ b/aom_dsp/mips/loopfilter_macros_dspr2.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -432,4 +432,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#endif  // AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
diff --git a/aom_dsp/mips/loopfilter_masks_dspr2.h b/aom_dsp/mips/loopfilter_masks_dspr2.h
index 986db05..141a71a 100644
--- a/aom_dsp/mips/loopfilter_masks_dspr2.h
+++ b/aom_dsp/mips/loopfilter_masks_dspr2.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -352,4 +352,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#endif  // AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
diff --git a/aom_dsp/mips/loopfilter_mb_dspr2.c b/aom_dsp/mips/loopfilter_mb_dspr2.c
index ea30e16..a365131 100644
--- a/aom_dsp/mips/loopfilter_mb_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_dspr2.c
@@ -10,16 +10,16 @@
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/mips/common_dspr2.h"
 #include "aom_dsp/mips/loopfilter_filters_dspr2.h"
 #include "aom_dsp/mips/loopfilter_macros_dspr2.h"
 #include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
                                 const uint8_t *blimit, const uint8_t *limit,
                                 const uint8_t *thresh) {
   uint32_t mask;
@@ -286,7 +286,7 @@
   }
 }
 
-void vpx_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
                               const uint8_t *blimit, const uint8_t *limit,
                               const uint8_t *thresh) {
   uint8_t i;
diff --git a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
index 82a44c5..1665367 100644
--- a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
@@ -10,13 +10,13 @@
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/mips/common_dspr2.h"
 #include "aom_dsp/mips/loopfilter_filters_dspr2.h"
 #include "aom_dsp/mips/loopfilter_macros_dspr2.h"
 #include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #if HAVE_DSPR2
 static void mb_lpf_horizontal_edge(unsigned char *s, int pitch,
@@ -717,14 +717,14 @@
   }
 }
 
-void vpx_lpf_horizontal_edge_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_edge_8_dspr2(unsigned char *s, int pitch,
                                      const uint8_t *blimit,
                                      const uint8_t *limit,
                                      const uint8_t *thresh) {
   mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 1);
 }
 
-void vpx_lpf_horizontal_edge_16_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_edge_16_dspr2(unsigned char *s, int pitch,
                                       const uint8_t *blimit,
                                       const uint8_t *limit,
                                       const uint8_t *thresh) {
diff --git a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
index 22d7261..f313275 100644
--- a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
@@ -10,16 +10,16 @@
 
 #include <stdlib.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/mips/common_dspr2.h"
 #include "aom_dsp/mips/loopfilter_filters_dspr2.h"
 #include "aom_dsp/mips/loopfilter_macros_dspr2.h"
 #include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh) {
   uint8_t i;
   uint32_t mask, hev, flat, flat2;
diff --git a/aom_dsp/mips/loopfilter_msa.h b/aom_dsp/mips/loopfilter_msa.h
index d977f34..5aadad2 100644
--- a/aom_dsp/mips/loopfilter_msa.h
+++ b/aom_dsp/mips/loopfilter_msa.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_LOOPFILTER_MSA_H_
-#define VPX_DSP_LOOPFILTER_MSA_H_
+#ifndef AOM_DSP_LOOPFILTER_MSA_H_
+#define AOM_DSP_LOOPFILTER_MSA_H_
 
 #include "aom_dsp/mips/macros_msa.h"
 
-#define VPX_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+#define AOM_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
                            p1_out, p0_out, q0_out, q1_out)              \
   {                                                                     \
     v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                 \
@@ -64,7 +64,7 @@
     p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                           \
   }
 
-#define VPX_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+#define AOM_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
                            p1_out, p0_out, q0_out, q1_out)              \
   {                                                                     \
     v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                 \
@@ -122,7 +122,7 @@
     p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                           \
   }
 
-#define VPX_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
+#define AOM_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
   {                                                                   \
     v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0;    \
     v16u8 zero_in = { 0 };                                            \
@@ -143,7 +143,7 @@
     flat_out = flat_out & (mask);                                     \
   }
 
-#define VPX_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
+#define AOM_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
                   q6_in, q7_in, flat_in, flat2_out)                       \
   {                                                                       \
     v16u8 tmp, zero_in = { 0 };                                           \
@@ -173,7 +173,7 @@
     flat2_out = flat2_out & flat_in;                                      \
   }
 
-#define VPX_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+#define AOM_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
                     p2_filt8_out, p1_filt8_out, p0_filt8_out, q0_filt8_out, \
                     q1_filt8_out, q2_filt8_out)                             \
   {                                                                         \
@@ -247,4 +247,4 @@
     mask_out = limit_in < (v16u8)mask_out;                                   \
     mask_out = __msa_xori_b(mask_out, 0xff);                                 \
   }
-#endif /* VPX_DSP_LOOPFILTER_MSA_H_ */
+#endif /* AOM_DSP_LOOPFILTER_MSA_H_ */
diff --git a/aom_dsp/mips/macros_msa.h b/aom_dsp/mips/macros_msa.h
index f21b895..d7e9ad4 100644
--- a/aom_dsp/mips/macros_msa.h
+++ b/aom_dsp/mips/macros_msa.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_MACROS_MSA_H_
-#define VPX_DSP_MIPS_MACROS_MSA_H_
+#ifndef AOM_DSP_MIPS_MACROS_MSA_H_
+#define AOM_DSP_MIPS_MACROS_MSA_H_
 
 #include <msa.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc))
 #define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
@@ -2054,4 +2054,4 @@
                                                                 \
     tmp1_m;                                                     \
   })
-#endif /* VPX_DSP_MIPS_MACROS_MSA_H_ */
+#endif /* AOM_DSP_MIPS_MACROS_MSA_H_ */
diff --git a/aom_dsp/mips/sad_msa.c b/aom_dsp/mips/sad_msa.c
index 30123d6..aeeae84 100644
--- a/aom_dsp/mips/sad_msa.c
+++ b/aom_dsp/mips/sad_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 #define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out)       \
@@ -1259,175 +1259,175 @@
   return HADD_SW_S32(sad);
 }
 
-#define VPX_SAD_4xHEIGHT_MSA(height)                                         \
-  uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+#define AOM_SAD_4xHEIGHT_MSA(height)                                         \
+  uint32_t aom_sad4x##height##_msa(const uint8_t *src, int32_t src_stride,   \
                                    const uint8_t *ref, int32_t ref_stride) { \
     return sad_4width_msa(src, src_stride, ref, ref_stride, height);         \
   }
 
-#define VPX_SAD_8xHEIGHT_MSA(height)                                         \
-  uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+#define AOM_SAD_8xHEIGHT_MSA(height)                                         \
+  uint32_t aom_sad8x##height##_msa(const uint8_t *src, int32_t src_stride,   \
                                    const uint8_t *ref, int32_t ref_stride) { \
     return sad_8width_msa(src, src_stride, ref, ref_stride, height);         \
   }
 
-#define VPX_SAD_16xHEIGHT_MSA(height)                                         \
-  uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+#define AOM_SAD_16xHEIGHT_MSA(height)                                         \
+  uint32_t aom_sad16x##height##_msa(const uint8_t *src, int32_t src_stride,   \
                                     const uint8_t *ref, int32_t ref_stride) { \
     return sad_16width_msa(src, src_stride, ref, ref_stride, height);         \
   }
 
-#define VPX_SAD_32xHEIGHT_MSA(height)                                         \
-  uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+#define AOM_SAD_32xHEIGHT_MSA(height)                                         \
+  uint32_t aom_sad32x##height##_msa(const uint8_t *src, int32_t src_stride,   \
                                     const uint8_t *ref, int32_t ref_stride) { \
     return sad_32width_msa(src, src_stride, ref, ref_stride, height);         \
   }
 
-#define VPX_SAD_64xHEIGHT_MSA(height)                                         \
-  uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+#define AOM_SAD_64xHEIGHT_MSA(height)                                         \
+  uint32_t aom_sad64x##height##_msa(const uint8_t *src, int32_t src_stride,   \
                                     const uint8_t *ref, int32_t ref_stride) { \
     return sad_64width_msa(src, src_stride, ref, ref_stride, height);         \
   }
 
-#define VPX_SAD_4xHEIGHTx3_MSA(height)                                   \
-  void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx3_MSA(height)                                   \
+  void aom_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
                                  const uint8_t *ref, int32_t ref_stride, \
                                  uint32_t *sads) {                       \
     sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_8xHEIGHTx3_MSA(height)                                   \
-  void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx3_MSA(height)                                   \
+  void aom_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
                                  const uint8_t *ref, int32_t ref_stride, \
                                  uint32_t *sads) {                       \
     sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_16xHEIGHTx3_MSA(height)                                   \
-  void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx3_MSA(height)                                   \
+  void aom_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_32xHEIGHTx3_MSA(height)                                   \
-  void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx3_MSA(height)                                   \
+  void aom_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_64xHEIGHTx3_MSA(height)                                   \
-  void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx3_MSA(height)                                   \
+  void aom_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_4xHEIGHTx8_MSA(height)                                   \
-  void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx8_MSA(height)                                   \
+  void aom_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
                                  const uint8_t *ref, int32_t ref_stride, \
                                  uint32_t *sads) {                       \
     sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_8xHEIGHTx8_MSA(height)                                   \
-  void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx8_MSA(height)                                   \
+  void aom_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
                                  const uint8_t *ref, int32_t ref_stride, \
                                  uint32_t *sads) {                       \
     sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_16xHEIGHTx8_MSA(height)                                   \
-  void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx8_MSA(height)                                   \
+  void aom_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_32xHEIGHTx8_MSA(height)                                   \
-  void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx8_MSA(height)                                   \
+  void aom_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_64xHEIGHTx8_MSA(height)                                   \
-  void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx8_MSA(height)                                   \
+  void aom_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *ref, int32_t ref_stride, \
                                   uint32_t *sads) {                       \
     sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
   }
 
-#define VPX_SAD_4xHEIGHTx4D_MSA(height)                                   \
-  void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx4D_MSA(height)                                   \
+  void aom_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *const refs[],            \
                                   int32_t ref_stride, uint32_t *sads) {   \
     sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
   }
 
-#define VPX_SAD_8xHEIGHTx4D_MSA(height)                                   \
-  void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx4D_MSA(height)                                   \
+  void aom_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
                                   const uint8_t *const refs[],            \
                                   int32_t ref_stride, uint32_t *sads) {   \
     sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
   }
 
-#define VPX_SAD_16xHEIGHTx4D_MSA(height)                                   \
-  void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx4D_MSA(height)                                   \
+  void aom_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
                                    const uint8_t *const refs[],            \
                                    int32_t ref_stride, uint32_t *sads) {   \
     sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
   }
 
-#define VPX_SAD_32xHEIGHTx4D_MSA(height)                                   \
-  void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx4D_MSA(height)                                   \
+  void aom_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
                                    const uint8_t *const refs[],            \
                                    int32_t ref_stride, uint32_t *sads) {   \
     sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
   }
 
-#define VPX_SAD_64xHEIGHTx4D_MSA(height)                                   \
-  void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx4D_MSA(height)                                   \
+  void aom_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
                                    const uint8_t *const refs[],            \
                                    int32_t ref_stride, uint32_t *sads) {   \
     sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
   }
 
-#define VPX_AVGSAD_4xHEIGHT_MSA(height)                                        \
-  uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_AVGSAD_4xHEIGHT_MSA(height)                                        \
+  uint32_t aom_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
                                        const uint8_t *ref, int32_t ref_stride, \
                                        const uint8_t *second_pred) {           \
     return avgsad_4width_msa(src, src_stride, ref, ref_stride, height,         \
                              second_pred);                                     \
   }
 
-#define VPX_AVGSAD_8xHEIGHT_MSA(height)                                        \
-  uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_AVGSAD_8xHEIGHT_MSA(height)                                        \
+  uint32_t aom_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
                                        const uint8_t *ref, int32_t ref_stride, \
                                        const uint8_t *second_pred) {           \
     return avgsad_8width_msa(src, src_stride, ref, ref_stride, height,         \
                              second_pred);                                     \
   }
 
-#define VPX_AVGSAD_16xHEIGHT_MSA(height)                                \
-  uint32_t vpx_sad16x##height##_avg_msa(                                \
+#define AOM_AVGSAD_16xHEIGHT_MSA(height)                                \
+  uint32_t aom_sad16x##height##_avg_msa(                                \
       const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
       int32_t ref_stride, const uint8_t *second_pred) {                 \
     return avgsad_16width_msa(src, src_stride, ref, ref_stride, height, \
                               second_pred);                             \
   }
 
-#define VPX_AVGSAD_32xHEIGHT_MSA(height)                                \
-  uint32_t vpx_sad32x##height##_avg_msa(                                \
+#define AOM_AVGSAD_32xHEIGHT_MSA(height)                                \
+  uint32_t aom_sad32x##height##_avg_msa(                                \
       const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
       int32_t ref_stride, const uint8_t *second_pred) {                 \
     return avgsad_32width_msa(src, src_stride, ref, ref_stride, height, \
                               second_pred);                             \
   }
 
-#define VPX_AVGSAD_64xHEIGHT_MSA(height)                                \
-  uint32_t vpx_sad64x##height##_avg_msa(                                \
+#define AOM_AVGSAD_64xHEIGHT_MSA(height)                                \
+  uint32_t aom_sad64x##height##_avg_msa(                                \
       const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
       int32_t ref_stride, const uint8_t *second_pred) {                 \
     return avgsad_64width_msa(src, src_stride, ref, ref_stride, height, \
@@ -1435,92 +1435,92 @@
   }
 
 // 64x64
-VPX_SAD_64xHEIGHT_MSA(64);
-VPX_SAD_64xHEIGHTx3_MSA(64);
-VPX_SAD_64xHEIGHTx8_MSA(64);
-VPX_SAD_64xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_64xHEIGHT_MSA(64);
+AOM_SAD_64xHEIGHT_MSA(64);
+AOM_SAD_64xHEIGHTx3_MSA(64);
+AOM_SAD_64xHEIGHTx8_MSA(64);
+AOM_SAD_64xHEIGHTx4D_MSA(64);
+AOM_AVGSAD_64xHEIGHT_MSA(64);
 
 // 64x32
-VPX_SAD_64xHEIGHT_MSA(32);
-VPX_SAD_64xHEIGHTx3_MSA(32);
-VPX_SAD_64xHEIGHTx8_MSA(32);
-VPX_SAD_64xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_64xHEIGHT_MSA(32);
+AOM_SAD_64xHEIGHT_MSA(32);
+AOM_SAD_64xHEIGHTx3_MSA(32);
+AOM_SAD_64xHEIGHTx8_MSA(32);
+AOM_SAD_64xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_64xHEIGHT_MSA(32);
 
 // 32x64
-VPX_SAD_32xHEIGHT_MSA(64);
-VPX_SAD_32xHEIGHTx3_MSA(64);
-VPX_SAD_32xHEIGHTx8_MSA(64);
-VPX_SAD_32xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_32xHEIGHT_MSA(64);
+AOM_SAD_32xHEIGHT_MSA(64);
+AOM_SAD_32xHEIGHTx3_MSA(64);
+AOM_SAD_32xHEIGHTx8_MSA(64);
+AOM_SAD_32xHEIGHTx4D_MSA(64);
+AOM_AVGSAD_32xHEIGHT_MSA(64);
 
 // 32x32
-VPX_SAD_32xHEIGHT_MSA(32);
-VPX_SAD_32xHEIGHTx3_MSA(32);
-VPX_SAD_32xHEIGHTx8_MSA(32);
-VPX_SAD_32xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_32xHEIGHT_MSA(32);
+AOM_SAD_32xHEIGHT_MSA(32);
+AOM_SAD_32xHEIGHTx3_MSA(32);
+AOM_SAD_32xHEIGHTx8_MSA(32);
+AOM_SAD_32xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_32xHEIGHT_MSA(32);
 
 // 32x16
-VPX_SAD_32xHEIGHT_MSA(16);
-VPX_SAD_32xHEIGHTx3_MSA(16);
-VPX_SAD_32xHEIGHTx8_MSA(16);
-VPX_SAD_32xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_32xHEIGHT_MSA(16);
+AOM_SAD_32xHEIGHT_MSA(16);
+AOM_SAD_32xHEIGHTx3_MSA(16);
+AOM_SAD_32xHEIGHTx8_MSA(16);
+AOM_SAD_32xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_32xHEIGHT_MSA(16);
 
 // 16x32
-VPX_SAD_16xHEIGHT_MSA(32);
-VPX_SAD_16xHEIGHTx3_MSA(32);
-VPX_SAD_16xHEIGHTx8_MSA(32);
-VPX_SAD_16xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_16xHEIGHT_MSA(32);
+AOM_SAD_16xHEIGHT_MSA(32);
+AOM_SAD_16xHEIGHTx3_MSA(32);
+AOM_SAD_16xHEIGHTx8_MSA(32);
+AOM_SAD_16xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_16xHEIGHT_MSA(32);
 
 // 16x16
-VPX_SAD_16xHEIGHT_MSA(16);
-VPX_SAD_16xHEIGHTx3_MSA(16);
-VPX_SAD_16xHEIGHTx8_MSA(16);
-VPX_SAD_16xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_16xHEIGHT_MSA(16);
+AOM_SAD_16xHEIGHT_MSA(16);
+AOM_SAD_16xHEIGHTx3_MSA(16);
+AOM_SAD_16xHEIGHTx8_MSA(16);
+AOM_SAD_16xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_16xHEIGHT_MSA(16);
 
 // 16x8
-VPX_SAD_16xHEIGHT_MSA(8);
-VPX_SAD_16xHEIGHTx3_MSA(8);
-VPX_SAD_16xHEIGHTx8_MSA(8);
-VPX_SAD_16xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_16xHEIGHT_MSA(8);
+AOM_SAD_16xHEIGHT_MSA(8);
+AOM_SAD_16xHEIGHTx3_MSA(8);
+AOM_SAD_16xHEIGHTx8_MSA(8);
+AOM_SAD_16xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_16xHEIGHT_MSA(8);
 
 // 8x16
-VPX_SAD_8xHEIGHT_MSA(16);
-VPX_SAD_8xHEIGHTx3_MSA(16);
-VPX_SAD_8xHEIGHTx8_MSA(16);
-VPX_SAD_8xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_8xHEIGHT_MSA(16);
+AOM_SAD_8xHEIGHT_MSA(16);
+AOM_SAD_8xHEIGHTx3_MSA(16);
+AOM_SAD_8xHEIGHTx8_MSA(16);
+AOM_SAD_8xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_8xHEIGHT_MSA(16);
 
 // 8x8
-VPX_SAD_8xHEIGHT_MSA(8);
-VPX_SAD_8xHEIGHTx3_MSA(8);
-VPX_SAD_8xHEIGHTx8_MSA(8);
-VPX_SAD_8xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_8xHEIGHT_MSA(8);
+AOM_SAD_8xHEIGHT_MSA(8);
+AOM_SAD_8xHEIGHTx3_MSA(8);
+AOM_SAD_8xHEIGHTx8_MSA(8);
+AOM_SAD_8xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_8xHEIGHT_MSA(8);
 
 // 8x4
-VPX_SAD_8xHEIGHT_MSA(4);
-VPX_SAD_8xHEIGHTx3_MSA(4);
-VPX_SAD_8xHEIGHTx8_MSA(4);
-VPX_SAD_8xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_8xHEIGHT_MSA(4);
+AOM_SAD_8xHEIGHT_MSA(4);
+AOM_SAD_8xHEIGHTx3_MSA(4);
+AOM_SAD_8xHEIGHTx8_MSA(4);
+AOM_SAD_8xHEIGHTx4D_MSA(4);
+AOM_AVGSAD_8xHEIGHT_MSA(4);
 
 // 4x8
-VPX_SAD_4xHEIGHT_MSA(8);
-VPX_SAD_4xHEIGHTx3_MSA(8);
-VPX_SAD_4xHEIGHTx8_MSA(8);
-VPX_SAD_4xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_4xHEIGHT_MSA(8);
+AOM_SAD_4xHEIGHT_MSA(8);
+AOM_SAD_4xHEIGHTx3_MSA(8);
+AOM_SAD_4xHEIGHTx8_MSA(8);
+AOM_SAD_4xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_4xHEIGHT_MSA(8);
 
 // 4x4
-VPX_SAD_4xHEIGHT_MSA(4);
-VPX_SAD_4xHEIGHTx3_MSA(4);
-VPX_SAD_4xHEIGHTx8_MSA(4);
-VPX_SAD_4xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_4xHEIGHT_MSA(4);
+AOM_SAD_4xHEIGHT_MSA(4);
+AOM_SAD_4xHEIGHTx3_MSA(4);
+AOM_SAD_4xHEIGHTx8_MSA(4);
+AOM_SAD_4xHEIGHTx4D_MSA(4);
+AOM_AVGSAD_4xHEIGHT_MSA(4);
diff --git a/aom_dsp/mips/sub_pixel_variance_msa.c b/aom_dsp/mips/sub_pixel_variance_msa.c
index 4352ff5..cfbdb15 100644
--- a/aom_dsp/mips/sub_pixel_variance_msa.c
+++ b/aom_dsp/mips/sub_pixel_variance_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 #include "aom_dsp/mips/macros_msa.h"
 #include "aom_dsp/variance.h"
@@ -1617,8 +1617,8 @@
 #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
 #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
 
-#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht)                              \
-  uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa(                           \
+#define AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht)                              \
+  uint32_t aom_sub_pixel_variance##wd##x##ht##_msa(                           \
       const uint8_t *src, int32_t src_stride, int32_t xoffset,                \
       int32_t yoffset, const uint8_t *ref, int32_t ref_stride,                \
       uint32_t *sse) {                                                        \
@@ -1644,7 +1644,7 @@
                                                                               \
         var = VARIANCE_##wd##Wx##ht##H(*sse, diff);                           \
       } else {                                                                \
-        var = vpx_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
+        var = aom_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
                                             sse);                             \
       }                                                                       \
     }                                                                         \
@@ -1652,26 +1652,26 @@
     return var;                                                               \
   }
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
 
-#define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht)                          \
-  uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa(                       \
+#define AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht)                          \
+  uint32_t aom_sub_pixel_avg_variance##wd##x##ht##_msa(                       \
       const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset,            \
       int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride,            \
       uint32_t *sse, const uint8_t *sec_pred) {                               \
@@ -1703,21 +1703,21 @@
     return VARIANCE_##wd##Wx##ht##H(*sse, diff);                              \
   }
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
 
-uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
+uint32_t aom_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
                                              int32_t src_stride,
                                              int32_t xoffset, int32_t yoffset,
                                              const uint8_t *ref_ptr,
@@ -1751,8 +1751,8 @@
   return VARIANCE_32Wx64H(*sse, diff);
 }
 
-#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht)                           \
-  uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa(                           \
+#define AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht)                           \
+  uint32_t aom_sub_pixel_avg_variance64x##ht##_msa(                           \
       const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset,            \
       int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride,            \
       uint32_t *sse, const uint8_t *sec_pred) {                               \
@@ -1784,5 +1784,5 @@
     return VARIANCE_64Wx##ht##H(*sse, diff);                                  \
   }
 
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
+AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
+AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
diff --git a/aom_dsp/mips/subtract_msa.c b/aom_dsp/mips/subtract_msa.c
index 04cb922..018e5f4 100644
--- a/aom_dsp/mips/subtract_msa.c
+++ b/aom_dsp/mips/subtract_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 static void sub_blk_4x4_msa(const uint8_t *src_ptr, int32_t src_stride,
@@ -226,7 +226,7 @@
   }
 }
 
-void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr,
+void aom_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr,
                             ptrdiff_t diff_stride, const uint8_t *src_ptr,
                             ptrdiff_t src_stride, const uint8_t *pred_ptr,
                             ptrdiff_t pred_stride) {
@@ -253,12 +253,12 @@
                           diff_stride);
         break;
       default:
-        vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
+        aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
                              src_stride, pred_ptr, pred_stride);
         break;
     }
   } else {
-    vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride,
+    aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride,
                          pred_ptr, pred_stride);
   }
 }
diff --git a/aom_dsp/mips/txfm_macros_msa.h b/aom_dsp/mips/txfm_macros_msa.h
index a7da24e..955473f 100644
--- a/aom_dsp/mips/txfm_macros_msa.h
+++ b/aom_dsp/mips/txfm_macros_msa.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
-#define VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#ifndef AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#define AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
 
 #include "aom_dsp/mips/macros_msa.h"
 
@@ -93,4 +93,4 @@
     SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);                  \
     PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                      \
   }
-#endif  // VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#endif  // AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
diff --git a/aom_dsp/mips/variance_msa.c b/aom_dsp/mips/variance_msa.c
index 767dcf0..078625e 100644
--- a/aom_dsp/mips/variance_msa.c
+++ b/aom_dsp/mips/variance_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 #define CALC_MSE_B(src, ref, var)                                   \
@@ -487,7 +487,7 @@
   return HADD_SW_S32(var);
 }
 
-uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
+uint32_t aom_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t *ref_ptr, int32_t ref_stride) {
   uint32_t err = 0;
   uint32_t src0, src1, src2, src3;
@@ -527,8 +527,8 @@
 #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
 #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
 
-#define VPX_VARIANCE_WDXHT_MSA(wd, ht)                                         \
-  uint32_t vpx_variance##wd##x##ht##_msa(                                      \
+#define AOM_VARIANCE_WDXHT_MSA(wd, ht)                                         \
+  uint32_t aom_variance##wd##x##ht##_msa(                                      \
       const uint8_t *src, int32_t src_stride, const uint8_t *ref,              \
       int32_t ref_stride, uint32_t *sse) {                                     \
     int32_t diff;                                                              \
@@ -539,21 +539,21 @@
     return VARIANCE_##wd##Wx##ht##H(*sse, diff);                               \
   }
 
-VPX_VARIANCE_WDXHT_MSA(4, 4);
-VPX_VARIANCE_WDXHT_MSA(4, 8);
+AOM_VARIANCE_WDXHT_MSA(4, 4);
+AOM_VARIANCE_WDXHT_MSA(4, 8);
 
-VPX_VARIANCE_WDXHT_MSA(8, 4)
-VPX_VARIANCE_WDXHT_MSA(8, 8)
-VPX_VARIANCE_WDXHT_MSA(8, 16)
+AOM_VARIANCE_WDXHT_MSA(8, 4)
+AOM_VARIANCE_WDXHT_MSA(8, 8)
+AOM_VARIANCE_WDXHT_MSA(8, 16)
 
-VPX_VARIANCE_WDXHT_MSA(16, 8)
-VPX_VARIANCE_WDXHT_MSA(16, 16)
-VPX_VARIANCE_WDXHT_MSA(16, 32)
+AOM_VARIANCE_WDXHT_MSA(16, 8)
+AOM_VARIANCE_WDXHT_MSA(16, 16)
+AOM_VARIANCE_WDXHT_MSA(16, 32)
 
-VPX_VARIANCE_WDXHT_MSA(32, 16)
-VPX_VARIANCE_WDXHT_MSA(32, 32)
+AOM_VARIANCE_WDXHT_MSA(32, 16)
+AOM_VARIANCE_WDXHT_MSA(32, 32)
 
-uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance32x64_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                uint32_t *sse) {
   int32_t diff;
@@ -563,7 +563,7 @@
   return VARIANCE_32Wx64H(*sse, diff);
 }
 
-uint32_t vpx_variance64x32_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance64x32_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                uint32_t *sse) {
   int32_t diff;
@@ -573,7 +573,7 @@
   return VARIANCE_64Wx32H(*sse, diff);
 }
 
-uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance64x64_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                uint32_t *sse) {
   int32_t diff;
@@ -583,14 +583,14 @@
   return VARIANCE_64Wx64H(*sse, diff);
 }
 
-uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse8x8_msa(const uint8_t *src, int32_t src_stride,
                         const uint8_t *ref, int32_t ref_stride, uint32_t *sse) {
   *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8);
 
   return *sse;
 }
 
-uint32_t vpx_mse8x16_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse8x16_msa(const uint8_t *src, int32_t src_stride,
                          const uint8_t *ref, int32_t ref_stride,
                          uint32_t *sse) {
   *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 16);
@@ -598,7 +598,7 @@
   return *sse;
 }
 
-uint32_t vpx_mse16x8_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse16x8_msa(const uint8_t *src, int32_t src_stride,
                          const uint8_t *ref, int32_t ref_stride,
                          uint32_t *sse) {
   *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 8);
@@ -606,7 +606,7 @@
   return *sse;
 }
 
-uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse16x16_msa(const uint8_t *src, int32_t src_stride,
                           const uint8_t *ref, int32_t ref_stride,
                           uint32_t *sse) {
   *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 16);
@@ -614,16 +614,16 @@
   return *sse;
 }
 
-void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride,
+void aom_get8x8var_msa(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
                        int32_t *sum) {
   *sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum);
 }
 
-void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride,
+void aom_get16x16var_msa(const uint8_t *src, int32_t src_stride,
                          const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
                          int32_t *sum) {
   *sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum);
 }
 
-uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }
+uint32_t aom_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }
diff --git a/aom_dsp/postproc.h b/aom_dsp/postproc.h
index 78d11b1..f78a472 100644
--- a/aom_dsp/postproc.h
+++ b/aom_dsp/postproc.h
@@ -8,18 +8,18 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_POSTPROC_H_
-#define VPX_DSP_POSTPROC_H_
+#ifndef AOM_DSP_POSTPROC_H_
+#define AOM_DSP_POSTPROC_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 // Fills a noise buffer with gaussian noise strength determined by sigma.
-int vpx_setup_noise(double sigma, int size, char *noise);
+int aom_setup_noise(double sigma, int size, char *noise);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif  // VPX_DSP_POSTPROC_H_
+#endif  // AOM_DSP_POSTPROC_H_
diff --git a/aom_dsp/prob.c b/aom_dsp/prob.c
index 819e950..2fd9c13 100644
--- a/aom_dsp/prob.c
+++ b/aom_dsp/prob.c
@@ -10,7 +10,7 @@
 
 #include "./prob.h"
 
-const uint8_t vpx_norm[256] = {
+const uint8_t aom_norm[256] = {
   0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
   3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -24,10 +24,10 @@
 };
 
 static unsigned int tree_merge_probs_impl(unsigned int i,
-                                          const vpx_tree_index *tree,
-                                          const vpx_prob *pre_probs,
+                                          const aom_tree_index *tree,
+                                          const aom_prob *pre_probs,
                                           const unsigned int *counts,
-                                          vpx_prob *probs) {
+                                          aom_prob *probs) {
   const int l = tree[i];
   const unsigned int left_count =
       (l <= 0) ? counts[-l]
@@ -41,7 +41,7 @@
   return left_count + right_count;
 }
 
-void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs,
-                          const unsigned int *counts, vpx_prob *probs) {
+void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
+                          const unsigned int *counts, aom_prob *probs) {
   tree_merge_probs_impl(0, tree, pre_probs, counts, probs);
 }
diff --git a/aom_dsp/prob.h b/aom_dsp/prob.h
index 3de6463..e00cd7c 100644
--- a/aom_dsp/prob.h
+++ b/aom_dsp/prob.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_PROB_H_
-#define VPX_DSP_PROB_H_
+#ifndef AOM_DSP_PROB_H_
+#define AOM_DSP_PROB_H_
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_common.h"
 
 #include "aom_ports/mem.h"
 
@@ -20,51 +20,51 @@
 extern "C" {
 #endif
 
-typedef uint8_t vpx_prob;
+typedef uint8_t aom_prob;
 
 #define MAX_PROB 255
 
-#define vpx_prob_half ((vpx_prob)128)
+#define aom_prob_half ((aom_prob)128)
 
-typedef int8_t vpx_tree_index;
+typedef int8_t aom_tree_index;
 
 #define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
 
-#define vpx_complement(x) (255 - x)
+#define aom_complement(x) (255 - x)
 
 #define MODE_MV_COUNT_SAT 20
 
 /* We build coding trees compactly in arrays.
-   Each node of the tree is a pair of vpx_tree_indices.
+   Each node of the tree is a pair of aom_tree_indices.
    Array index often references a corresponding probability table.
    Index <= 0 means done encoding/decoding and value = -Index,
    Index > 0 means need another bit, specification at index.
    Nonnegative indices are always even;  processing begins at node 0. */
 
-typedef const vpx_tree_index vpx_tree[];
+typedef const aom_tree_index aom_tree[];
 
-static INLINE vpx_prob clip_prob(int p) {
+static INLINE aom_prob clip_prob(int p) {
   return (p > 255) ? 255 : (p < 1) ? 1 : p;
 }
 
-static INLINE vpx_prob get_prob(int num, int den) {
+static INLINE aom_prob get_prob(int num, int den) {
   return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den);
 }
 
-static INLINE vpx_prob get_binary_prob(int n0, int n1) {
+static INLINE aom_prob get_binary_prob(int n0, int n1) {
   return get_prob(n0, n0 + n1);
 }
 
 /* This function assumes prob1 and prob2 are already within [1,255] range. */
-static INLINE vpx_prob weighted_prob(int prob1, int prob2, int factor) {
+static INLINE aom_prob weighted_prob(int prob1, int prob2, int factor) {
   return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
 }
 
-static INLINE vpx_prob merge_probs(vpx_prob pre_prob, const unsigned int ct[2],
+static INLINE aom_prob merge_probs(aom_prob pre_prob, const unsigned int ct[2],
                                    unsigned int count_sat,
                                    unsigned int max_update_factor) {
-  const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
-  const unsigned int count = VPXMIN(ct[0] + ct[1], count_sat);
+  const aom_prob prob = get_binary_prob(ct[0], ct[1]);
+  const unsigned int count = AOMMIN(ct[0] + ct[1], count_sat);
   const unsigned int factor = max_update_factor * count / count_sat;
   return weighted_prob(pre_prob, prob, factor);
 }
@@ -75,27 +75,27 @@
   70, 76, 83, 89, 96, 102, 108, 115, 121, 128
 };
 
-static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob,
+static INLINE aom_prob mode_mv_merge_probs(aom_prob pre_prob,
                                            const unsigned int ct[2]) {
   const unsigned int den = ct[0] + ct[1];
   if (den == 0) {
     return pre_prob;
   } else {
-    const unsigned int count = VPXMIN(den, MODE_MV_COUNT_SAT);
+    const unsigned int count = AOMMIN(den, MODE_MV_COUNT_SAT);
     const unsigned int factor = count_to_update_factor[count];
-    const vpx_prob prob =
+    const aom_prob prob =
         clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den);
     return weighted_prob(pre_prob, prob, factor);
   }
 }
 
-void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs,
-                          const unsigned int *counts, vpx_prob *probs);
+void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
+                          const unsigned int *counts, aom_prob *probs);
 
-DECLARE_ALIGNED(16, extern const uint8_t, vpx_norm[256]);
+DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_PROB_H_
+#endif  // AOM_DSP_PROB_H_
diff --git a/aom_dsp/psnr.c b/aom_dsp/psnr.c
index f237ca4..70659dc 100644
--- a/aom_dsp/psnr.c
+++ b/aom_dsp/psnr.c
@@ -10,11 +10,11 @@
 
 #include <math.h>
 #include <assert.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/psnr.h"
 #include "aom_scale/yv12config.h"
 
-double vpx_sse_to_psnr(double samples, double peak, double sse) {
+double aom_sse_to_psnr(double samples, double peak, double sse) {
   if (sse > 0.0) {
     const double psnr = 10.0 * log10(samples * peak * peak / sse);
     return psnr > MAX_PSNR ? MAX_PSNR : psnr;
@@ -46,7 +46,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
                                       const uint8_t *b8, int b_stride, int w,
                                       int h, uint64_t *sse, int64_t *sum) {
@@ -78,7 +78,7 @@
   *sse = (unsigned int)sse_long;
   *sum = (int)sum_long;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
                        int b_stride, int width, int height) {
@@ -106,7 +106,7 @@
     const uint8_t *pa = a;
     const uint8_t *pb = b;
     for (x = 0; x < width / 16; ++x) {
-      vpx_mse16x16(pa, a_stride, pb, b_stride, &sse);
+      aom_mse16x16(pa, a_stride, pb, b_stride, &sse);
       total_sse += sse;
 
       pa += 16;
@@ -120,7 +120,7 @@
   return total_sse;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
                                     const uint8_t *b8, int b_stride, int width,
                                     int height, unsigned int input_shift) {
@@ -163,7 +163,7 @@
     const uint8_t *pa = a;
     const uint8_t *pb = b;
     for (x = 0; x < width / 16; ++x) {
-      vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse);
+      aom_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse);
       total_sse += sse;
       pa += 16;
       pb += 16;
@@ -173,9 +173,9 @@
   }
   return total_sse;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_y_sse(const YV12_BUFFER_CONFIG *a,
                       const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
@@ -184,7 +184,7 @@
                  a->y_crop_width, a->y_crop_height);
 }
 
-int64_t vpx_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_u_sse(const YV12_BUFFER_CONFIG *a,
                       const YV12_BUFFER_CONFIG *b) {
   assert(a->uv_crop_width == b->uv_crop_width);
   assert(a->uv_crop_height == b->uv_crop_height);
@@ -193,7 +193,7 @@
                  a->uv_crop_width, a->uv_crop_height);
 }
 
-int64_t vpx_get_v_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_v_sse(const YV12_BUFFER_CONFIG *a,
                       const YV12_BUFFER_CONFIG *b) {
   assert(a->uv_crop_width == b->uv_crop_width);
   assert(a->uv_crop_height == b->uv_crop_height);
@@ -202,8 +202,8 @@
                  a->uv_crop_width, a->uv_crop_height);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t aom_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
@@ -214,7 +214,7 @@
                         a->y_crop_width, a->y_crop_height);
 }
 
-int64_t vpx_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b) {
   assert(a->uv_crop_width == b->uv_crop_width);
   assert(a->uv_crop_height == b->uv_crop_height);
@@ -225,7 +225,7 @@
                         a->uv_crop_width, a->uv_crop_height);
 }
 
-int64_t vpx_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b) {
   assert(a->uv_crop_width == b->uv_crop_width);
   assert(a->uv_crop_height == b->uv_crop_height);
@@ -235,10 +235,10 @@
   return highbd_get_sse(a->v_buffer, a->uv_stride, b->v_buffer, b->uv_stride,
                         a->uv_crop_width, a->uv_crop_height);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
                           const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
                           uint32_t bit_depth, uint32_t in_bit_depth) {
   const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
@@ -272,7 +272,7 @@
     }
     psnr->sse[1 + i] = sse;
     psnr->samples[1 + i] = samples;
-    psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+    psnr->psnr[1 + i] = aom_sse_to_psnr(samples, peak, (double)sse);
 
     total_sse += sse;
     total_samples += samples;
@@ -281,12 +281,12 @@
   psnr->sse[0] = total_sse;
   psnr->samples[0] = total_samples;
   psnr->psnr[0] =
-      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
+      aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
 
-#endif  // !CONFIG_VP9_HIGHBITDEPTH
+#endif  // !CONFIG_AOM_HIGHBITDEPTH
 
-void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+void aom_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
                    PSNR_STATS *psnr) {
   static const double peak = 255.0;
   const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
@@ -308,7 +308,7 @@
         get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
     psnr->sse[1 + i] = sse;
     psnr->samples[1 + i] = samples;
-    psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+    psnr->psnr[1 + i] = aom_sse_to_psnr(samples, peak, (double)sse);
 
     total_sse += sse;
     total_samples += samples;
@@ -317,5 +317,5 @@
   psnr->sse[0] = total_sse;
   psnr->samples[0] = total_samples;
   psnr->psnr[0] =
-      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
+      aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
diff --git a/aom_dsp/psnr.h b/aom_dsp/psnr.h
index ba70407..48e7613 100644
--- a/aom_dsp/psnr.h
+++ b/aom_dsp/psnr.h
@@ -8,8 +8,8 @@
 *  be found in the AUTHORS file in the root of the source tree.
 */
 
-#ifndef VPX_DSP_PSNR_H_
-#define VPX_DSP_PSNR_H_
+#ifndef AOM_DSP_PSNR_H_
+#define AOM_DSP_PSNR_H_
 
 #include "aom_scale/yv12config.h"
 
@@ -25,7 +25,7 @@
   uint32_t samples[4];  // total/y/u/v
 } PSNR_STATS;
 
-// TODO(dkovalev) change vpx_sse_to_psnr signature: double -> int64_t
+// TODO(dkovalev) change aom_sse_to_psnr signature: double -> int64_t
 
 /*!\brief Converts SSE to PSNR
 *
@@ -35,29 +35,29 @@
 * \param[in]    peak          Max sample value
 * \param[in]    sse           Sum of squared errors
 */
-double vpx_sse_to_psnr(double samples, double peak, double sse);
-int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-int64_t vpx_get_u_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-int64_t vpx_get_v_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+double aom_sse_to_psnr(double samples, double peak, double sse);
+int64_t aom_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t aom_get_u_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t aom_get_v_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t aom_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b);
-int64_t vpx_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+                             const YV12_BUFFER_CONFIG *b);
+int64_t aom_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b);
-int64_t vpx_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
-                             const YV12_BUFFER_CONFIG *b);
-void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
+void aom_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
                           const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
                           unsigned int bit_depth, unsigned int in_bit_depth);
 #endif
-void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+void aom_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
                    PSNR_STATS *psnr);
 
-double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source,
+double aom_psnrhvs(const YV12_BUFFER_CONFIG *source,
                    const YV12_BUFFER_CONFIG *dest, double *phvs_y,
                    double *phvs_u, double *phvs_v, uint32_t bd, uint32_t in_bd);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VPX_DSP_PSNR_H_
+#endif  // AOM_DSP_PSNR_H_
diff --git a/aom_dsp/psnrhvs.c b/aom_dsp/psnrhvs.c
index 333ff63..f7b78ae 100644
--- a/aom_dsp/psnrhvs.c
+++ b/aom_dsp/psnrhvs.c
@@ -15,8 +15,8 @@
 #include <stdlib.h>
 #include <math.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/ssim.h"
 #include "aom_ports/system_state.h"
 #include "aom_dsp/psnr.h"
@@ -30,17 +30,17 @@
                            int xstride) {
   int i, j;
   (void)xstride;
-  vpx_fdct8x8(x, y, ystride);
+  aom_fdct8x8(x, y, ystride);
   for (i = 0; i < 8; i++)
     for (j = 0; j < 8; j++)
       *(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3;
 }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void hbd_od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x,
                                int xstride) {
   int i, j;
   (void)xstride;
-  vpx_highbd_fdct8x8(x, y, ystride);
+  aom_highbd_fdct8x8(x, y, ystride);
   for (i = 0; i < 8; i++)
     for (j = 0; j < 8; j++)
       *(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3;
@@ -210,7 +210,7 @@
         s_gvar = (s_vars[0] + s_vars[1] + s_vars[2] + s_vars[3]) / s_gvar;
       if (d_gvar > 0)
         d_gvar = (d_vars[0] + d_vars[1] + d_vars[2] + d_vars[3]) / d_gvar;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (bit_depth == 10 || bit_depth == 12) {
         hbd_od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8);
         hbd_od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8);
@@ -246,7 +246,7 @@
   return ret;
 }
 
-double vpx_psnrhvs(const YV12_BUFFER_CONFIG *src,
+double aom_psnrhvs(const YV12_BUFFER_CONFIG *src,
                    const YV12_BUFFER_CONFIG *dest, double *y_psnrhvs,
                    double *u_psnrhvs, double *v_psnrhvs, uint32_t bd,
                    uint32_t in_bd) {
@@ -254,7 +254,7 @@
   const double par = 1.0;
   const int step = 7;
   uint32_t bd_shift = 0;
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   assert(bd == 8 || bd == 10 || bd == 12);
   assert(bd >= in_bd);
diff --git a/aom_dsp/quantize.c b/aom_dsp/quantize.c
index c901aa0..76e7e97 100644
--- a/aom_dsp/quantize.c
+++ b/aom_dsp/quantize.c
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #if CONFIG_AOM_QM
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr,
@@ -40,8 +40,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant, tran_low_t *qcoeff_ptr,
                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
@@ -69,7 +69,7 @@
 }
 #endif
 
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                            const int16_t *round_ptr, const int16_t quant,
                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                            const int16_t dequant_ptr, uint16_t *eob_ptr,
@@ -99,8 +99,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
                                   tran_low_t *dqcoeff_ptr,
@@ -131,7 +131,7 @@
 }
 #endif
 
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                       int skip_block, const int16_t *zbin_ptr,
                       const int16_t *round_ptr, const int16_t *quant_ptr,
                       const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -192,8 +192,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
@@ -252,7 +252,7 @@
 }
 #endif
 
-void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                             int skip_block, const int16_t *zbin_ptr,
                             const int16_t *round_ptr, const int16_t *quant_ptr,
                             const int16_t *quant_shift_ptr,
@@ -316,8 +316,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
     const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -377,7 +377,7 @@
 }
 #endif
 #else
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr) {
@@ -400,8 +400,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant, tran_low_t *qcoeff_ptr,
                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
@@ -425,7 +425,7 @@
 }
 #endif
 
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                            const int16_t *round_ptr, const int16_t quant,
                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                            const int16_t dequant_ptr, uint16_t *eob_ptr) {
@@ -450,8 +450,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
                                   tran_low_t *dqcoeff_ptr,
@@ -477,7 +477,7 @@
 }
 #endif
 
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                       int skip_block, const int16_t *zbin_ptr,
                       const int16_t *round_ptr, const int16_t *quant_ptr,
                       const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -527,8 +527,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
@@ -578,7 +578,7 @@
 }
 #endif
 
-void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                             int skip_block, const int16_t *zbin_ptr,
                             const int16_t *round_ptr, const int16_t *quant_ptr,
                             const int16_t *quant_shift_ptr,
@@ -632,8 +632,8 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
     const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/quantize.h b/aom_dsp/quantize.h
index b994d9a..720dce3 100644
--- a/aom_dsp/quantize.h
+++ b/aom_dsp/quantize.h
@@ -8,28 +8,28 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_QUANTIZE_H_
-#define VPX_DSP_QUANTIZE_H_
+#ifndef AOM_DSP_QUANTIZE_H_
+#define AOM_DSP_QUANTIZE_H_
 
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #if CONFIG_AOM_QM
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant_ptr,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr,
                      const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                            const int16_t *round_ptr, const int16_t quant_ptr,
                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                            const int16_t dequant_ptr, uint16_t *eob_ptr,
                            const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                       int skip_block, const int16_t *zbin_ptr,
                       const int16_t *round_ptr, const int16_t *quant_ptr,
                       const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -37,19 +37,19 @@
                       uint16_t *eob_ptr, const int16_t *scan,
                       const int16_t *iscan, const qm_val_t *qm_ptr,
                       const qm_val_t *iqm_ptr);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
                             uint16_t *eob_ptr, const qm_val_t *qm_ptr,
                             const qm_val_t *iqm_ptr);
-void vpx_highbd_quantize_dc_32x32(
+void aom_highbd_quantize_dc_32x32(
     const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr,
     const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
     const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr,
     const qm_val_t *iqm_ptr);
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
@@ -59,22 +59,22 @@
                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
 #endif
 #else
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant_ptr,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr);
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                            const int16_t *round_ptr, const int16_t quant_ptr,
                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                            const int16_t dequant_ptr, uint16_t *eob_ptr);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
                             uint16_t *eob_ptr);
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr,
                                   const int16_t quant_ptr,
                                   tran_low_t *qcoeff_ptr,
@@ -87,4 +87,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_QUANTIZE_H_
+#endif  // AOM_DSP_QUANTIZE_H_
diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c
index 8bbf83f..f5d19cc 100644
--- a/aom_dsp/sad.c
+++ b/aom_dsp/sad.c
@@ -10,10 +10,10 @@
 
 #include <stdlib.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 /* Sum the difference between every corresponding element of the buffers. */
@@ -32,43 +32,43 @@
 }
 
 #define sadMxN(m, n)                                                        \
-  unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride,     \
+  unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride,     \
                                     const uint8_t *ref, int ref_stride) {   \
     return sad(src, src_stride, ref, ref_stride, m, n);                     \
   }                                                                         \
-  unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
+  unsigned int aom_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
                                         const uint8_t *ref, int ref_stride, \
                                         const uint8_t *second_pred) {       \
     uint8_t comp_pred[m * n];                                               \
-    vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride);     \
+    aom_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride);     \
     return sad(src, src_stride, comp_pred, m, m, n);                        \
   }
 
 // depending on call sites, pass **ref_array to avoid & in subsequent call and
 // de-dup with 4D below.
 #define sadMxNxK(m, n, k)                                                   \
-  void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride,       \
+  void aom_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride,       \
                                   const uint8_t *ref_array, int ref_stride, \
                                   uint32_t *sad_array) {                    \
     int i;                                                                  \
     for (i = 0; i < k; ++i)                                                 \
       sad_array[i] =                                                        \
-          vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
+          aom_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
   }
 
 // This appears to be equivalent to the above when k == 4 and refs is const
 #define sadMxNx4D(m, n)                                                    \
-  void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,         \
+  void aom_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,         \
                                const uint8_t *const ref_array[],           \
                                int ref_stride, uint32_t *sad_array) {      \
     int i;                                                                 \
     for (i = 0; i < 4; ++i)                                                \
       sad_array[i] =                                                       \
-          vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
+          aom_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
   }
 
 /* clang-format off */
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 // 128x128
 sadMxN(128, 128)
 sadMxNxK(128, 128, 3)
@@ -82,7 +82,7 @@
 // 64x128
 sadMxN(64, 128)
 sadMxNx4D(64, 128)
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 // 64x64
 sadMxN(64, 64)
@@ -153,7 +153,7 @@
 sadMxNx4D(4, 4)
 /* clang-format on */
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         static INLINE
     unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
                             int b_stride, int width, int height) {
@@ -186,43 +186,43 @@
 }
 
 #define highbd_sadMxN(m, n)                                                    \
-  unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
+  unsigned int aom_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
                                            const uint8_t *ref,                 \
                                            int ref_stride) {                   \
     return highbd_sad(src, src_stride, ref, ref_stride, m, n);                 \
   }                                                                            \
-  unsigned int vpx_highbd_sad##m##x##n##_avg_c(                                \
+  unsigned int aom_highbd_sad##m##x##n##_avg_c(                                \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,  \
       const uint8_t *second_pred) {                                            \
     uint16_t comp_pred[m * n];                                                 \
-    vpx_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
+    aom_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
     return highbd_sadb(src, src_stride, comp_pred, m, m, n);                   \
   }
 
 #define highbd_sadMxNxK(m, n, k)                                             \
-  void vpx_highbd_sad##m##x##n##x##k##_c(                                    \
+  void aom_highbd_sad##m##x##n##x##k##_c(                                    \
       const uint8_t *src, int src_stride, const uint8_t *ref_array,          \
       int ref_stride, uint32_t *sad_array) {                                 \
     int i;                                                                   \
     for (i = 0; i < k; ++i) {                                                \
-      sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride,            \
+      sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride,            \
                                                  &ref_array[i], ref_stride); \
     }                                                                        \
   }
 
 #define highbd_sadMxNx4D(m, n)                                               \
-  void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,    \
+  void aom_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,    \
                                       const uint8_t *const ref_array[],      \
                                       int ref_stride, uint32_t *sad_array) { \
     int i;                                                                   \
     for (i = 0; i < 4; ++i) {                                                \
-      sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride,            \
+      sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride,            \
                                                  ref_array[i], ref_stride);  \
     }                                                                        \
   }
 
 /* clang-format off */
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 // 128x128
 highbd_sadMxN(128, 128)
 highbd_sadMxNxK(128, 128, 3)
@@ -236,7 +236,7 @@
 // 64x128
 highbd_sadMxN(64, 128)
 highbd_sadMxNx4D(64, 128)
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 // 64x64
 highbd_sadMxN(64, 64)
@@ -306,9 +306,9 @@
 highbd_sadMxNxK(4, 4, 8)
 highbd_sadMxNx4D(4, 4)
 /* clang-format on */
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP10 && CONFIG_EXT_INTER
+#if CONFIG_AV1 && CONFIG_EXT_INTER
             static INLINE
     unsigned int masked_sad(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride, const uint8_t *m, int m_stride,
@@ -329,7 +329,7 @@
 }
 
 #define MASKSADMxN(m, n)                                                      \
-  unsigned int vpx_masked_sad##m##x##n##_c(                                   \
+  unsigned int aom_masked_sad##m##x##n##_c(                                   \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m,   \
@@ -357,7 +357,7 @@
 MASKSADMxN(4, 4)
 /* clang-format on */
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                     static INLINE
     unsigned int highbd_masked_sad(const uint8_t *a8, int a_stride,
                                    const uint8_t *b8, int b_stride,
@@ -381,7 +381,7 @@
 }
 
 #define HIGHBD_MASKSADMXN(m, n)                                               \
-  unsigned int vpx_highbd_masked_sad##m##x##n##_c(                            \
+  unsigned int aom_highbd_masked_sad##m##x##n##_c(                            \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return highbd_masked_sad(src, src_stride, ref, ref_stride, msk,           \
@@ -406,10 +406,10 @@
 HIGHBD_MASKSADMXN(8, 4)
 HIGHBD_MASKSADMXN(4, 8)
 HIGHBD_MASKSADMXN(4, 4)
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#endif  // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_AV1 && CONFIG_EXT_INTER
 
-#if CONFIG_VP10 && CONFIG_OBMC
+#if CONFIG_AV1 && CONFIG_OBMC
 // pre: predictor being evaluated
 // wsrc: target weighted prediction (has been *4096 to keep precision)
 // mask: 2d weights (scaled by 4096)
@@ -432,7 +432,7 @@
 }
 
 #define OBMCSADMxN(m, n)                                                     \
-  unsigned int vpx_obmc_sad##m##x##n##_c(const uint8_t *ref, int ref_stride, \
+  unsigned int aom_obmc_sad##m##x##n##_c(const uint8_t *ref, int ref_stride, \
                                          const int32_t *wsrc,                \
                                          const int32_t *mask) {              \
     return obmc_sad(ref, ref_stride, wsrc, mask, m, n);                      \
@@ -459,7 +459,7 @@
 OBMCSADMxN(4, 4)
 /* clang-format on */
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                     static INLINE
     unsigned int highbd_obmc_sad(const uint8_t *pre8, int pre_stride,
                                  const int32_t *wsrc, const int32_t *mask,
@@ -481,7 +481,7 @@
 }
 
 #define HIGHBD_OBMCSADMXN(m, n)                                \
-  unsigned int vpx_highbd_obmc_sad##m##x##n##_c(               \
+  unsigned int aom_highbd_obmc_sad##m##x##n##_c(               \
       const uint8_t *ref, int ref_stride, const int32_t *wsrc, \
       const int32_t *mask) {                                   \
     return highbd_obmc_sad(ref, ref_stride, wsrc, mask, m, n); \
@@ -507,5 +507,5 @@
 HIGHBD_OBMCSADMXN(4, 8)
 HIGHBD_OBMCSADMXN(4, 4)
 /* clang-format on */
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#endif  // CONFIG_VP10 && CONFIG_OBMC
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_AV1 && CONFIG_OBMC
diff --git a/aom_dsp/ssim.c b/aom_dsp/ssim.c
index c111ead..ed8aaea 100644
--- a/aom_dsp/ssim.c
+++ b/aom_dsp/ssim.c
@@ -10,12 +10,12 @@
 
 #include <assert.h>
 #include <math.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/ssim.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
 
-void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
+void aom_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
                             uint32_t *sum_s, uint32_t *sum_r,
                             uint32_t *sum_sq_s, uint32_t *sum_sq_r,
                             uint32_t *sum_sxr) {
@@ -30,7 +30,7 @@
     }
   }
 }
-void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
+void aom_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
                           uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s,
                           uint32_t *sum_sq_r, uint32_t *sum_sxr) {
   int i, j;
@@ -45,8 +45,8 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
                                  int rp, uint32_t *sum_s, uint32_t *sum_r,
                                  uint32_t *sum_sq_s, uint32_t *sum_sq_r,
                                  uint32_t *sum_sxr) {
@@ -61,7 +61,7 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static const int64_t cc1 = 26634;        // (64^2*(.01*255)^2
 static const int64_t cc2 = 239708;       // (64^2*(.03*255)^2
@@ -102,26 +102,26 @@
 
 static double ssim_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp) {
   uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
-  vpx_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+  aom_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
                      &sum_sxr);
   return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64, 8);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
                               int rp, uint32_t bd, uint32_t shift) {
   uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
-  vpx_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+  aom_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
                             &sum_sxr);
   return similarity(sum_s >> shift, sum_r >> shift, sum_sq_s >> (2 * shift),
                     sum_sq_r >> (2 * shift), sum_sxr >> (2 * shift), 64, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // We are using a 8x8 moving window with starting location of each 8x8 window
 // on the 4x4 pixel grid. Such arrangement allows the windows to overlap
 // block boundaries to penalize blocking artifacts.
-static double vpx_ssim2(const uint8_t *img1, const uint8_t *img2,
+static double aom_ssim2(const uint8_t *img1, const uint8_t *img2,
                         int stride_img1, int stride_img2, int width,
                         int height) {
   int i, j;
@@ -141,8 +141,8 @@
   return ssim_total;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
+#if CONFIG_AOM_HIGHBITDEPTH
+static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
                                int stride_img1, int stride_img2, int width,
                                int height, uint32_t bd, uint32_t shift) {
   int i, j;
@@ -163,20 +163,20 @@
   ssim_total /= samples;
   return ssim_total;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
                      const YV12_BUFFER_CONFIG *dest, double *weight) {
   double a, b, c;
   double ssimv;
 
-  a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+  a = aom_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
                 dest->y_stride, source->y_crop_width, source->y_crop_height);
 
-  b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+  b = aom_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
                 dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
-  c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+  c = aom_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
                 dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
   ssimv = a * .8 + .1 * (b + c);
@@ -264,11 +264,11 @@
 }
 static void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2,
                         int img2_pitch, Ssimv *sv) {
-  vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r,
+  aom_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r,
                      &sv->sum_sq_s, &sv->sum_sq_r, &sv->sum_sxr);
 }
 
-double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
+double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
                             int img2_pitch, int width, int height, Ssimv *sv2,
                             Metrics *m, int do_inconsistency) {
   double dssim_total = 0;
@@ -279,7 +279,7 @@
   int c = 0;
   double norm;
   double old_ssim_total = 0;
-  vpx_clear_system_state();
+  aom_clear_system_state();
   // We can sample points as frequently as we like start with 1 per 4x4.
   for (i = 0; i < height;
        i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
@@ -428,8 +428,8 @@
   return inconsistency_total;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
+#if CONFIG_AOM_HIGHBITDEPTH
+double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
                             const YV12_BUFFER_CONFIG *dest, double *weight,
                             uint32_t bd, uint32_t in_bd) {
   double a, b, c;
@@ -439,15 +439,15 @@
   assert(bd >= in_bd);
   shift = bd - in_bd;
 
-  a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+  a = aom_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
                        dest->y_stride, source->y_crop_width,
                        source->y_crop_height, in_bd, shift);
 
-  b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+  b = aom_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
                        dest->uv_stride, source->uv_crop_width,
                        source->uv_crop_height, in_bd, shift);
 
-  c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+  c = aom_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
                        dest->uv_stride, source->uv_crop_width,
                        source->uv_crop_height, in_bd, shift);
 
@@ -458,4 +458,4 @@
   return ssimv;
 }
 
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.h b/aom_dsp/ssim.h
index 0127d3f..831803a 100644
--- a/aom_dsp/ssim.h
+++ b/aom_dsp/ssim.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_SSIM_H_
-#define VPX_DSP_SSIM_H_
+#ifndef AOM_DSP_SSIM_H_
+#define AOM_DSP_SSIM_H_
 
 #define MAX_SSIM_DB 100.0;
 
@@ -17,7 +17,7 @@
 extern "C" {
 #endif
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_scale/yv12config.h"
 
 // metrics used for calculating ssim, ssim2, dssim, and ssimc
@@ -62,26 +62,26 @@
   double ssimcd;
 } Metrics;
 
-double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
+double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
                             int img2_pitch, int width, int height, Ssimv *sv2,
                             Metrics *m, int do_inconsistency);
 
-double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
                      const YV12_BUFFER_CONFIG *dest, double *weight);
 
-double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source,
                          const YV12_BUFFER_CONFIG *dest, double *ssim_y,
                          double *ssim_u, double *ssim_v, uint32_t bd,
                          uint32_t in_bd);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
+#if CONFIG_AOM_HIGHBITDEPTH
+double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
                             const YV12_BUFFER_CONFIG *dest, double *weight,
                             uint32_t bd, uint32_t in_bd);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_SSIM_H_
+#endif  // AOM_DSP_SSIM_H_
diff --git a/aom_dsp/subtract.c b/aom_dsp/subtract.c
index 5d048ed..a68bc64 100644
--- a/aom_dsp/subtract.c
+++ b/aom_dsp/subtract.c
@@ -10,13 +10,13 @@
 
 #include <stdlib.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
-void vpx_subtract_block_c(int rows, int cols, int16_t *diff,
+void aom_subtract_block_c(int rows, int cols, int16_t *diff,
                           ptrdiff_t diff_stride, const uint8_t *src,
                           ptrdiff_t src_stride, const uint8_t *pred,
                           ptrdiff_t pred_stride) {
@@ -31,8 +31,8 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
                                  ptrdiff_t diff_stride, const uint8_t *src8,
                                  ptrdiff_t src_stride, const uint8_t *pred8,
                                  ptrdiff_t pred_stride, int bd) {
@@ -51,4 +51,4 @@
     src += src_stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/sum_squares.c b/aom_dsp/sum_squares.c
index 73a9006..6b71d44 100644
--- a/aom_dsp/sum_squares.c
+++ b/aom_dsp/sum_squares.c
@@ -10,9 +10,9 @@
 
 #include <assert.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-uint64_t vpx_sum_squares_2d_i16_c(const int16_t *src, int src_stride,
+uint64_t aom_sum_squares_2d_i16_c(const int16_t *src, int src_stride,
                                   int size) {
   int r, c;
   uint64_t ss = 0;
@@ -28,7 +28,7 @@
   return ss;
 }
 
-uint64_t vpx_sum_squares_i16_c(const int16_t *src, uint32_t n) {
+uint64_t aom_sum_squares_i16_c(const int16_t *src, uint32_t n) {
   uint64_t ss = 0;
   do {
     const int16_t v = *src++;
diff --git a/aom_dsp/txfm_common.h b/aom_dsp/txfm_common.h
index 38fe2b7..3287990 100644
--- a/aom_dsp/txfm_common.h
+++ b/aom_dsp/txfm_common.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_TXFM_COMMON_H_
-#define VPX_DSP_TXFM_COMMON_H_
+#ifndef AOM_DSP_TXFM_COMMON_H_
+#define AOM_DSP_TXFM_COMMON_H_
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 // Constants and Macros used by all idct/dct functions
 #define DCT_CONST_BITS 14
@@ -66,4 +66,4 @@
 // 16384 * sqrt(2)
 static const tran_high_t Sqrt2 = 23170;
 
-#endif  // VPX_DSP_TXFM_COMMON_H_
+#endif  // AOM_DSP_TXFM_COMMON_H_
diff --git a/aom_dsp/variance.c b/aom_dsp/variance.c
index 5df2aa5..bb7720b 100644
--- a/aom_dsp/variance.c
+++ b/aom_dsp/variance.c
@@ -9,16 +9,16 @@
  */
 #include <stdlib.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #include "aom_dsp/variance.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
-uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
+uint32_t aom_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride) {
   int distortion = 0;
   int r, c;
@@ -36,7 +36,7 @@
   return distortion;
 }
 
-uint32_t vpx_get_mb_ss_c(const int16_t *a) {
+uint32_t aom_get_mb_ss_c(const int16_t *a) {
   unsigned int i, sum = 0;
 
   for (i = 0; i < 256; ++i) {
@@ -46,22 +46,22 @@
   return sum;
 }
 
-uint32_t vpx_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse);
+  return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse);
 }
 
-uint32_t vpx_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse);
+  return aom_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse);
 }
 
-uint32_t vpx_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride,
                                            const uint8_t *b, int b_stride,
                                            uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse);
+  return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse);
 }
 
 static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
@@ -142,7 +142,7 @@
 }
 
 #define VAR(W, H)                                                    \
-  uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+  uint32_t aom_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                      const uint8_t *b, int b_stride, \
                                      uint32_t *sse) {                \
     int sum;                                                         \
@@ -151,7 +151,7 @@
   }
 
 #define SUBPIX_VAR(W, H)                                                \
-  uint32_t vpx_sub_pixel_variance##W##x##H##_c(                         \
+  uint32_t aom_sub_pixel_variance##W##x##H##_c(                         \
       const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
       const uint8_t *b, int b_stride, uint32_t *sse) {                  \
     uint16_t fdata3[(H + 1) * W];                                       \
@@ -162,11 +162,11 @@
     var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
                                        bilinear_filters_2t[yoffset]);   \
                                                                         \
-    return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse);       \
+    return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse);       \
   }
 
 #define SUBPIX_AVG_VAR(W, H)                                            \
-  uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(                     \
+  uint32_t aom_sub_pixel_avg_variance##W##x##H##_c(                     \
       const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
       const uint8_t *b, int b_stride, uint32_t *sse,                    \
       const uint8_t *second_pred) {                                     \
@@ -179,9 +179,9 @@
     var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
                                        bilinear_filters_2t[yoffset]);   \
                                                                         \
-    vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W);              \
+    aom_comp_avg_pred(temp3, second_pred, W, H, temp2, W);              \
                                                                         \
-    return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse);       \
+    return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse);       \
   }
 
 /* Identical to the variance call except it takes an additional parameter, sum,
@@ -189,7 +189,7 @@
  * sse - sum^2 / w*h
  */
 #define GET_VAR(W, H)                                                         \
-  void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride,                \
+  void aom_get##W##x##H##var_c(const uint8_t *a, int a_stride,                \
                                const uint8_t *b, int b_stride, uint32_t *sse, \
                                int *sum) {                                    \
     variance(a, a_stride, b, b_stride, W, H, sse, sum);                       \
@@ -200,7 +200,7 @@
  * variable.
  */
 #define MSE(W, H)                                               \
-  uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
+  uint32_t aom_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
                                 const uint8_t *b, int b_stride, \
                                 uint32_t *sse) {                \
     int sum;                                                    \
@@ -214,11 +214,11 @@
   SUBPIX_VAR(W, H)      \
   SUBPIX_AVG_VAR(W, H)
 
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 VARIANCES(128, 128)
 VARIANCES(128, 64)
 VARIANCES(64, 128)
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 VARIANCES(64, 64)
 VARIANCES(64, 32)
 VARIANCES(32, 64)
@@ -241,7 +241,7 @@
 MSE(8, 16)
 MSE(8, 8)
 
-void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
+void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
                          int height, const uint8_t *ref, int ref_stride) {
   int i, j;
 
@@ -257,7 +257,7 @@
 }
 
 // Get pred block from up-sampled reference.
-void vpx_upsampled_pred_c(uint8_t *comp_pred, int width, int height,
+void aom_upsampled_pred_c(uint8_t *comp_pred, int width, int height,
                           const uint8_t *ref, int ref_stride) {
   int i, j, k;
   int stride = ref_stride << 3;
@@ -271,7 +271,7 @@
   }
 }
 
-void vpx_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
+void aom_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
                                    int width, int height, const uint8_t *ref,
                                    int ref_stride) {
   int i, j;
@@ -288,7 +288,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_variance64(const uint8_t *a8, int a_stride,
                               const uint8_t *b8, int b_stride, int w, int h,
                               uint64_t *sse, int64_t *sum) {
@@ -341,7 +341,7 @@
 }
 
 #define HIGHBD_VAR(W, H)                                                       \
-  uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride,  \
+  uint32_t aom_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride,  \
                                               const uint8_t *b, int b_stride,  \
                                               uint32_t *sse) {                 \
     int sum;                                                                   \
@@ -349,7 +349,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                            \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+  uint32_t aom_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                                const uint8_t *b, int b_stride, \
                                                uint32_t *sse) {                \
     int sum;                                                                   \
@@ -359,7 +359,7 @@
     return (var >= 0) ? (uint32_t)var : 0;                                     \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+  uint32_t aom_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                                const uint8_t *b, int b_stride, \
                                                uint32_t *sse) {                \
     int sum;                                                                   \
@@ -370,26 +370,26 @@
   }
 
 #define HIGHBD_GET_VAR(S)                                                    \
-  void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride,  \
+  void aom_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride,  \
                                         const uint8_t *ref, int ref_stride,  \
                                         uint32_t *sse, int *sum) {           \
     highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);     \
   }                                                                          \
                                                                              \
-  void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+  void aom_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
                                          const uint8_t *ref, int ref_stride, \
                                          uint32_t *sse, int *sum) {          \
     highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
   }                                                                          \
                                                                              \
-  void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+  void aom_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
                                          const uint8_t *ref, int ref_stride, \
                                          uint32_t *sse, int *sum) {          \
     highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
   }
 
 #define HIGHBD_MSE(W, H)                                                      \
-  uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride,  \
+  uint32_t aom_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride,  \
                                          const uint8_t *ref, int ref_stride,  \
                                          uint32_t *sse) {                     \
     int sum;                                                                  \
@@ -397,7 +397,7 @@
     return *sse;                                                              \
   }                                                                           \
                                                                               \
-  uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+  uint32_t aom_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
                                           const uint8_t *ref, int ref_stride, \
                                           uint32_t *sse) {                    \
     int sum;                                                                  \
@@ -405,7 +405,7 @@
     return *sse;                                                              \
   }                                                                           \
                                                                               \
-  uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+  uint32_t aom_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
                                           const uint8_t *ref, int ref_stride, \
                                           uint32_t *sse) {                    \
     int sum;                                                                  \
@@ -413,7 +413,7 @@
     return *sse;                                                              \
   }
 
-void vpx_highbd_var_filter_block2d_bil_first_pass(
+void aom_highbd_var_filter_block2d_bil_first_pass(
     const uint8_t *src_ptr8, uint16_t *output_ptr,
     unsigned int src_pixels_per_line, int pixel_step,
     unsigned int output_height, unsigned int output_width,
@@ -435,7 +435,7 @@
   }
 }
 
-void vpx_highbd_var_filter_block2d_bil_second_pass(
+void aom_highbd_var_filter_block2d_bil_second_pass(
     const uint16_t *src_ptr, uint16_t *output_ptr,
     unsigned int src_pixels_per_line, unsigned int pixel_step,
     unsigned int output_height, unsigned int output_width,
@@ -456,53 +456,53 @@
 }
 
 #define HIGHBD_SUBPIX_VAR(W, H)                                              \
-  uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c(                     \
+  uint32_t aom_highbd_8_sub_pixel_variance##W##x##H##_c(                     \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
     uint16_t fdata3[(H + 1) * W];                                            \
     uint16_t temp2[H * W];                                                   \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W,  \
+    return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W,  \
                                               dst, dst_stride, sse);         \
   }                                                                          \
                                                                              \
-  uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c(                    \
+  uint32_t aom_highbd_10_sub_pixel_variance##W##x##H##_c(                    \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
     uint16_t fdata3[(H + 1) * W];                                            \
     uint16_t temp2[H * W];                                                   \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+    return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
                                                dst, dst_stride, sse);        \
   }                                                                          \
                                                                              \
-  uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c(                    \
+  uint32_t aom_highbd_12_sub_pixel_variance##W##x##H##_c(                    \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
     uint16_t fdata3[(H + 1) * W];                                            \
     uint16_t temp2[H * W];                                                   \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+    return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
                                                dst, dst_stride, sse);        \
   }
 
 #define HIGHBD_SUBPIX_AVG_VAR(W, H)                                          \
-  uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c(                 \
+  uint32_t aom_highbd_8_sub_pixel_avg_variance##W##x##H##_c(                 \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
       const uint8_t *second_pred) {                                          \
@@ -510,19 +510,19 @@
     uint16_t temp2[H * W];                                                   \
     DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
+    aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
                                CONVERT_TO_BYTEPTR(temp2), W);                \
                                                                              \
-    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W,  \
+    return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W,  \
                                               dst, dst_stride, sse);         \
   }                                                                          \
                                                                              \
-  uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c(                \
+  uint32_t aom_highbd_10_sub_pixel_avg_variance##W##x##H##_c(                \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
       const uint8_t *second_pred) {                                          \
@@ -530,19 +530,19 @@
     uint16_t temp2[H * W];                                                   \
     DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
+    aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
                                CONVERT_TO_BYTEPTR(temp2), W);                \
                                                                              \
-    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+    return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
                                                dst, dst_stride, sse);        \
   }                                                                          \
                                                                              \
-  uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c(                \
+  uint32_t aom_highbd_12_sub_pixel_avg_variance##W##x##H##_c(                \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
       const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
       const uint8_t *second_pred) {                                          \
@@ -550,15 +550,15 @@
     uint16_t temp2[H * W];                                                   \
     DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                              \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                            \
+    aom_highbd_var_filter_block2d_bil_first_pass(                            \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                           \
+    aom_highbd_var_filter_block2d_bil_second_pass(                           \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);            \
                                                                              \
-    vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
+    aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H,                     \
                                CONVERT_TO_BYTEPTR(temp2), W);                \
                                                                              \
-    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+    return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
                                                dst, dst_stride, sse);        \
   }
 
@@ -568,11 +568,11 @@
   HIGHBD_SUBPIX_VAR(W, H)      \
   HIGHBD_SUBPIX_AVG_VAR(W, H)
 
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 HIGHBD_VARIANCES(128, 128)
 HIGHBD_VARIANCES(128, 64)
 HIGHBD_VARIANCES(64, 128)
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 HIGHBD_VARIANCES(64, 64)
 HIGHBD_VARIANCES(64, 32)
 HIGHBD_VARIANCES(32, 64)
@@ -595,7 +595,7 @@
 HIGHBD_MSE(8, 16)
 HIGHBD_MSE(8, 8)
 
-void vpx_highbd_comp_avg_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
+void aom_highbd_comp_avg_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
                                 int width, int height, const uint8_t *ref8,
                                 int ref_stride) {
   int i, j;
@@ -612,7 +612,7 @@
   }
 }
 
-void vpx_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
+void aom_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
                                  const uint8_t *ref8, int ref_stride) {
   int i, j;
   int stride = ref_stride << 3;
@@ -627,7 +627,7 @@
   }
 }
 
-void vpx_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
+void aom_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
                                           const uint8_t *pred8, int width,
                                           int height, const uint8_t *ref8,
                                           int ref_stride) {
@@ -646,9 +646,9 @@
     ref += stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP10 && CONFIG_EXT_INTER
+#if CONFIG_AV1 && CONFIG_EXT_INTER
 void masked_variance(const uint8_t *a, int a_stride, const uint8_t *b,
                      int b_stride, const uint8_t *m, int m_stride, int w, int h,
                      unsigned int *sse, int *sum) {
@@ -674,7 +674,7 @@
 }
 
 #define MASK_VAR(W, H)                                                       \
-  unsigned int vpx_masked_variance##W##x##H##_c(                             \
+  unsigned int aom_masked_variance##W##x##H##_c(                             \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,        \
       const uint8_t *m, int m_stride, unsigned int *sse) {                   \
     int sum;                                                                 \
@@ -683,7 +683,7 @@
   }
 
 #define MASK_SUBPIX_VAR(W, H)                                                 \
-  unsigned int vpx_masked_sub_pixel_variance##W##x##H##_c(                    \
+  unsigned int aom_masked_sub_pixel_variance##W##x##H##_c(                    \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                    \
@@ -695,7 +695,7 @@
     var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,             \
                                        bilinear_filters_2t[yoffset]);         \
                                                                               \
-    return vpx_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, msk,   \
+    return aom_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, msk,   \
                                             msk_stride, sse);                 \
   }
 
@@ -749,7 +749,7 @@
 MASK_SUBPIX_VAR(128, 128)
 #endif  // CONFIG_EXT_PARTITION
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_masked_variance64(const uint8_t *a8, int a_stride,
                               const uint8_t *b8, int b_stride, const uint8_t *m,
                               int m_stride, int w, int h, uint64_t *sse,
@@ -813,7 +813,7 @@
 }
 
 #define HIGHBD_MASK_VAR(W, H)                                                \
-  unsigned int vpx_highbd_masked_variance##W##x##H##_c(                      \
+  unsigned int aom_highbd_masked_variance##W##x##H##_c(                      \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,        \
       const uint8_t *m, int m_stride, unsigned int *sse) {                   \
     int sum;                                                                 \
@@ -822,7 +822,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                          \
   }                                                                          \
                                                                              \
-  unsigned int vpx_highbd_10_masked_variance##W##x##H##_c(                   \
+  unsigned int aom_highbd_10_masked_variance##W##x##H##_c(                   \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,        \
       const uint8_t *m, int m_stride, unsigned int *sse) {                   \
     int sum;                                                                 \
@@ -831,7 +831,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                          \
   }                                                                          \
                                                                              \
-  unsigned int vpx_highbd_12_masked_variance##W##x##H##_c(                   \
+  unsigned int aom_highbd_12_masked_variance##W##x##H##_c(                   \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,        \
       const uint8_t *m, int m_stride, unsigned int *sse) {                   \
     int sum;                                                                 \
@@ -841,51 +841,51 @@
   }
 
 #define HIGHBD_MASK_SUBPIX_VAR(W, H)                                          \
-  unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_c(             \
+  unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_c(             \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                    \
     uint16_t fdata3[(H + 1) * W];                                             \
     uint16_t temp2[H * W];                                                    \
                                                                               \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                             \
+    aom_highbd_var_filter_block2d_bil_first_pass(                             \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);  \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                            \
+    aom_highbd_var_filter_block2d_bil_second_pass(                            \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);             \
                                                                               \
-    return vpx_highbd_masked_variance##W##x##H##_c(                           \
+    return aom_highbd_masked_variance##W##x##H##_c(                           \
         CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
   }                                                                           \
                                                                               \
-  unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_c(          \
+  unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c(          \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                    \
     uint16_t fdata3[(H + 1) * W];                                             \
     uint16_t temp2[H * W];                                                    \
                                                                               \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                             \
+    aom_highbd_var_filter_block2d_bil_first_pass(                             \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);  \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                            \
+    aom_highbd_var_filter_block2d_bil_second_pass(                            \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);             \
                                                                               \
-    return vpx_highbd_10_masked_variance##W##x##H##_c(                        \
+    return aom_highbd_10_masked_variance##W##x##H##_c(                        \
         CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
   }                                                                           \
                                                                               \
-  unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_c(          \
+  unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c(          \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                    \
     uint16_t fdata3[(H + 1) * W];                                             \
     uint16_t temp2[H * W];                                                    \
                                                                               \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                             \
+    aom_highbd_var_filter_block2d_bil_first_pass(                             \
         src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);  \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                            \
+    aom_highbd_var_filter_block2d_bil_second_pass(                            \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);             \
                                                                               \
-    return vpx_highbd_12_masked_variance##W##x##H##_c(                        \
+    return aom_highbd_12_masked_variance##W##x##H##_c(                        \
         CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
   }
 
@@ -938,10 +938,10 @@
 HIGHBD_MASK_VAR(128, 128)
 HIGHBD_MASK_SUBPIX_VAR(128, 128)
 #endif  // CONFIG_EXT_PARTITION
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#endif  // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_AV1 && CONFIG_EXT_INTER
 
-#if CONFIG_VP10 && CONFIG_OBMC
+#if CONFIG_AV1 && CONFIG_OBMC
 static INLINE void obmc_variance(const uint8_t *pre, int pre_stride,
                                  const int32_t *wsrc, const int32_t *mask,
                                  int w, int h, unsigned int *sse, int *sum) {
@@ -964,7 +964,7 @@
 }
 
 #define OBMC_VAR(W, H)                                           \
-  unsigned int vpx_obmc_variance##W##x##H##_c(                   \
+  unsigned int aom_obmc_variance##W##x##H##_c(                   \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,   \
       const int32_t *mask, unsigned int *sse) {                  \
     int sum;                                                     \
@@ -973,7 +973,7 @@
   }
 
 #define OBMC_SUBPIX_VAR(W, H)                                               \
-  unsigned int vpx_obmc_sub_pixel_variance##W##x##H##_c(                    \
+  unsigned int aom_obmc_sub_pixel_variance##W##x##H##_c(                    \
       const uint8_t *pre, int pre_stride, int xoffset, int yoffset,         \
       const int32_t *wsrc, const int32_t *mask, unsigned int *sse) {        \
     uint16_t fdata3[(H + 1) * W];                                           \
@@ -984,7 +984,7 @@
     var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,           \
                                        bilinear_filters_2t[yoffset]);       \
                                                                             \
-    return vpx_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse);       \
+    return aom_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse);       \
   }
 
 OBMC_VAR(4, 4)
@@ -1037,7 +1037,7 @@
 OBMC_SUBPIX_VAR(128, 128)
 #endif  // CONFIG_EXT_PARTITION
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_obmc_variance64(const uint8_t *pre8, int pre_stride,
                                           const int32_t *wsrc,
                                           const int32_t *mask, int w, int h,
@@ -1095,7 +1095,7 @@
 }
 
 #define HIGHBD_OBMC_VAR(W, H)                                              \
-  unsigned int vpx_highbd_obmc_variance##W##x##H##_c(                      \
+  unsigned int aom_highbd_obmc_variance##W##x##H##_c(                      \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -1103,7 +1103,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                        \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_10_obmc_variance##W##x##H##_c(                   \
+  unsigned int aom_highbd_10_obmc_variance##W##x##H##_c(                   \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -1111,7 +1111,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                        \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_12_obmc_variance##W##x##H##_c(                   \
+  unsigned int aom_highbd_12_obmc_variance##W##x##H##_c(                   \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -1120,48 +1120,48 @@
   }
 
 #define HIGHBD_OBMC_SUBPIX_VAR(W, H)                                           \
-  unsigned int vpx_highbd_obmc_sub_pixel_variance##W##x##H##_c(                \
+  unsigned int aom_highbd_obmc_sub_pixel_variance##W##x##H##_c(                \
       const uint8_t *pre, int pre_stride, int xoffset, int yoffset,            \
       const int32_t *wsrc, const int32_t *mask, unsigned int *sse) {           \
     uint16_t fdata3[(H + 1) * W];                                              \
     uint16_t temp2[H * W];                                                     \
                                                                                \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                              \
+    aom_highbd_var_filter_block2d_bil_first_pass(                              \
         pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);   \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                             \
+    aom_highbd_var_filter_block2d_bil_second_pass(                             \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);              \
                                                                                \
-    return vpx_highbd_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+    return aom_highbd_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
                                                  wsrc, mask, sse);             \
   }                                                                            \
                                                                                \
-  unsigned int vpx_highbd_10_obmc_sub_pixel_variance##W##x##H##_c(             \
+  unsigned int aom_highbd_10_obmc_sub_pixel_variance##W##x##H##_c(             \
       const uint8_t *pre, int pre_stride, int xoffset, int yoffset,            \
       const int32_t *wsrc, const int32_t *mask, unsigned int *sse) {           \
     uint16_t fdata3[(H + 1) * W];                                              \
     uint16_t temp2[H * W];                                                     \
                                                                                \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                              \
+    aom_highbd_var_filter_block2d_bil_first_pass(                              \
         pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);   \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                             \
+    aom_highbd_var_filter_block2d_bil_second_pass(                             \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);              \
                                                                                \
-    return vpx_highbd_10_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+    return aom_highbd_10_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
                                                     W, wsrc, mask, sse);       \
   }                                                                            \
                                                                                \
-  unsigned int vpx_highbd_12_obmc_sub_pixel_variance##W##x##H##_c(             \
+  unsigned int aom_highbd_12_obmc_sub_pixel_variance##W##x##H##_c(             \
       const uint8_t *pre, int pre_stride, int xoffset, int yoffset,            \
       const int32_t *wsrc, const int32_t *mask, unsigned int *sse) {           \
     uint16_t fdata3[(H + 1) * W];                                              \
     uint16_t temp2[H * W];                                                     \
                                                                                \
-    vpx_highbd_var_filter_block2d_bil_first_pass(                              \
+    aom_highbd_var_filter_block2d_bil_first_pass(                              \
         pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);   \
-    vpx_highbd_var_filter_block2d_bil_second_pass(                             \
+    aom_highbd_var_filter_block2d_bil_second_pass(                             \
         fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);              \
                                                                                \
-    return vpx_highbd_12_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+    return aom_highbd_12_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
                                                     W, wsrc, mask, sse);       \
   }
 
@@ -1214,5 +1214,5 @@
 HIGHBD_OBMC_VAR(128, 128)
 HIGHBD_OBMC_SUBPIX_VAR(128, 128)
 #endif  // CONFIG_EXT_PARTITION
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#endif  // CONFIG_VP10 && CONFIG_OBMC
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_AV1 && CONFIG_OBMC
diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h
index 088e09c..a4bad8c 100644
--- a/aom_dsp/variance.h
+++ b/aom_dsp/variance.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_VARIANCE_H_
-#define VPX_DSP_VARIANCE_H_
+#ifndef AOM_DSP_VARIANCE_H_
+#define AOM_DSP_VARIANCE_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -22,95 +22,95 @@
 #define FILTER_BITS 7
 #define FILTER_WEIGHT 128
 
-typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_sad_fn_t)(const uint8_t *a, int a_stride,
                                      const uint8_t *b, int b_stride);
 
-typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_sad_avg_fn_t)(const uint8_t *a, int a_stride,
                                          const uint8_t *b, int b_stride,
                                          const uint8_t *second_pred);
 
-typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
+typedef void (*aom_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
                                   int b_stride, int n);
 
-typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride,
+typedef void (*aom_sad_multi_fn_t)(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sad_array);
 
-typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
+typedef void (*aom_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
                                      const uint8_t *const b_array[],
                                      int b_stride, unsigned int *sad_array);
 
-typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_variance_fn_t)(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           unsigned int *sse);
 
-typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
                                                 int xoffset, int yoffset,
                                                 const uint8_t *b, int b_stride,
                                                 unsigned int *sse);
 
-typedef unsigned int (*vpx_subp_avg_variance_fn_t)(
+typedef unsigned int (*aom_subp_avg_variance_fn_t)(
     const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b,
     int b_stride, unsigned int *sse, const uint8_t *second_pred);
 
-#if CONFIG_VP10 && CONFIG_EXT_INTER
-typedef unsigned int (*vpx_masked_sad_fn_t)(const uint8_t *src, int src_stride,
+#if CONFIG_AV1 && CONFIG_EXT_INTER
+typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
                                             const uint8_t *ref, int ref_stride,
                                             const uint8_t *msk_ptr,
                                             int msk_stride);
-typedef unsigned int (*vpx_masked_variance_fn_t)(
+typedef unsigned int (*aom_masked_variance_fn_t)(
     const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
     const uint8_t *msk, int msk_stride, unsigned int *sse);
-typedef unsigned int (*vpx_masked_subpixvariance_fn_t)(
+typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *ref, int ref_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse);
-#endif  // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif  // CONFIG_AV1 && CONFIG_EXT_INTER
 
-#if CONFIG_VP10 && CONFIG_OBMC
-typedef unsigned int (*vpx_obmc_sad_fn_t)(const uint8_t *pred, int pred_stride,
+#if CONFIG_AV1 && CONFIG_OBMC
+typedef unsigned int (*aom_obmc_sad_fn_t)(const uint8_t *pred, int pred_stride,
                                           const int32_t *wsrc,
                                           const int32_t *msk);
-typedef unsigned int (*vpx_obmc_variance_fn_t)(const uint8_t *pred,
+typedef unsigned int (*aom_obmc_variance_fn_t)(const uint8_t *pred,
                                                int pred_stride,
                                                const int32_t *wsrc,
                                                const int32_t *msk,
                                                unsigned int *sse);
-typedef unsigned int (*vpx_obmc_subpixvariance_fn_t)(
+typedef unsigned int (*aom_obmc_subpixvariance_fn_t)(
     const uint8_t *pred, int pred_stride, int xoffset, int yoffset,
     const int32_t *wsrc, const int32_t *msk, unsigned int *sse);
-#endif  // CONFIG_VP10 && CONFIG_OBMC
+#endif  // CONFIG_AV1 && CONFIG_OBMC
 
-#if CONFIG_VP10
-typedef struct vpx_variance_vtable {
-  vpx_sad_fn_t sdf;
-  vpx_sad_avg_fn_t sdaf;
-  vpx_variance_fn_t vf;
-  vpx_subpixvariance_fn_t svf;
-  vpx_subp_avg_variance_fn_t svaf;
-  vpx_sad_multi_fn_t sdx3f;
-  vpx_sad_multi_fn_t sdx8f;
-  vpx_sad_multi_d_fn_t sdx4df;
+#if CONFIG_AV1
+typedef struct aom_variance_vtable {
+  aom_sad_fn_t sdf;
+  aom_sad_avg_fn_t sdaf;
+  aom_variance_fn_t vf;
+  aom_subpixvariance_fn_t svf;
+  aom_subp_avg_variance_fn_t svaf;
+  aom_sad_multi_fn_t sdx3f;
+  aom_sad_multi_fn_t sdx8f;
+  aom_sad_multi_d_fn_t sdx4df;
 #if CONFIG_EXT_INTER
-  vpx_masked_sad_fn_t msdf;
-  vpx_masked_variance_fn_t mvf;
-  vpx_masked_subpixvariance_fn_t msvf;
+  aom_masked_sad_fn_t msdf;
+  aom_masked_variance_fn_t mvf;
+  aom_masked_subpixvariance_fn_t msvf;
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC
-  vpx_obmc_sad_fn_t osdf;
-  vpx_obmc_variance_fn_t ovf;
-  vpx_obmc_subpixvariance_fn_t osvf;
+  aom_obmc_sad_fn_t osdf;
+  aom_obmc_variance_fn_t ovf;
+  aom_obmc_subpixvariance_fn_t osvf;
 #endif  // CONFIG_OBMC
-} vpx_variance_fn_ptr_t;
-#endif  // CONFIG_VP10
+} aom_variance_fn_ptr_t;
+#endif  // CONFIG_AV1
 
-void vpx_highbd_var_filter_block2d_bil_first_pass(
+void aom_highbd_var_filter_block2d_bil_first_pass(
     const uint8_t *src_ptr8, uint16_t *output_ptr,
     unsigned int src_pixels_per_line, int pixel_step,
     unsigned int output_height, unsigned int output_width,
     const uint8_t *filter);
 
-void vpx_highbd_var_filter_block2d_bil_second_pass(
+void aom_highbd_var_filter_block2d_bil_second_pass(
     const uint16_t *src_ptr, uint16_t *output_ptr,
     unsigned int src_pixels_per_line, unsigned int pixel_step,
     unsigned int output_height, unsigned int output_width,
@@ -120,4 +120,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_VARIANCE_H_
+#endif  // AOM_DSP_VARIANCE_H_
diff --git a/aom_dsp/vpx_dsp_rtcd_defs.pl b/aom_dsp/vpx_dsp_rtcd_defs.pl
deleted file mode 100644
index 509fba6..0000000
--- a/aom_dsp/vpx_dsp_rtcd_defs.pl
+++ /dev/null
@@ -1,1929 +0,0 @@
-sub vpx_dsp_forward_decls() {
-print <<EOF
-/*
- * DSP
- */
-
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-
-EOF
-}
-forward_decls qw/vpx_dsp_forward_decls/;
-
-# optimizations which depend on multiple features
-$avx2_ssse3 = '';
-if ((vpx_config("HAVE_AVX2") eq "yes") && (vpx_config("HAVE_SSSE3") eq "yes")) {
-  $avx2_ssse3 = 'avx2';
-}
-
-# functions that are 64 bit only.
-$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
-if ($opts{arch} eq "x86_64") {
-  $mmx_x86_64 = 'mmx';
-  $sse2_x86_64 = 'sse2';
-  $ssse3_x86_64 = 'ssse3';
-  $avx_x86_64 = 'avx';
-  $avx2_x86_64 = 'avx2';
-}
-
-if (vpx_config("CONFIG_EXT_PARTITION") eq "yes") {
-  @block_widths = (4, 8, 16, 32, 64, 128)
-} else {
-  @block_widths = (4, 8, 16, 32, 64)
-}
-
-@block_sizes = ();
-foreach $w (@block_widths) {
-  foreach $h (@block_widths) {
-    push @block_sizes, [$w, $h] if ($w <= 2*$h && $h <= 2*$w) ;
-  }
-}
-
-#
-# Intra prediction
-#
-
-add_proto qw/void vpx_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_4x4 sse2/;
-
-add_proto qw/void vpx_d207e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_4x4/;
-
-add_proto qw/void vpx_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_4x4 neon sse2/;
-
-add_proto qw/void vpx_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_4x4/;
-
-add_proto qw/void vpx_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_4x4 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_4x4/;
-
-add_proto qw/void vpx_d63f_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63f_predictor_4x4/;
-
-add_proto qw/void vpx_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_4x4 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_he_predictor_4x4/;
-
-add_proto qw/void vpx_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_4x4/;
-
-add_proto qw/void vpx_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_4x4 neon/;
-
-add_proto qw/void vpx_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_4x4 ssse3/;
-
-add_proto qw/void vpx_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_4x4 neon msa sse2/;
-
-add_proto qw/void vpx_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_ve_predictor_4x4/;
-
-add_proto qw/void vpx_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_4x4 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_4x4 dspr2 msa neon sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_8x8/;
-
-add_proto qw/void vpx_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_8x8 neon sse2/;
-
-add_proto qw/void vpx_d45e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_8x8/;
-
-add_proto qw/void vpx_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_8x8/;
-
-add_proto qw/void vpx_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_8x8 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_8x8/;
-
-add_proto qw/void vpx_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_8x8/;
-
-add_proto qw/void vpx_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_16x16/;
-
-add_proto qw/void vpx_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_16x16 neon ssse3/;
-
-add_proto qw/void vpx_d45e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_16x16/;
-
-add_proto qw/void vpx_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_16x16/;
-
-add_proto qw/void vpx_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_16x16 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_16x16/;
-
-add_proto qw/void vpx_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_16x16/;
-
-add_proto qw/void vpx_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_16x16 dspr2 neon msa sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_32x32/;
-
-add_proto qw/void vpx_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d45e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_32x32/;
-
-add_proto qw/void vpx_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_32x32/;
-
-add_proto qw/void vpx_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_32x32/;
-
-add_proto qw/void vpx_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_32x32/;
-
-add_proto qw/void vpx_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2/;
-
-# High bitdepth functions
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vpx_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d207e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207e_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d45e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45e_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d63e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63e_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_h_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d117_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d135_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d153_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_v_predictor_4x4 sse2/;
-
-  add_proto qw/void vpx_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_tm_predictor_4x4 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_predictor_4x4 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_top_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_left_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_128_predictor_4x4/;
-
-  add_proto qw/void vpx_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d207e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207e_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d45e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45e_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d63e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63e_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_h_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d117_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d135_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d153_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_v_predictor_8x8 sse2/;
-
-  add_proto qw/void vpx_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_tm_predictor_8x8 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_predictor_8x8 sse2/;;
-
-  add_proto qw/void vpx_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_top_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_left_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_128_predictor_8x8/;
-
-  add_proto qw/void vpx_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d207e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207e_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d45e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45e_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d63e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63e_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_h_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d117_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d135_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d153_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_v_predictor_16x16 sse2/;
-
-  add_proto qw/void vpx_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_tm_predictor_16x16 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_predictor_16x16 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_top_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_left_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_128_predictor_16x16/;
-
-  add_proto qw/void vpx_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d207e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d207e_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d45e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d45e_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d63e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d63e_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_h_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d117_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d135_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_d153_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_v_predictor_32x32 sse2/;
-
-  add_proto qw/void vpx_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_tm_predictor_32x32 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_predictor_32x32 sse2/;
-
-  add_proto qw/void vpx_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_top_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_left_predictor_32x32/;
-
-  add_proto qw/void vpx_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
-  specialize qw/vpx_highbd_dc_128_predictor_32x32/;
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-#
-# Sub Pixel Filters
-#
-add_proto qw/void vpx_convolve_copy/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve_avg/,        "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_horiz/,     "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_vert/,      "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg_vert/,  "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_2d/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_horiz/,        "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_vert/,         "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_2d/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_horiz/,    "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_vert/,     "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-
-specialize qw/vpx_convolve_copy       sse2      /;
-specialize qw/vpx_convolve_avg        sse2      /;
-specialize qw/vpx_convolve8           sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_horiz     sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_vert      sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_avg       sse2 ssse3/;
-specialize qw/vpx_convolve8_avg_horiz sse2 ssse3/;
-specialize qw/vpx_convolve8_avg_vert  sse2 ssse3/;
-specialize qw/vpx_scaled_2d                ssse3/;
-
-# TODO(any): These need to be extended to up to 128x128 block sizes
-if (!(vpx_config("CONFIG_VP10") eq "yes" && vpx_config("CONFIG_EXT_PARTITION") eq "yes")) {
-  specialize qw/vpx_convolve_copy       neon dspr2 msa/;
-  specialize qw/vpx_convolve_avg        neon dspr2 msa/;
-  specialize qw/vpx_convolve8           neon dspr2 msa/;
-  specialize qw/vpx_convolve8_horiz     neon dspr2 msa/;
-  specialize qw/vpx_convolve8_vert      neon dspr2 msa/;
-  specialize qw/vpx_convolve8_avg       neon dspr2 msa/;
-  specialize qw/vpx_convolve8_avg_horiz neon dspr2 msa/;
-  specialize qw/vpx_convolve8_avg_vert  neon dspr2 msa/;
-}
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vpx_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve_copy sse2/;
-
-  add_proto qw/void vpx_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve_avg sse2/;
-
-  add_proto qw/void vpx_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8_vert/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8_avg/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vpx_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-#
-# Loopfilter
-#
-add_proto qw/void vpx_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_16 sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_16_neon_asm=vpx_lpf_vertical_16_neon;
-
-add_proto qw/void vpx_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_16_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_16_dual_neon_asm=vpx_lpf_vertical_16_dual_neon;
-
-add_proto qw/void vpx_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_8 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_vertical_8_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_8_dual_neon_asm=vpx_lpf_vertical_8_dual_neon;
-
-add_proto qw/void vpx_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_4 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_vertical_4_dual sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_edge_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_edge_8 sse2 avx2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_edge_8_neon_asm=vpx_lpf_horizontal_edge_8_neon;
-
-add_proto qw/void vpx_lpf_horizontal_edge_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_edge_16 sse2 avx2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_edge_16_neon_asm=vpx_lpf_horizontal_edge_16_neon;
-
-add_proto qw/void vpx_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_8 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_horizontal_8_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_8_dual_neon_asm=vpx_lpf_horizontal_8_dual_neon;
-
-add_proto qw/void vpx_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_4 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vpx_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_16 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_16_dual sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_8 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_8_dual sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_4 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/vpx_highbd_lpf_vertical_4_dual sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_edge_8 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_edge_16 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_8 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_8_dual sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_4 sse2/;
-
-  add_proto qw/void vpx_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/vpx_highbd_lpf_horizontal_4_dual sse2/;
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-#
-# Encoder functions.
-#
-
-#
-# Forward transform
-#
-if ((vpx_config("CONFIG_VP10_ENCODER") eq "yes")) {
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vpx_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct4x4 sse2/;
-
-  add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct4x4_1 sse2/;
-
-  add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct8x8 sse2/;
-
-  add_proto qw/void vpx_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct8x8_1 sse2/;
-
-  add_proto qw/void vpx_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct16x16 sse2/;
-
-  add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct16x16_1 sse2/;
-
-  add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32 sse2/;
-
-  add_proto qw/void vpx_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32_rd sse2/;
-
-  add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32_1 sse2/;
-
-  add_proto qw/void vpx_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct4x4 sse2/;
-
-  add_proto qw/void vpx_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct8x8 sse2/;
-
-  add_proto qw/void vpx_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct8x8_1/;
-
-  add_proto qw/void vpx_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct16x16 sse2/;
-
-  add_proto qw/void vpx_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct16x16_1/;
-
-  add_proto qw/void vpx_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct32x32 sse2/;
-
-  add_proto qw/void vpx_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct32x32_rd sse2/;
-
-  add_proto qw/void vpx_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_highbd_fdct32x32_1/;
-} else {
-  add_proto qw/void vpx_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct4x4 sse2 msa/;
-
-  add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct4x4_1 sse2/;
-
-  add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
-
-  add_proto qw/void vpx_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct8x8_1 sse2 neon msa/;
-
-  add_proto qw/void vpx_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct16x16 sse2 msa/;
-
-  add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct16x16_1 sse2 msa/;
-
-  add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32 sse2 avx2 msa/;
-
-  add_proto qw/void vpx_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32_rd sse2 avx2 msa/;
-
-  add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vpx_fdct32x32_1 sse2 msa/;
-}  # CONFIG_VP9_HIGHBITDEPTH
-}  # CONFIG_VP10_ENCODER
-
-#
-# Inverse transform
-if (vpx_config("CONFIG_VP10") eq "yes") {
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vpx_iwht4x4_1_add/;
-
-  add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vpx_iwht4x4_16_add sse2/;
-
-  add_proto qw/void vpx_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct4x4_1_add/;
-
-  add_proto qw/void vpx_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct8x8_1_add/;
-
-  add_proto qw/void vpx_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct16x16_1_add/;
-
-  add_proto qw/void vpx_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct32x32_1024_add/;
-
-  add_proto qw/void vpx_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct32x32_34_add/;
-
-  add_proto qw/void vpx_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_idct32x32_1_add/;
-
-  add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_iwht4x4_1_add/;
-
-  add_proto qw/void vpx_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vpx_highbd_iwht4x4_16_add/;
-
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_16_add/;
-
-    add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_1_add/;
-
-    add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_64_add/;
-
-    add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_12_add/;
-
-    add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_1_add/;
-
-    add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_256_add/;
-
-    add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_10_add/;
-
-    add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_1_add/;
-
-    add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1024_add/;
-
-    add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_135_add/;
-
-    add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_34_add/;
-
-    add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1_add/;
-
-    add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct4x4_16_add/;
-
-    add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_64_add/;
-
-    add_proto qw/void vpx_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_10_add/;
-
-    add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct16x16_256_add/;
-
-    add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct16x16_10_add/;
-  } else {
-    add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_16_add sse2/;
-
-    add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_1_add sse2/;
-
-    add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_64_add sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_12_add sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_1_add sse2/;
-
-    add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_256_add sse2/;
-
-    add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_10_add sse2/;
-
-    add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_1_add sse2/;
-
-    add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64";
-    # Need to add 135 eob idct32x32 implementations.
-    $vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
-
-    add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_34_add sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1_add sse2/;
-
-    add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct4x4_16_add sse2/;
-
-    add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_64_add sse2/;
-
-    add_proto qw/void vpx_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_10_add sse2/;
-
-    add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct16x16_256_add sse2/;
-
-    add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vpx_highbd_idct16x16_10_add sse2/;
-  }  # CONFIG_EMULATE_HARDWARE
-} else {
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_1_add/;
-
-    add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_16_add/;
-
-    add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_1_add/;
-
-    add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_64_add/;
-
-    add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_12_add/;
-
-    add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_1_add/;
-
-    add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_256_add/;
-
-    add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_10_add/;
-
-    add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1024_add/;
-
-    add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_135_add/;
-
-    add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_34_add/;
-
-    add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1_add/;
-
-    add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_iwht4x4_1_add/;
-
-    add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_iwht4x4_16_add/;
-  } else {
-    add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_1_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct4x4_16_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_1_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_1_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_256_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_10_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
-    add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-    # Need to add 135 eob idct32x32 implementations.
-    $vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
-    $vpx_idct32x32_135_add_neon=vpx_idct32x32_1024_add_neon;
-    $vpx_idct32x32_135_add_dspr2=vpx_idct32x32_1024_add_dspr2;
-    $vpx_idct32x32_135_add_msa=vpx_idct32x32_1024_add_msa;
-
-    add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-    # Need to add 34 eob idct32x32 neon implementation.
-    $vpx_idct32x32_34_add_neon=vpx_idct32x32_1024_add_neon;
-
-    add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct32x32_1_add sse2 neon dspr2 msa/;
-
-    add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_iwht4x4_1_add msa/;
-
-    add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_iwht4x4_16_add msa sse2/;
-  }  # CONFIG_EMULATE_HARDWARE
-}  # CONFIG_VP9_HIGHBITDEPTH
-}  # CONFIG_VP10
-
-#
-# Quantization
-#
-if (vpx_config("CONFIG_AOM_QM") eq "yes") {
-  if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-    add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
-    add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
-    if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-      add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
-      add_proto qw/void vpx_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-    }  # CONFIG_VPX_HIGHBITDEPTH
-  }  # CONFIG_VP10_ENCODER
-} else {
-  if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-    add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vpx_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64";
-
-    add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
-
-    if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-      add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-      specialize qw/vpx_highbd_quantize_b sse2/;
-
-      add_proto qw/void vpx_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-      specialize qw/vpx_highbd_quantize_b_32x32 sse2/;
-    }  # CONFIG_VP9_HIGHBITDEPTH
-  }  # CONFIG_VP10_ENCODER
-} # CONFIG_AOM_QM
-if (vpx_config("CONFIG_VP10") eq "yes") {
-  #
-  # Alpha blending with mask
-  #
-  add_proto qw/void vpx_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
-  add_proto qw/void vpx_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
-  add_proto qw/void vpx_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
-  specialize "vpx_blend_a64_mask", qw/sse4_1/;
-  specialize "vpx_blend_a64_hmask", qw/sse4_1/;
-  specialize "vpx_blend_a64_vmask", qw/sse4_1/;
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void vpx_highbd_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx, int bd";
-    add_proto qw/void vpx_highbd_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
-    add_proto qw/void vpx_highbd_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
-    specialize "vpx_highbd_blend_a64_mask", qw/sse4_1/;
-    specialize "vpx_highbd_blend_a64_hmask", qw/sse4_1/;
-    specialize "vpx_highbd_blend_a64_vmask", qw/sse4_1/;
-  }
-}  # CONFIG_VP10
-
-if (vpx_config("CONFIG_ENCODERS") eq "yes") {
-#
-# Block subtraction
-#
-add_proto qw/void vpx_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride";
-specialize qw/vpx_subtract_block neon msa sse2/;
-
-if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-#
-# Sum of Squares
-#
-add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
-specialize qw/vpx_sum_squares_2d_i16 sse2/;
-
-add_proto qw/uint64_t vpx_sum_squares_i16/, "const int16_t *src, uint32_t N";
-specialize qw/vpx_sum_squares_i16 sse2/;
-}
-
-
-# Single block SAD
-#
-add_proto qw/unsigned int vpx_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x64 avx2 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x32 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x64 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x32 avx2 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x16 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x32 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x16 media neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x8 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x16 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x8 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x4 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x8 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x4 neon msa sse2/;
-
-#
-# Avg
-#
-if ((vpx_config("CONFIG_VP10_ENCODER") eq "yes")) {
-  #
-  # Avg
-  #
-  add_proto qw/unsigned int vpx_avg_8x8/, "const uint8_t *, int p";
-  specialize qw/vpx_avg_8x8 sse2 neon msa/;
-  add_proto qw/unsigned int vpx_avg_4x4/, "const uint8_t *, int p";
-  specialize qw/vpx_avg_4x4 sse2 neon msa/;
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/unsigned int vpx_highbd_avg_8x8/, "const uint8_t *, int p";
-    specialize qw/vpx_highbd_avg_8x8/;
-    add_proto qw/unsigned int vpx_highbd_avg_4x4/, "const uint8_t *, int p";
-    specialize qw/vpx_highbd_avg_4x4/;
-    add_proto qw/void vpx_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
-    specialize qw/vpx_highbd_subtract_block sse2/;
-  }
-
-  #
-  # Minmax
-  #
-  add_proto qw/void vpx_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
-  specialize qw/vpx_minmax_8x8 sse2 neon/;
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void vpx_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
-    specialize qw/vpx_highbd_minmax_8x8/;
-  }
-
-  add_proto qw/void vpx_hadamard_8x8/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
-  specialize qw/vpx_hadamard_8x8 sse2 neon/, "$ssse3_x86_64";
-
-  add_proto qw/void vpx_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
-  specialize qw/vpx_hadamard_16x16 sse2 neon/;
-
-  add_proto qw/int vpx_satd/, "const int16_t *coeff, int length";
-  specialize qw/vpx_satd sse2 neon/;
-
-  add_proto qw/void vpx_int_pro_row/, "int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height";
-  specialize qw/vpx_int_pro_row sse2 neon/;
-
-  add_proto qw/int16_t vpx_int_pro_col/, "const uint8_t *ref, const int width";
-  specialize qw/vpx_int_pro_col sse2 neon/;
-
-  add_proto qw/int vpx_vector_var/, "const int16_t *ref, const int16_t *src, const int bwl";
-  specialize qw/vpx_vector_var neon sse2/;
-}  # CONFIG_VP10_ENCODER
-
-#
-# Single block SAD / Single block Avg SAD
-#
-foreach (@block_sizes) {
-  ($w, $h) = @$_;
-  add_proto qw/unsigned int/, "vpx_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-  add_proto qw/unsigned int/, "vpx_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-}
-
-specialize qw/vpx_sad128x128                        sse2/;
-specialize qw/vpx_sad128x64                         sse2/;
-specialize qw/vpx_sad64x128                         sse2/;
-specialize qw/vpx_sad64x64      avx2            msa sse2/;
-specialize qw/vpx_sad64x32      avx2            msa sse2/;
-specialize qw/vpx_sad32x64      avx2            msa sse2/;
-specialize qw/vpx_sad32x32      avx2       neon msa sse2/;
-specialize qw/vpx_sad32x16      avx2            msa sse2/;
-specialize qw/vpx_sad16x32                      msa sse2/;
-specialize qw/vpx_sad16x16           media neon msa sse2/;
-specialize qw/vpx_sad16x8                  neon msa sse2/;
-specialize qw/vpx_sad8x16                  neon msa sse2/;
-specialize qw/vpx_sad8x8                   neon msa sse2/;
-specialize qw/vpx_sad8x4                        msa sse2/;
-specialize qw/vpx_sad4x8                        msa sse2/;
-specialize qw/vpx_sad4x4                   neon msa sse2/;
-
-specialize qw/vpx_sad128x128_avg          sse2/;
-specialize qw/vpx_sad128x64_avg           sse2/;
-specialize qw/vpx_sad64x128_avg           sse2/;
-specialize qw/vpx_sad64x64_avg   avx2 msa sse2/;
-specialize qw/vpx_sad64x32_avg   avx2 msa sse2/;
-specialize qw/vpx_sad32x64_avg   avx2 msa sse2/;
-specialize qw/vpx_sad32x32_avg   avx2 msa sse2/;
-specialize qw/vpx_sad32x16_avg   avx2 msa sse2/; 
-specialize qw/vpx_sad16x32_avg        msa sse2/;
-specialize qw/vpx_sad16x16_avg        msa sse2/;
-specialize qw/vpx_sad16x8_avg         msa sse2/;
-specialize qw/vpx_sad8x16_avg         msa sse2/;
-specialize qw/vpx_sad8x8_avg          msa sse2/;
-specialize qw/vpx_sad8x4_avg          msa sse2/;
-specialize qw/vpx_sad4x8_avg          msa sse2/;
-specialize qw/vpx_sad4x4_avg          msa sse2/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/unsigned int/, "vpx_highbd_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-    add_proto qw/unsigned int/, "vpx_highbd_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-    if ($w != 128 && $h != 128 && $w != 4) {
-      specialize "vpx_highbd_sad${w}x${h}", qw/sse2/;
-      specialize "vpx_highbd_sad${w}x${h}_avg", qw/sse2/;
-    }
-  }
-}
-
-#
-# Masked SAD
-#
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/unsigned int/, "vpx_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
-    specialize "vpx_masked_sad${w}x${h}", qw/ssse3/;
-  }
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    foreach (@block_sizes) {
-      ($w, $h) = @$_;
-      add_proto qw/unsigned int/, "vpx_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
-      specialize "vpx_highbd_masked_sad${w}x${h}", qw/ssse3/;
-    }
-  }
-}
-
-#
-# OBMC SAD
-#
-if (vpx_config("CONFIG_OBMC") eq "yes") {
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/unsigned int/, "vpx_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
-    specialize "vpx_obmc_sad${w}x${h}", qw/sse4_1/;
-  }
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    foreach (@block_sizes) {
-      ($w, $h) = @$_;
-      add_proto qw/unsigned int/, "vpx_highbd_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
-      specialize "vpx_highbd_obmc_sad${w}x${h}", qw/sse4_1/;
-    }
-  }
-}
-
-#
-# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
-#
-# Blocks of 3
-foreach $s (@block_widths) {
-  add_proto qw/void/, "vpx_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-specialize qw/vpx_sad64x64x3            msa/;
-specialize qw/vpx_sad32x32x3            msa/;
-specialize qw/vpx_sad16x16x3 sse3 ssse3 msa/;
-specialize qw/vpx_sad8x8x3   sse3       msa/;
-specialize qw/vpx_sad4x4x3   sse3       msa/;
-
-add_proto qw/void/, "vpx_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x3 sse3 ssse3 msa/;
-add_proto qw/void/, "vpx_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x3 sse3 msa/;
-
-# Blocks of 8
-foreach $s (@block_widths) {
-  add_proto qw/void/, "vpx_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-specialize qw/vpx_sad64x64x8        msa/;
-specialize qw/vpx_sad32x32x8        msa/;
-specialize qw/vpx_sad16x16x8 sse4_1 msa/;
-specialize qw/vpx_sad8x8x8   sse4_1 msa/;
-specialize qw/vpx_sad4x4x8   sse4_1 msa/;
-
-add_proto qw/void/, "vpx_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x8 sse4_1 msa/;
-add_proto qw/void/, "vpx_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x8 sse4_1 msa/;
-add_proto qw/void/, "vpx_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x4x8 msa/;
-add_proto qw/void/, "vpx_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x8x8 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  foreach $s (@block_widths) {
-    # Blocks of 3
-    add_proto qw/void/, "vpx_highbd_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-    # Blocks of 8
-    add_proto qw/void/, "vpx_highbd_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  }
-  # Blocks of 3
-  add_proto qw/void/, "vpx_highbd_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  add_proto qw/void/, "vpx_highbd_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  # Blocks of 8
-  add_proto qw/void/, "vpx_highbd_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  add_proto qw/void/, "vpx_highbd_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  add_proto qw/void/, "vpx_highbd_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-  add_proto qw/void/, "vpx_highbd_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-
-#
-# Multi-block SAD, comparing a reference to N independent blocks
-#
-foreach (@block_sizes) {
-  ($w, $h) = @$_;
-  add_proto qw/void/, "vpx_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-}
-
-specialize qw/vpx_sad128x128x4d              sse2/;
-specialize qw/vpx_sad128x64x4d               sse2/;
-specialize qw/vpx_sad64x128x4d               sse2/;
-specialize qw/vpx_sad64x64x4d  avx2 neon msa sse2/;
-specialize qw/vpx_sad64x32x4d            msa sse2/;
-specialize qw/vpx_sad32x64x4d            msa sse2/;
-specialize qw/vpx_sad32x32x4d  avx2 neon msa sse2/;
-specialize qw/vpx_sad32x16x4d            msa sse2/;
-specialize qw/vpx_sad16x32x4d            msa sse2/;
-specialize qw/vpx_sad16x16x4d       neon msa sse2/;
-specialize qw/vpx_sad16x8x4d             msa sse2/;
-specialize qw/vpx_sad8x16x4d             msa sse2/;
-specialize qw/vpx_sad8x8x4d              msa sse2/;
-specialize qw/vpx_sad8x4x4d              msa sse2/;
-specialize qw/vpx_sad4x8x4d              msa sse2/;
-specialize qw/vpx_sad4x4x4d              msa sse2/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  #
-  # Multi-block SAD, comparing a reference to N independent blocks
-  #
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/void/, "vpx_highbd_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-    if ($w != 128 && $h != 128) {
-      specialize "vpx_highbd_sad${w}x${h}x4d", qw/sse2/;
-    }
-  }
-}
-
-#
-# Structured Similarity (SSIM)
-#
-if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") {
-  add_proto qw/void vpx_ssim_parms_8x8/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
-  specialize qw/vpx_ssim_parms_8x8/, "$sse2_x86_64";
-
-  add_proto qw/void vpx_ssim_parms_16x16/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
-  specialize qw/vpx_ssim_parms_16x16/, "$sse2_x86_64";
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void vpx_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
-  }
-}
-}  # CONFIG_ENCODERS
-
-if (vpx_config("CONFIG_ENCODERS") eq "yes") {
-
-#
-# Variance
-#
-add_proto qw/unsigned int vpx_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance64x64 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance64x32 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance32x64 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance32x32 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance32x16 sse2 avx2 msa/;
-
-add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance16x32 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance16x16 sse2 avx2 media neon msa/;
-
-add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance16x8 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance8x16 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance8x8 sse2 media neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance8x4 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance4x8 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_variance4x4 sse2 msa/;
-
-#
-# Specialty Variance
-#
-add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-specialize qw/vpx_get16x16var sse2 avx2 neon msa/;
-specialize qw/vpx_get8x8var   sse2      neon msa/;
-
-
-add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-
-specialize qw/vpx_mse16x16          sse2 avx2 media neon msa/;
-specialize qw/vpx_mse16x8           sse2                 msa/;
-specialize qw/vpx_mse8x16           sse2                 msa/;
-specialize qw/vpx_mse8x8            sse2                 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  foreach $bd (8, 10, 12) {
-    add_proto qw/void/, "vpx_highbd_${bd}_get16x16var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-    add_proto qw/void/, "vpx_highbd_${bd}_get8x8var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-    add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse16x16", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-    add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse16x8", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-    add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse8x16", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-    add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse8x8", "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-
-    specialize "vpx_highbd_${bd}_mse16x16", qw/sse2/;
-    specialize "vpx_highbd_${bd}_mse8x8", qw/sse2/;
-  }
-}
-
-#
-# ...
-#
-add_proto qw/void vpx_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
-specialize qw/vpx_upsampled_pred sse2/;
-add_proto qw/void vpx_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
-specialize qw/vpx_comp_avg_upsampled_pred sse2/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vpx_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
-  specialize qw/vpx_highbd_upsampled_pred sse2/;
-  add_proto qw/void vpx_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
-  specialize qw/vpx_highbd_comp_avg_upsampled_pred sse2/;
-}
-
-#
-# ...
-#
-add_proto qw/unsigned int vpx_get_mb_ss/, "const int16_t *";
-add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
-
-specialize qw/vpx_get_mb_ss sse2 msa/;
-specialize qw/vpx_get4x4sse_cs neon msa/;
-
-#
-# Variance / Subpixel Variance / Subpixel Avg Variance
-#
-foreach (@block_sizes) {
-  ($w, $h) = @$_;
-  add_proto qw/unsigned int/, "vpx_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/uint32_t/, "vpx_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  add_proto qw/uint32_t/, "vpx_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-}
-
-specialize qw/vpx_variance64x64     sse2 avx2       neon msa/;
-specialize qw/vpx_variance64x32     sse2 avx2       neon msa/;
-specialize qw/vpx_variance32x64     sse2            neon msa/;
-specialize qw/vpx_variance32x32     sse2 avx2       neon msa/;
-specialize qw/vpx_variance32x16     sse2 avx2            msa/;
-specialize qw/vpx_variance16x32     sse2                 msa/;
-specialize qw/vpx_variance16x16     sse2 avx2 media neon msa/;
-specialize qw/vpx_variance16x8      sse2            neon msa/;
-specialize qw/vpx_variance8x16      sse2            neon msa/;
-specialize qw/vpx_variance8x8       sse2      media neon msa/;
-specialize qw/vpx_variance8x4       sse2                 msa/;
-specialize qw/vpx_variance4x8       sse2                 msa/;
-specialize qw/vpx_variance4x4       sse2                 msa/;
-
-specialize qw/vpx_sub_pixel_variance64x64     avx2       neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance64x32                     msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x64                     msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x32     avx2       neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x16                     msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x32                     msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x16          media neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x8                      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x16                      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x8            media neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x4                       msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance4x8                       msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance4x4                       msa sse2 ssse3/;
-
-specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance64x32      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x64      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x16      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x32      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x16      msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x8       msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x16       msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x8        msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x4        msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance4x8        msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance4x4        msa sse2 ssse3/;
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  foreach $bd (8, 10, 12) {
-    foreach (@block_sizes) {
-      ($w, $h) = @$_;
-      add_proto qw/unsigned int/, "vpx_highbd_${bd}_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-      add_proto qw/uint32_t/, "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-      add_proto qw/uint32_t/, "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-      if ($w != 128 && $h != 128 && $w != 4 && $h != 4) {
-        specialize "vpx_highbd_${bd}_variance${w}x${h}", "sse2";
-      }
-      if ($w == 4 && $h == 4) {
-        specialize "vpx_highbd_${bd}_variance${w}x${h}", "sse4_1";
-      }
-      if ($w != 128 && $h != 128 && $w != 4) {
-        specialize "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", qw/sse2/;
-        specialize "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", qw/sse2/;
-      }
-      if ($w == 4 && $h == 4) {
-        specialize "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", "sse4_1";
-        specialize "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "sse4_1";
-      }
-    }
-  }
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-#
-# Masked Variance / Masked Subpixel Variance
-#
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/unsigned int/, "vpx_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
-    add_proto qw/unsigned int/, "vpx_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
-    specialize "vpx_masked_variance${w}x${h}", qw/ssse3/;
-    specialize "vpx_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
-  }
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    foreach $bd ("_", "_10_", "_12_") {
-      foreach (@block_sizes) {
-        ($w, $h) = @$_;
-        add_proto qw/unsigned int/, "vpx_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
-        add_proto qw/unsigned int/, "vpx_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
-        specialize "vpx_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
-        specialize "vpx_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
-      }
-    }
-  }
-}
-
-#
-# OBMC Variance / OBMC Subpixel Variance
-#
-if (vpx_config("CONFIG_OBMC") eq "yes") {
-  foreach (@block_sizes) {
-    ($w, $h) = @$_;
-    add_proto qw/unsigned int/, "vpx_obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
-    add_proto qw/unsigned int/, "vpx_obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
-    specialize "vpx_obmc_variance${w}x${h}", q/sse4_1/;
-    specialize "vpx_obmc_sub_pixel_variance${w}x${h}";
-  }
-
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    foreach $bd ("_", "_10_", "_12_") {
-      foreach (@block_sizes) {
-        ($w, $h) = @$_;
-        add_proto qw/unsigned int/, "vpx_highbd${bd}obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
-        add_proto qw/unsigned int/, "vpx_highbd${bd}obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
-        specialize "vpx_highbd${bd}obmc_variance${w}x${h}", qw/sse4_1/;
-        specialize "vpx_highbd${bd}obmc_sub_pixel_variance${w}x${h}";
-      }
-    }
-  }
-}
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
-
-#
-# Specialty Subpixel
-#
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
-  specialize qw/vpx_variance_halfpixvar16x16_h sse2 media/;
-
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
-  specialize qw/vpx_variance_halfpixvar16x16_v sse2 media/;
-
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
-  specialize qw/vpx_variance_halfpixvar16x16_hv sse2 media/;
-
-#
-# Comp Avg
-#
-add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance64x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance64x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance32x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance32x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance32x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance16x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance16x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance8x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_variance8x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
-  add_proto qw/unsigned int vpx_highbd_10_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance64x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance64x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance32x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance32x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance32x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance16x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance16x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance8x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_variance8x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
-  add_proto qw/unsigned int vpx_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance64x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance64x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance32x64 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance32x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance32x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance16x32 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance16x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance8x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_variance8x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
-  add_proto qw/void vpx_highbd_8_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-  add_proto qw/void vpx_highbd_8_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-  add_proto qw/void vpx_highbd_10_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-  add_proto qw/void vpx_highbd_10_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-  add_proto qw/void vpx_highbd_12_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-  add_proto qw/void vpx_highbd_12_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-  add_proto qw/unsigned int vpx_highbd_8_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_mse16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_8_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_8_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_8_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_8_mse8x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_mse16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_10_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_10_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_10_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_10_mse8x8 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_mse16x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_mse16x16 sse2/;
-
-  add_proto qw/unsigned int vpx_highbd_12_mse16x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_12_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  add_proto qw/unsigned int vpx_highbd_12_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
-  specialize qw/vpx_highbd_12_mse8x8 sse2/;
-
-  add_proto qw/void vpx_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
-
-  #
-  # Subpixel Variance
-  #
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_12_sub_pixel_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_10_sub_pixel_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  specialize qw/vpx_highbd_8_sub_pixel_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x64 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x32 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x16 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x8 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x4 sse2/;
-
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-}  # CONFIG_ENCODERS
-
-1;
diff --git a/aom_dsp/x86/add_noise_sse2.asm b/aom_dsp/x86/add_noise_sse2.asm
index a86ca72..18fc165 100644
--- a/aom_dsp/x86/add_noise_sse2.asm
+++ b/aom_dsp/x86/add_noise_sse2.asm
@@ -11,14 +11,14 @@
 
 %include "aom_ports/x86_abi_support.asm"
 
-;void vpx_plane_add_noise_sse2(unsigned char *start, unsigned char *noise,
+;void aom_plane_add_noise_sse2(unsigned char *start, unsigned char *noise,
 ;                              unsigned char blackclamp[16],
 ;                              unsigned char whiteclamp[16],
 ;                              unsigned char bothclamp[16],
 ;                              unsigned int width, unsigned int height,
 ;                              int pitch)
-global sym(vpx_plane_add_noise_sse2) PRIVATE
-sym(vpx_plane_add_noise_sse2):
+global sym(aom_plane_add_noise_sse2) PRIVATE
+sym(aom_plane_add_noise_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 8
diff --git a/aom_dsp/x86/vpx_asm_stubs.c b/aom_dsp/x86/aom_asm_stubs.c
similarity index 60%
rename from aom_dsp/x86/vpx_asm_stubs.c
rename to aom_dsp/x86/aom_asm_stubs.c
index be56a69..0f0aaa8 100644
--- a/aom_dsp/x86/vpx_asm_stubs.c
+++ b/aom_dsp/x86/aom_asm_stubs.c
@@ -8,53 +8,53 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/x86/convolve.h"
 
 #if HAVE_SSE2
-filter8_1dfunction vpx_filter_block1d16_v8_sse2;
-filter8_1dfunction vpx_filter_block1d16_h8_sse2;
-filter8_1dfunction vpx_filter_block1d8_v8_sse2;
-filter8_1dfunction vpx_filter_block1d8_h8_sse2;
-filter8_1dfunction vpx_filter_block1d4_v8_sse2;
-filter8_1dfunction vpx_filter_block1d4_h8_sse2;
-filter8_1dfunction vpx_filter_block1d16_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d16_h8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_h8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_v8_sse2;
+filter8_1dfunction aom_filter_block1d16_h8_sse2;
+filter8_1dfunction aom_filter_block1d8_v8_sse2;
+filter8_1dfunction aom_filter_block1d8_h8_sse2;
+filter8_1dfunction aom_filter_block1d4_v8_sse2;
+filter8_1dfunction aom_filter_block1d4_h8_sse2;
+filter8_1dfunction aom_filter_block1d16_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_h8_avg_sse2;
 
-filter8_1dfunction vpx_filter_block1d16_v2_sse2;
-filter8_1dfunction vpx_filter_block1d16_h2_sse2;
-filter8_1dfunction vpx_filter_block1d8_v2_sse2;
-filter8_1dfunction vpx_filter_block1d8_h2_sse2;
-filter8_1dfunction vpx_filter_block1d4_v2_sse2;
-filter8_1dfunction vpx_filter_block1d4_h2_sse2;
-filter8_1dfunction vpx_filter_block1d16_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d16_h2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_h2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_v2_sse2;
+filter8_1dfunction aom_filter_block1d16_h2_sse2;
+filter8_1dfunction aom_filter_block1d8_v2_sse2;
+filter8_1dfunction aom_filter_block1d8_h2_sse2;
+filter8_1dfunction aom_filter_block1d4_v2_sse2;
+filter8_1dfunction aom_filter_block1d4_h2_sse2;
+filter8_1dfunction aom_filter_block1d16_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_h2_avg_sse2;
 
-// void vpx_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                               uint8_t *dst, ptrdiff_t dst_stride,
 //                               const int16_t *filter_x, int x_step_q4,
 //                               const int16_t *filter_y, int y_step_q4,
 //                               int w, int h);
-// void vpx_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                              uint8_t *dst, ptrdiff_t dst_stride,
 //                              const int16_t *filter_x, int x_step_q4,
 //                              const int16_t *filter_y, int y_step_q4,
 //                              int w, int h);
-// void vpx_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                   uint8_t *dst, ptrdiff_t dst_stride,
 //                                   const int16_t *filter_x, int x_step_q4,
 //                                   const int16_t *filter_y, int y_step_q4,
 //                                   int w, int h);
-// void vpx_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                  uint8_t *dst, ptrdiff_t dst_stride,
 //                                  const int16_t *filter_x, int x_step_q4,
 //                                  const int16_t *filter_y, int y_step_q4,
@@ -64,12 +64,12 @@
 FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, sse2);
 FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2);
 
-// void vpx_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                         uint8_t *dst, ptrdiff_t dst_stride,
 //                         const int16_t *filter_x, int x_step_q4,
 //                         const int16_t *filter_y, int y_step_q4,
 //                         int w, int h);
-// void vpx_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                             uint8_t *dst, ptrdiff_t dst_stride,
 //                             const int16_t *filter_x, int x_step_q4,
 //                             const int16_t *filter_y, int y_step_q4,
@@ -77,34 +77,34 @@
 FUN_CONV_2D(, sse2);
 FUN_CONV_2D(avg_, sse2);
 
-#if CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
+#if CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_avg_sse2;
 
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_avg_sse2;
 
-// void vpx_highbd_convolve8_horiz_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_horiz_sse2(const uint8_t *src,
 //                                      ptrdiff_t src_stride,
 //                                      uint8_t *dst,
 //                                      ptrdiff_t dst_stride,
@@ -113,7 +113,7 @@
 //                                      const int16_t *filter_y,
 //                                      int y_step_q4,
 //                                      int w, int h, int bd);
-// void vpx_highbd_convolve8_vert_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_vert_sse2(const uint8_t *src,
 //                                     ptrdiff_t src_stride,
 //                                     uint8_t *dst,
 //                                     ptrdiff_t dst_stride,
@@ -122,7 +122,7 @@
 //                                     const int16_t *filter_y,
 //                                     int y_step_q4,
 //                                     int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_horiz_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_avg_horiz_sse2(const uint8_t *src,
 //                                          ptrdiff_t src_stride,
 //                                          uint8_t *dst,
 //                                          ptrdiff_t dst_stride,
@@ -131,7 +131,7 @@
 //                                          const int16_t *filter_y,
 //                                          int y_step_q4,
 //                                          int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_vert_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_avg_vert_sse2(const uint8_t *src,
 //                                         ptrdiff_t src_stride,
 //                                         uint8_t *dst,
 //                                         ptrdiff_t dst_stride,
@@ -146,17 +146,17 @@
 HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
                  sse2);
 
-// void vpx_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
 //                                const int16_t *filter_x, int x_step_q4,
 //                                const int16_t *filter_y, int y_step_q4,
 //                                int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                    uint8_t *dst, ptrdiff_t dst_stride,
 //                                    const int16_t *filter_x, int x_step_q4,
 //                                    const int16_t *filter_y, int y_step_q4,
 //                                    int w, int h, int bd);
 HIGH_FUN_CONV_2D(, sse2);
 HIGH_FUN_CONV_2D(avg_, sse2);
-#endif  // CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64
+#endif  // CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
 #endif  // HAVE_SSE2
diff --git a/aom_dsp/x86/vpx_convolve_copy_sse2.asm b/aom_dsp/x86/aom_convolve_copy_sse2.asm
similarity index 98%
rename from aom_dsp/x86/vpx_convolve_copy_sse2.asm
rename to aom_dsp/x86/aom_convolve_copy_sse2.asm
index 964ee14..bbfcf03 100644
--- a/aom_dsp/x86/vpx_convolve_copy_sse2.asm
+++ b/aom_dsp/x86/aom_convolve_copy_sse2.asm
@@ -47,7 +47,7 @@
   cmp r4d, 32
   je .w32
 
-%if CONFIG_VP10 && CONFIG_EXT_PARTITION
+%if CONFIG_AV1 && CONFIG_EXT_PARTITION
   cmp r4d, 64
   je .w64
 %ifidn %2, highbd
@@ -157,7 +157,7 @@
   jnz .loop128
   RET
 
-%else  ; CONFIG_VP10 && CONFIG_EXT_PARTITION
+%else  ; CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 %ifidn %2, highbd
   cmp r4d, 64
@@ -199,7 +199,7 @@
   jnz .loop128
   RET
 %endif
-%endif  ; CONFIG_VP10 && CONFIG_EXT_PARTITION
+%endif  ; CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 .w64:
   mov                    r4d, dword hm
@@ -336,7 +336,7 @@
 INIT_XMM sse2
 convolve_fn copy
 convolve_fn avg
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
 convolve_fn copy, highbd
 convolve_fn avg, highbd
 %endif
diff --git a/aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
similarity index 94%
rename from aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm
rename to aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
index f02845e..da738fe 100644
--- a/aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
@@ -197,7 +197,7 @@
     movdqu      [rdi + %2], xmm0
 %endm
 
-;void vpx_filter_block1d4_v8_sse2
+;void aom_filter_block1d4_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -206,8 +206,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d4_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v8_sse2):
+global sym(aom_highbd_filter_block1d4_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -267,7 +267,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d8_v8_sse2
+;void aom_filter_block1d8_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -276,8 +276,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d8_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v8_sse2):
+global sym(aom_highbd_filter_block1d8_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -326,7 +326,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d16_v8_sse2
+;void aom_filter_block1d16_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -335,8 +335,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d16_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v8_sse2):
+global sym(aom_highbd_filter_block1d16_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -389,8 +389,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d4_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -450,8 +450,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d8_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -499,8 +499,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d16_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -552,7 +552,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d4_h8_sse2
+;void aom_filter_block1d4_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -561,8 +561,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d4_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h8_sse2):
+global sym(aom_highbd_filter_block1d4_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -627,7 +627,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d8_h8_sse2
+;void aom_filter_block1d8_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -636,8 +636,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d8_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h8_sse2):
+global sym(aom_highbd_filter_block1d8_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -693,7 +693,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d16_h8_sse2
+;void aom_filter_block1d16_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -702,8 +702,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_highbd_filter_block1d16_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h8_sse2):
+global sym(aom_highbd_filter_block1d16_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -770,8 +770,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d4_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -836,8 +836,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d8_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -893,8 +893,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d16_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
diff --git a/aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
similarity index 89%
rename from aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
rename to aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
index eacedc5..c926ab6 100644
--- a/aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
@@ -171,8 +171,8 @@
 %endm
 %endif
 
-global sym(vpx_highbd_filter_block1d4_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v2_sse2):
+global sym(aom_highbd_filter_block1d4_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -196,8 +196,8 @@
     ret
 
 %if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v2_sse2):
+global sym(aom_highbd_filter_block1d8_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -222,8 +222,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v2_sse2):
+global sym(aom_highbd_filter_block1d16_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -251,8 +251,8 @@
     ret
 %endif
 
-global sym(vpx_highbd_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d4_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -276,8 +276,8 @@
     ret
 
 %if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d8_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -302,8 +302,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d16_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -331,8 +331,8 @@
     ret
 %endif
 
-global sym(vpx_highbd_filter_block1d4_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h2_sse2):
+global sym(aom_highbd_filter_block1d4_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -357,8 +357,8 @@
     ret
 
 %if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h2_sse2):
+global sym(aom_highbd_filter_block1d8_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -383,8 +383,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h2_sse2):
+global sym(aom_highbd_filter_block1d16_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -412,8 +412,8 @@
     ret
 %endif
 
-global sym(vpx_highbd_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d4_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -438,8 +438,8 @@
     ret
 
 %if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d8_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -464,8 +464,8 @@
     pop         rbp
     ret
 
-global sym(vpx_highbd_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d16_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
diff --git a/aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
similarity index 92%
rename from aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
rename to aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
index 2453bca..61be3d8 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
+++ b/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
@@ -10,7 +10,7 @@
 
 #include <immintrin.h>
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/x86/convolve.h"
 #include "aom_ports/mem.h"
 
@@ -59,7 +59,7 @@
 #define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
 #endif  // __clang__
 
-static void vpx_filter_block1d16_h8_avx2(
+static void aom_filter_block1d16_h8_avx2(
     const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
     ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i filtersReg;
@@ -287,7 +287,7 @@
   }
 }
 
-static void vpx_filter_block1d16_v8_avx2(
+static void aom_filter_block1d16_v8_avx2(
     const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
     ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i filtersReg;
@@ -523,41 +523,41 @@
 }
 
 #if HAVE_AVX2 && HAVE_SSSE3
-filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_ssse3;
 #if ARCH_X86_64
-filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
-#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_intrin_ssse3
-#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_intrin_ssse3
-#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_intrin_ssse3
+filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3;
+#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_intrin_ssse3
+#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_intrin_ssse3
+#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_intrin_ssse3
 #else  // ARCH_X86
-filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_ssse3;
-#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_ssse3
-#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_ssse3
-#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_ssse3
+filter8_1dfunction aom_filter_block1d8_v8_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_ssse3;
+#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_ssse3
+#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_ssse3
+#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_ssse3
 #endif  // ARCH_X86_64
-filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
-#define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3
-#define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3
-#define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3
-#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3
-#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3
-#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3
-#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3
-// void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
+filter8_1dfunction aom_filter_block1d16_v2_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_ssse3;
+#define aom_filter_block1d4_v8_avx2 aom_filter_block1d4_v8_ssse3
+#define aom_filter_block1d16_v2_avx2 aom_filter_block1d16_v2_ssse3
+#define aom_filter_block1d16_h2_avx2 aom_filter_block1d16_h2_ssse3
+#define aom_filter_block1d8_v2_avx2 aom_filter_block1d8_v2_ssse3
+#define aom_filter_block1d8_h2_avx2 aom_filter_block1d8_h2_ssse3
+#define aom_filter_block1d4_v2_avx2 aom_filter_block1d4_v2_ssse3
+#define aom_filter_block1d4_h2_avx2 aom_filter_block1d4_h2_ssse3
+// void aom_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
 //                                const int16_t *filter_x, int x_step_q4,
 //                                const int16_t *filter_y, int y_step_q4,
 //                                int w, int h);
-// void vpx_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
 //                               uint8_t *dst, ptrdiff_t dst_stride,
 //                               const int16_t *filter_x, int x_step_q4,
 //                               const int16_t *filter_y, int y_step_q4,
@@ -565,7 +565,7 @@
 FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
 FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);
 
-// void vpx_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
 //                          uint8_t *dst, ptrdiff_t dst_stride,
 //                          const int16_t *filter_x, int x_step_q4,
 //                          const int16_t *filter_y, int y_step_q4,
diff --git a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
similarity index 94%
rename from aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
rename to aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
index bd472ff..6b22775 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
@@ -10,10 +10,10 @@
 
 #include <tmmintrin.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_dsp/x86/convolve.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/emmintrin_compat.h"
 
@@ -44,11 +44,11 @@
 };
 
 // These are reused by the avx2 intrinsics.
-filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3;
 
-void vpx_filter_block1d4_h8_intrin_ssse3(
+void aom_filter_block1d4_h8_intrin_ssse3(
     const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
     ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i firstFilters, secondFilters, shuffle1, shuffle2;
@@ -116,7 +116,7 @@
   }
 }
 
-void vpx_filter_block1d8_h8_intrin_ssse3(
+void aom_filter_block1d8_h8_intrin_ssse3(
     const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
     ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg;
@@ -193,7 +193,7 @@
   }
 }
 
-void vpx_filter_block1d8_v8_intrin_ssse3(
+void aom_filter_block1d8_v8_intrin_ssse3(
     const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
     ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i addFilterReg64, filtersReg, minReg;
@@ -278,48 +278,48 @@
   }
 }
 
-filter8_1dfunction vpx_filter_block1d16_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d16_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_v8_ssse3;
+filter8_1dfunction aom_filter_block1d16_h8_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_ssse3;
+filter8_1dfunction aom_filter_block1d16_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_avg_ssse3;
 
-filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_v2_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_ssse3;
+filter8_1dfunction aom_filter_block1d16_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_avg_ssse3;
 
-// void vpx_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
 //                                const int16_t *filter_x, int x_step_q4,
 //                                const int16_t *filter_y, int y_step_q4,
 //                                int w, int h);
-// void vpx_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                               uint8_t *dst, ptrdiff_t dst_stride,
 //                               const int16_t *filter_x, int x_step_q4,
 //                               const int16_t *filter_y, int y_step_q4,
 //                               int w, int h);
-// void vpx_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                                    uint8_t *dst, ptrdiff_t dst_stride,
 //                                    const int16_t *filter_x, int x_step_q4,
 //                                    const int16_t *filter_y, int y_step_q4,
 //                                    int w, int h);
-// void vpx_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                                   uint8_t *dst, ptrdiff_t dst_stride,
 //                                   const int16_t *filter_x, int x_step_q4,
 //                                   const int16_t *filter_y, int y_step_q4,
@@ -873,7 +873,7 @@
   return (int)((const InterpKernel *)(intptr_t)f - base);
 }
 
-void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
@@ -887,12 +887,12 @@
                    x_step_q4, filters_y, y0_q4, y_step_q4, w, h);
 }
 
-// void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                          uint8_t *dst, ptrdiff_t dst_stride,
 //                          const int16_t *filter_x, int x_step_q4,
 //                          const int16_t *filter_y, int y_step_q4,
 //                          int w, int h);
-// void vpx_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                              uint8_t *dst, ptrdiff_t dst_stride,
 //                              const int16_t *filter_x, int x_step_q4,
 //                              const int16_t *filter_y, int y_step_q4,
diff --git a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
similarity index 94%
rename from aom_dsp/x86/vpx_subpixel_8t_sse2.asm
rename to aom_dsp/x86/aom_subpixel_8t_sse2.asm
index b197150..535581e 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
@@ -176,7 +176,7 @@
     movq        [rdi + %2], xmm0
 %endm
 
-;void vpx_filter_block1d4_v8_sse2
+;void aom_filter_block1d4_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -185,8 +185,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d4_v8_sse2) PRIVATE
-sym(vpx_filter_block1d4_v8_sse2):
+global sym(aom_filter_block1d4_v8_sse2) PRIVATE
+sym(aom_filter_block1d4_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -243,7 +243,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d8_v8_sse2
+;void aom_filter_block1d8_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -252,8 +252,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d8_v8_sse2) PRIVATE
-sym(vpx_filter_block1d8_v8_sse2):
+global sym(aom_filter_block1d8_v8_sse2) PRIVATE
+sym(aom_filter_block1d8_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -302,7 +302,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d16_v8_sse2
+;void aom_filter_block1d16_v8_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned int   src_pitch,
@@ -311,8 +311,8 @@
 ;    unsigned int   output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d16_v8_sse2) PRIVATE
-sym(vpx_filter_block1d16_v8_sse2):
+global sym(aom_filter_block1d16_v8_sse2) PRIVATE
+sym(aom_filter_block1d16_v8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -365,8 +365,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_v8_avg_sse2):
+global sym(aom_filter_block1d4_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -423,8 +423,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_v8_avg_sse2):
+global sym(aom_filter_block1d8_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -472,8 +472,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_v8_avg_sse2):
+global sym(aom_filter_block1d16_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_v8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -525,7 +525,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d4_h8_sse2
+;void aom_filter_block1d4_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -534,8 +534,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d4_h8_sse2) PRIVATE
-sym(vpx_filter_block1d4_h8_sse2):
+global sym(aom_filter_block1d4_h8_sse2) PRIVATE
+sym(aom_filter_block1d4_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -599,7 +599,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d8_h8_sse2
+;void aom_filter_block1d8_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -608,8 +608,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d8_h8_sse2) PRIVATE
-sym(vpx_filter_block1d8_h8_sse2):
+global sym(aom_filter_block1d8_h8_sse2) PRIVATE
+sym(aom_filter_block1d8_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -674,7 +674,7 @@
     pop         rbp
     ret
 
-;void vpx_filter_block1d16_h8_sse2
+;void aom_filter_block1d16_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
 ;    unsigned int    src_pixels_per_line,
@@ -683,8 +683,8 @@
 ;    unsigned int    output_height,
 ;    short *filter
 ;)
-global sym(vpx_filter_block1d16_h8_sse2) PRIVATE
-sym(vpx_filter_block1d16_h8_sse2):
+global sym(aom_filter_block1d16_h8_sse2) PRIVATE
+sym(aom_filter_block1d16_h8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -769,8 +769,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_h8_avg_sse2):
+global sym(aom_filter_block1d4_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -834,8 +834,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_h8_avg_sse2):
+global sym(aom_filter_block1d8_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -900,8 +900,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_h8_avg_sse2):
+global sym(aom_filter_block1d16_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_h8_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/vpx_subpixel_8t_ssse3.asm b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
similarity index 99%
rename from aom_dsp/x86/vpx_subpixel_8t_ssse3.asm
rename to aom_dsp/x86/aom_subpixel_8t_ssse3.asm
index c1a6f23..5b5eafe 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
@@ -17,7 +17,7 @@
 ; NOTE: pmulhrsw has a latency of 5 cycles.  Tests showed a performance loss
 ; when using this instruction.
 ;
-; The add order below (based on ffvp9) must be followed to prevent outranges.
+; The add order below (based on ffav1) must be followed to prevent outranges.
 ; x = k0k1 + k4k5
 ; y = k2k3 + k6k7
 ; z = signed SAT(x + y)
diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
similarity index 89%
rename from aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm
rename to aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
index 7de58ff..78ac1c4 100644
--- a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
@@ -131,8 +131,8 @@
     dec         rcx
 %endm
 
-global sym(vpx_filter_block1d4_v2_sse2) PRIVATE
-sym(vpx_filter_block1d4_v2_sse2):
+global sym(aom_filter_block1d4_v2_sse2) PRIVATE
+sym(aom_filter_block1d4_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -155,8 +155,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_v2_sse2) PRIVATE
-sym(vpx_filter_block1d8_v2_sse2):
+global sym(aom_filter_block1d8_v2_sse2) PRIVATE
+sym(aom_filter_block1d8_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -181,8 +181,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_v2_sse2) PRIVATE
-sym(vpx_filter_block1d16_v2_sse2):
+global sym(aom_filter_block1d16_v2_sse2) PRIVATE
+sym(aom_filter_block1d16_v2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -209,8 +209,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_v2_avg_sse2):
+global sym(aom_filter_block1d4_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -233,8 +233,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_v2_avg_sse2):
+global sym(aom_filter_block1d8_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -259,8 +259,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_v2_avg_sse2):
+global sym(aom_filter_block1d16_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_v2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -287,8 +287,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_h2_sse2) PRIVATE
-sym(vpx_filter_block1d4_h2_sse2):
+global sym(aom_filter_block1d4_h2_sse2) PRIVATE
+sym(aom_filter_block1d4_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -312,8 +312,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_h2_sse2) PRIVATE
-sym(vpx_filter_block1d8_h2_sse2):
+global sym(aom_filter_block1d8_h2_sse2) PRIVATE
+sym(aom_filter_block1d8_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -339,8 +339,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_h2_sse2) PRIVATE
-sym(vpx_filter_block1d16_h2_sse2):
+global sym(aom_filter_block1d16_h2_sse2) PRIVATE
+sym(aom_filter_block1d16_h2_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -367,8 +367,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_h2_avg_sse2):
+global sym(aom_filter_block1d4_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -392,8 +392,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_h2_avg_sse2):
+global sym(aom_filter_block1d8_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -419,8 +419,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_h2_avg_sse2):
+global sym(aom_filter_block1d16_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_h2_avg_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
similarity index 88%
rename from aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm
rename to aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
index 318c7c4..5f24460 100644
--- a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
@@ -105,8 +105,8 @@
     dec         rcx
 %endm
 
-global sym(vpx_filter_block1d4_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d4_v2_ssse3):
+global sym(aom_filter_block1d4_v2_ssse3) PRIVATE
+sym(aom_filter_block1d4_v2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -129,8 +129,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d8_v2_ssse3):
+global sym(aom_filter_block1d8_v2_ssse3) PRIVATE
+sym(aom_filter_block1d8_v2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -155,8 +155,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d16_v2_ssse3):
+global sym(aom_filter_block1d16_v2_ssse3) PRIVATE
+sym(aom_filter_block1d16_v2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -182,8 +182,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d4_v2_avg_ssse3):
+global sym(aom_filter_block1d4_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d4_v2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -206,8 +206,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d8_v2_avg_ssse3):
+global sym(aom_filter_block1d8_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d8_v2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -232,8 +232,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d16_v2_avg_ssse3):
+global sym(aom_filter_block1d16_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d16_v2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -259,8 +259,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d4_h2_ssse3):
+global sym(aom_filter_block1d4_h2_ssse3) PRIVATE
+sym(aom_filter_block1d4_h2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -284,8 +284,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d8_h2_ssse3):
+global sym(aom_filter_block1d8_h2_ssse3) PRIVATE
+sym(aom_filter_block1d8_h2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -311,8 +311,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d16_h2_ssse3):
+global sym(aom_filter_block1d16_h2_ssse3) PRIVATE
+sym(aom_filter_block1d16_h2_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -338,8 +338,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d4_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d4_h2_avg_ssse3):
+global sym(aom_filter_block1d4_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d4_h2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -363,8 +363,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d8_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d8_h2_avg_ssse3):
+global sym(aom_filter_block1d8_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d8_h2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -390,8 +390,8 @@
     pop         rbp
     ret
 
-global sym(vpx_filter_block1d16_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d16_h2_avg_ssse3):
+global sym(aom_filter_block1d16_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d16_h2_avg_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/avg_intrin_sse2.c b/aom_dsp/x86/avg_intrin_sse2.c
index c778f09..2365833 100644
--- a/aom_dsp/x86/avg_intrin_sse2.c
+++ b/aom_dsp/x86/avg_intrin_sse2.c
@@ -12,10 +12,10 @@
 
 #include "aom_dsp/x86/synonyms.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
-void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
+void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
                          int *min, int *max) {
   __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
   u0 = _mm_setzero_si128();
@@ -93,7 +93,7 @@
   *min = _mm_extract_epi16(minabsdiff, 0);
 }
 
-unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
+unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
   __m128i s0, s1, u0;
   unsigned int avg = 0;
   u0 = _mm_setzero_si128();
@@ -120,7 +120,7 @@
   return (avg + 32) >> 6;
 }
 
-unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) {
+unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
   __m128i s0, s1, u0;
   unsigned int avg = 0;
 
@@ -215,7 +215,7 @@
   }
 }
 
-void vpx_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
+void aom_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
                            int16_t *coeff) {
   __m128i src[8];
   src[0] = _mm_load_si128((const __m128i *)src_diff);
@@ -247,13 +247,13 @@
   _mm_store_si128((__m128i *)coeff, src[7]);
 }
 
-void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,
+void aom_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,
                              int16_t *coeff) {
   int idx;
   for (idx = 0; idx < 4; ++idx) {
     int16_t const *src_ptr =
         src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
-    vpx_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
+    aom_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
   }
 
   for (idx = 0; idx < 64; idx += 8) {
@@ -286,7 +286,7 @@
   }
 }
 
-int vpx_satd_sse2(const int16_t *coeff, int length) {
+int aom_satd_sse2(const int16_t *coeff, int length) {
   int i;
   const __m128i zero = _mm_setzero_si128();
   __m128i accum = zero;
@@ -312,7 +312,7 @@
   return _mm_cvtsi128_si32(accum);
 }
 
-void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref,
+void aom_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref,
                           const int ref_stride, const int height) {
   int idx;
   __m128i zero = _mm_setzero_si128();
@@ -361,7 +361,7 @@
   _mm_storeu_si128((__m128i *)hbuf, s1);
 }
 
-int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) {
+int16_t aom_int_pro_col_sse2(uint8_t const *ref, const int width) {
   __m128i zero = _mm_setzero_si128();
   __m128i src_line = _mm_load_si128((const __m128i *)ref);
   __m128i s0 = _mm_sad_epu8(src_line, zero);
@@ -381,7 +381,7 @@
   return _mm_extract_epi16(s0, 0);
 }
 
-int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) {
+int aom_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) {
   int idx;
   int width = 4 << bwl;
   int16_t mean;
diff --git a/aom_dsp/x86/avg_ssse3_x86_64.asm b/aom_dsp/x86/avg_ssse3_x86_64.asm
index 26412e8..8f28874 100644
--- a/aom_dsp/x86/avg_ssse3_x86_64.asm
+++ b/aom_dsp/x86/avg_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vpx
+%define private_prefix aom
 
 %include "third_party/x86inc/x86inc.asm"
 
diff --git a/aom_dsp/x86/blend_a64_hmask_sse4.c b/aom_dsp/x86/blend_a64_hmask_sse4.c
index 1e452e5..4ee735d 100644
--- a/aom_dsp/x86/blend_a64_hmask_sse4.c
+++ b/aom_dsp/x86/blend_a64_hmask_sse4.c
@@ -8,28 +8,28 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 // To start out, just dispatch to the function using the 2D mask and
 // pass mask stride as 0. This can be improved upon if necessary.
 
-void vpx_blend_a64_hmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_hmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
                                 const uint8_t *src0, uint32_t src0_stride,
                                 const uint8_t *src1, uint32_t src1_stride,
                                 const uint8_t *mask, int h, int w) {
-  vpx_blend_a64_mask_sse4_1(dst, dst_stride, src0, src0_stride, src1,
+  aom_blend_a64_mask_sse4_1(dst, dst_stride, src0, src0_stride, src1,
                             src1_stride, mask, 0, h, w, 0, 0);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_hmask_sse4_1(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_hmask_sse4_1(
     uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8,
     uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride,
     const uint8_t *mask, int h, int w, int bd) {
-  vpx_highbd_blend_a64_mask_sse4_1(dst_8, dst_stride, src0_8, src0_stride,
+  aom_highbd_blend_a64_mask_sse4_1(dst_8, dst_stride, src0_8, src0_stride,
                                    src1_8, src1_stride, mask, 0, h, w, 0, 0,
                                    bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/blend_a64_mask_sse4.c b/aom_dsp/x86/blend_a64_mask_sse4.c
index 2384556..6463ecc 100644
--- a/aom_dsp/x86/blend_a64_mask_sse4.c
+++ b/aom_dsp/x86/blend_a64_mask_sse4.c
@@ -12,15 +12,15 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/blend.h"
 
 #include "aom_dsp/x86/synonyms.h"
 #include "aom_dsp/x86/blend_sse4.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 //////////////////////////////////////////////////////////////////////////////
 // No sub-sampling
@@ -31,7 +31,7 @@
                                      const uint8_t *src1, uint32_t src1_stride,
                                      const uint8_t *mask, uint32_t mask_stride,
                                      int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -58,7 +58,7 @@
                                      const uint8_t *src1, uint32_t src1_stride,
                                      const uint8_t *mask, uint32_t mask_stride,
                                      int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -84,7 +84,7 @@
     uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
     uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -121,7 +121,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -151,7 +151,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -181,7 +181,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -219,7 +219,7 @@
     uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
     uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -248,7 +248,7 @@
     uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
     uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -278,7 +278,7 @@
     uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zero = _mm_setzero_si128();
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -317,7 +317,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -352,7 +352,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -387,7 +387,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, int w) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -431,7 +431,7 @@
 // Dispatch
 //////////////////////////////////////////////////////////////////////////////
 
-void vpx_blend_a64_mask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_mask_sse4_1(uint8_t *dst, uint32_t dst_stride,
                                const uint8_t *src0, uint32_t src0_stride,
                                const uint8_t *src1, uint32_t src1_stride,
                                const uint8_t *mask, uint32_t mask_stride, int h,
@@ -463,7 +463,7 @@
   assert(IS_POWER_OF_TWO(w));
 
   if (UNLIKELY((h | w) & 3)) {  // if (w <= 2 || h <= 2)
-    vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+    aom_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                          mask, mask_stride, h, w, suby, subx);
   } else {
     blend[(w >> 2) & 3][subx != 0][suby != 0](dst, dst_stride, src0,
@@ -472,7 +472,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 //////////////////////////////////////////////////////////////////////////////
 // No sub-sampling
 //////////////////////////////////////////////////////////////////////////////
@@ -481,7 +481,7 @@
     uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     const __m128i v_m0_b = xx_loadl_32(mask);
@@ -522,7 +522,7 @@
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w,
     blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -570,7 +570,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     const __m128i v_r_b = xx_loadl_64(mask);
@@ -617,7 +617,7 @@
     blend_unit_fn blend) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -665,7 +665,7 @@
     uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     const __m128i v_ra_b = xx_loadl_32(mask);
@@ -711,7 +711,7 @@
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, uint32_t mask_stride, int h, int w,
     blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -762,7 +762,7 @@
     const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     const __m128i v_ra_b = xx_loadl_64(mask);
@@ -814,7 +814,7 @@
     blend_unit_fn blend) {
   const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
                                          0xff, 0, 0xff, 0, 0xff, 0, 0xff);
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -863,7 +863,7 @@
 // Dispatch
 //////////////////////////////////////////////////////////////////////////////
 
-void vpx_highbd_blend_a64_mask_sse4_1(uint8_t *dst_8, uint32_t dst_stride,
+void aom_highbd_blend_a64_mask_sse4_1(uint8_t *dst_8, uint32_t dst_stride,
                                       const uint8_t *src0_8,
                                       uint32_t src0_stride,
                                       const uint8_t *src1_8,
@@ -907,7 +907,7 @@
 
   assert(bd == 8 || bd == 10 || bd == 12);
   if (UNLIKELY((h | w) & 3)) {  // if (w <= 2 || h <= 2)
-    vpx_highbd_blend_a64_mask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+    aom_highbd_blend_a64_mask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
                                 src1_stride, mask, mask_stride, h, w, suby,
                                 subx, bd);
   } else {
@@ -920,4 +920,4 @@
         mask_stride, h, w);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/blend_a64_vmask_sse4.c b/aom_dsp/x86/blend_a64_vmask_sse4.c
index 0108d02..946c8ff 100644
--- a/aom_dsp/x86/blend_a64_vmask_sse4.c
+++ b/aom_dsp/x86/blend_a64_vmask_sse4.c
@@ -12,15 +12,15 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/blend.h"
 
 #include "aom_dsp/x86/synonyms.h"
 #include "aom_dsp/x86/blend_sse4.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 //////////////////////////////////////////////////////////////////////////////
 // Implementation - No sub-sampling
@@ -30,7 +30,7 @@
                                       const uint8_t *src0, uint32_t src0_stride,
                                       const uint8_t *src1, uint32_t src1_stride,
                                       const uint8_t *mask, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -55,7 +55,7 @@
                                       const uint8_t *src0, uint32_t src0_stride,
                                       const uint8_t *src1, uint32_t src1_stride,
                                       const uint8_t *mask, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   (void)w;
 
@@ -82,7 +82,7 @@
                                         const uint8_t *src1,
                                         uint32_t src1_stride,
                                         const uint8_t *mask, int h, int w) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -108,7 +108,7 @@
 // Dispatch
 //////////////////////////////////////////////////////////////////////////////
 
-void vpx_blend_a64_vmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_vmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
                                 const uint8_t *src0, uint32_t src0_stride,
                                 const uint8_t *src1, uint32_t src1_stride,
                                 const uint8_t *mask, int h, int w) {
@@ -120,8 +120,8 @@
   // Dimension: width_index
   static const blend_fn blend[9] = {
     blend_a64_vmask_w16n_sse4_1,  // w % 16 == 0
-    vpx_blend_a64_vmask_c,        // w == 1
-    vpx_blend_a64_vmask_c,        // w == 2
+    aom_blend_a64_vmask_c,        // w == 1
+    aom_blend_a64_vmask_c,        // w == 2
     NULL,                         // INVALID
     blend_a64_vmask_w4_sse4_1,    // w == 4
     NULL,                         // INVALID
@@ -142,7 +142,7 @@
                  w);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 //////////////////////////////////////////////////////////////////////////////
 // Implementation - No sub-sampling
 //////////////////////////////////////////////////////////////////////////////
@@ -151,7 +151,7 @@
     uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, int h, blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     const __m128i v_m0_w = _mm_set1_epi16(*mask);
@@ -194,7 +194,7 @@
     uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
     uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
     const uint8_t *mask, int h, int w, blend_unit_fn blend) {
-  const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+  const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
 
   do {
     int c;
@@ -236,7 +236,7 @@
 // Dispatch
 //////////////////////////////////////////////////////////////////////////////
 
-void vpx_highbd_blend_a64_vmask_sse4_1(
+void aom_highbd_blend_a64_vmask_sse4_1(
     uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8,
     uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride,
     const uint8_t *mask, int h, int w, int bd) {
@@ -270,7 +270,7 @@
   assert(bd == 8 || bd == 10 || bd == 12);
 
   if (UNLIKELY((h | w) & 3)) {  // if (w <= 2 || h <= 2)
-    vpx_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+    aom_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
                                  src1_stride, mask, h, w, bd);
   } else {
     uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8);
@@ -281,4 +281,4 @@
                                   src1_stride, mask, h, w);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/blend_sse4.h b/aom_dsp/x86/blend_sse4.h
index 068518c..4fd5f5f 100644
--- a/aom_dsp/x86/blend_sse4.h
+++ b/aom_dsp/x86/blend_sse4.h
@@ -8,8 +8,8 @@
 *  be found in the AUTHORS file in the root of the source tree.
 */
 
-#ifndef VPX_DSP_X86_BLEND_SSE4_H_
-#define VPX_DSP_X86_BLEND_SSE4_H_
+#ifndef AOM_DSP_X86_BLEND_SSE4_H_
+#define AOM_DSP_X86_BLEND_SSE4_H_
 
 #include "aom_dsp/blend.h"
 #include "aom_dsp/x86/synonyms.h"
@@ -30,7 +30,7 @@
 
   const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
 
-  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
 
   return v_res_w;
 }
@@ -47,12 +47,12 @@
 
   const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
 
-  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
 
   return v_res_w;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef __m128i (*blend_unit_fn)(const uint16_t *src0, const uint16_t *src1,
                                  const __m128i v_m0_w, const __m128i v_m1_w);
 
@@ -66,7 +66,7 @@
 
   const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
 
-  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
 
   return v_res_w;
 }
@@ -81,7 +81,7 @@
 
   const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
 
-  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
 
   return v_res_w;
 }
@@ -100,7 +100,7 @@
 
   // Scale
   const __m128i v_ssum_d =
-      _mm_srli_epi32(v_sum_d, VPX_BLEND_A64_ROUND_BITS - 1);
+      _mm_srli_epi32(v_sum_d, AOM_BLEND_A64_ROUND_BITS - 1);
 
   // Pack
   const __m128i v_pssum_d = _mm_packs_epi32(v_ssum_d, v_ssum_d);
@@ -128,9 +128,9 @@
 
   // Scale
   const __m128i v_ssuml_d =
-      _mm_srli_epi32(v_suml_d, VPX_BLEND_A64_ROUND_BITS - 1);
+      _mm_srli_epi32(v_suml_d, AOM_BLEND_A64_ROUND_BITS - 1);
   const __m128i v_ssumh_d =
-      _mm_srli_epi32(v_sumh_d, VPX_BLEND_A64_ROUND_BITS - 1);
+      _mm_srli_epi32(v_sumh_d, AOM_BLEND_A64_ROUND_BITS - 1);
 
   // Pack
   const __m128i v_pssum_d = _mm_packs_epi32(v_ssuml_d, v_ssumh_d);
@@ -140,6 +140,6 @@
 
   return v_res_w;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#endif  // VPX_DSP_X86_BLEND_SSE4_H_
+#endif  // AOM_DSP_X86_BLEND_SSE4_H_
diff --git a/aom_dsp/x86/convolve.h b/aom_dsp/x86/convolve.h
index 9f1f10f..9c0e6aa 100644
--- a/aom_dsp/x86/convolve.h
+++ b/aom_dsp/x86/convolve.h
@@ -7,22 +7,22 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPX_DSP_X86_CONVOLVE_H_
-#define VPX_DSP_X86_CONVOLVE_H_
+#ifndef AOM_DSP_X86_CONVOLVE_H_
+#define AOM_DSP_X86_CONVOLVE_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/aom_convolve.h"
 
 typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
                                 uint8_t *output_ptr, ptrdiff_t out_pitch,
                                 uint32_t output_height, const int16_t *filter);
 
 #define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt)         \
-  void vpx_convolve8_##name##_##opt(                                         \
+  void aom_convolve8_##name##_##opt(                                         \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
       ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,          \
       const int16_t *filter_y, int y_step_q4, int w, int h) {                \
@@ -30,39 +30,39 @@
     assert(step_q4 == 16);                                                   \
     if (filter[0] | filter[1] | filter[2]) {                                 \
       while (w >= 16) {                                                      \
-        vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
+        aom_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
                                                  dst_stride, h, filter);     \
         src += 16;                                                           \
         dst += 16;                                                           \
         w -= 16;                                                             \
       }                                                                      \
       if (w == 8) {                                                          \
-        vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst,  \
+        aom_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst,  \
                                                 dst_stride, h, filter);      \
       } else if (w == 4) {                                                   \
-        vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst,  \
+        aom_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst,  \
                                                 dst_stride, h, filter);      \
       }                                                                      \
     } else {                                                                 \
       while (w >= 16) {                                                      \
-        vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst,       \
+        aom_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst,       \
                                                  dst_stride, h, filter);     \
         src += 16;                                                           \
         dst += 16;                                                           \
         w -= 16;                                                             \
       }                                                                      \
       if (w == 8) {                                                          \
-        vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst,        \
+        aom_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst,        \
                                                 dst_stride, h, filter);      \
       } else if (w == 4) {                                                   \
-        vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst,        \
+        aom_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst,        \
                                                 dst_stride, h, filter);      \
       }                                                                      \
     }                                                                        \
   }
 
 #define FUN_CONV_2D(avg, opt)                                                \
-  void vpx_convolve8_##avg##opt(                                             \
+  void aom_convolve8_##avg##opt(                                             \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
       ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,          \
       const int16_t *filter_y, int y_step_q4, int w, int h) {                \
@@ -75,24 +75,24 @@
     if (filter_x[0] || filter_x[1] || filter_x[2] || filter_y[0] ||          \
         filter_y[1] || filter_y[2]) {                                        \
       DECLARE_ALIGNED(16, uint8_t, fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 7)]); \
-      vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2,    \
+      aom_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2,    \
                                 MAX_SB_SIZE, filter_x, x_step_q4, filter_y,  \
                                 y_step_q4, w, h + 7);                        \
-      vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * MAX_SB_SIZE, MAX_SB_SIZE, \
+      aom_convolve8_##avg##vert_##opt(fdata2 + 3 * MAX_SB_SIZE, MAX_SB_SIZE, \
                                       dst, dst_stride, filter_x, x_step_q4,  \
                                       filter_y, y_step_q4, w, h);            \
     } else {                                                                 \
       DECLARE_ALIGNED(16, uint8_t, fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 1)]); \
-      vpx_convolve8_horiz_##opt(src, src_stride, fdata2, MAX_SB_SIZE,        \
+      aom_convolve8_horiz_##opt(src, src_stride, fdata2, MAX_SB_SIZE,        \
                                 filter_x, x_step_q4, filter_y, y_step_q4, w, \
                                 h + 1);                                      \
-      vpx_convolve8_##avg##vert_##opt(fdata2, MAX_SB_SIZE, dst, dst_stride,  \
+      aom_convolve8_##avg##vert_##opt(fdata2, MAX_SB_SIZE, dst, dst_stride,  \
                                       filter_x, x_step_q4, filter_y,         \
                                       y_step_q4, w, h);                      \
     }                                                                        \
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 
 typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
                                        const ptrdiff_t src_pitch,
@@ -102,7 +102,7 @@
                                        const int16_t *filter, int bd);
 
 #define HIGH_FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \
-  void vpx_highbd_convolve8_##name##_##opt(                               \
+  void aom_highbd_convolve8_##name##_##opt(                               \
       const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8,           \
       ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,       \
       const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {     \
@@ -111,21 +111,21 @@
       uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                          \
       if (filter[0] | filter[1] | filter[2]) {                            \
         while (w >= 16) {                                                 \
-          vpx_highbd_filter_block1d16_##dir##8_##avg##opt(                \
+          aom_highbd_filter_block1d16_##dir##8_##avg##opt(                \
               src_start, src_stride, dst, dst_stride, h, filter, bd);     \
           src += 16;                                                      \
           dst += 16;                                                      \
           w -= 16;                                                        \
         }                                                                 \
         while (w >= 8) {                                                  \
-          vpx_highbd_filter_block1d8_##dir##8_##avg##opt(                 \
+          aom_highbd_filter_block1d8_##dir##8_##avg##opt(                 \
               src_start, src_stride, dst, dst_stride, h, filter, bd);     \
           src += 8;                                                       \
           dst += 8;                                                       \
           w -= 8;                                                         \
         }                                                                 \
         while (w >= 4) {                                                  \
-          vpx_highbd_filter_block1d4_##dir##8_##avg##opt(                 \
+          aom_highbd_filter_block1d4_##dir##8_##avg##opt(                 \
               src_start, src_stride, dst, dst_stride, h, filter, bd);     \
           src += 4;                                                       \
           dst += 4;                                                       \
@@ -133,21 +133,21 @@
         }                                                                 \
       } else {                                                            \
         while (w >= 16) {                                                 \
-          vpx_highbd_filter_block1d16_##dir##2_##avg##opt(                \
+          aom_highbd_filter_block1d16_##dir##2_##avg##opt(                \
               src, src_stride, dst, dst_stride, h, filter, bd);           \
           src += 16;                                                      \
           dst += 16;                                                      \
           w -= 16;                                                        \
         }                                                                 \
         while (w >= 8) {                                                  \
-          vpx_highbd_filter_block1d8_##dir##2_##avg##opt(                 \
+          aom_highbd_filter_block1d8_##dir##2_##avg##opt(                 \
               src, src_stride, dst, dst_stride, h, filter, bd);           \
           src += 8;                                                       \
           dst += 8;                                                       \
           w -= 8;                                                         \
         }                                                                 \
         while (w >= 4) {                                                  \
-          vpx_highbd_filter_block1d4_##dir##2_##avg##opt(                 \
+          aom_highbd_filter_block1d4_##dir##2_##avg##opt(                 \
               src, src_stride, dst, dst_stride, h, filter, bd);           \
           src += 4;                                                       \
           dst += 4;                                                       \
@@ -156,14 +156,14 @@
       }                                                                   \
     }                                                                     \
     if (w) {                                                              \
-      vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
+      aom_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
                                       filter_x, x_step_q4, filter_y,      \
                                       y_step_q4, w, h, bd);               \
     }                                                                     \
   }
 
 #define HIGH_FUN_CONV_2D(avg, opt)                                            \
-  void vpx_highbd_convolve8_##avg##opt(                                       \
+  void aom_highbd_convolve8_##avg##opt(                                       \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                 \
       ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,           \
       const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {         \
@@ -174,29 +174,29 @@
           filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) {  \
         DECLARE_ALIGNED(16, uint16_t,                                         \
                         fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 7)]);             \
-        vpx_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride,    \
+        aom_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride,    \
                                          CONVERT_TO_BYTEPTR(fdata2),          \
                                          MAX_SB_SIZE, filter_x, x_step_q4,    \
                                          filter_y, y_step_q4, w, h + 7, bd);  \
-        vpx_highbd_convolve8_##avg##vert_##opt(                               \
+        aom_highbd_convolve8_##avg##vert_##opt(                               \
             CONVERT_TO_BYTEPTR(fdata2) + 3 * MAX_SB_SIZE, MAX_SB_SIZE, dst,   \
             dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);  \
       } else {                                                                \
         DECLARE_ALIGNED(16, uint16_t,                                         \
                         fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 1)]);             \
-        vpx_highbd_convolve8_horiz_##opt(                                     \
+        aom_highbd_convolve8_horiz_##opt(                                     \
             src, src_stride, CONVERT_TO_BYTEPTR(fdata2), MAX_SB_SIZE,         \
             filter_x, x_step_q4, filter_y, y_step_q4, w, h + 1, bd);          \
-        vpx_highbd_convolve8_##avg##vert_##opt(                               \
+        aom_highbd_convolve8_##avg##vert_##opt(                               \
             CONVERT_TO_BYTEPTR(fdata2), MAX_SB_SIZE, dst, dst_stride,         \
             filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);              \
       }                                                                       \
     } else {                                                                  \
-      vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride,         \
+      aom_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride,         \
                                     filter_x, x_step_q4, filter_y, y_step_q4, \
                                     w, h, bd);                                \
     }                                                                         \
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#endif  // VPX_DSP_X86_CONVOLVE_H_
+#endif  // AOM_DSP_X86_CONVOLVE_H_
diff --git a/aom_dsp/x86/deblock_sse2.asm b/aom_dsp/x86/deblock_sse2.asm
index c3d23a3..bae6cf4 100644
--- a/aom_dsp/x86/deblock_sse2.asm
+++ b/aom_dsp/x86/deblock_sse2.asm
@@ -83,7 +83,7 @@
         add         rbx,        16
 %endmacro
 
-;void vpx_post_proc_down_and_across_mb_row_sse2
+;void aom_post_proc_down_and_across_mb_row_sse2
 ;(
 ;    unsigned char *src_ptr,
 ;    unsigned char *dst_ptr,
@@ -93,8 +93,8 @@
 ;    int *flimits,
 ;    int size
 ;)
-global sym(vpx_post_proc_down_and_across_mb_row_sse2) PRIVATE
-sym(vpx_post_proc_down_and_across_mb_row_sse2):
+global sym(aom_post_proc_down_and_across_mb_row_sse2) PRIVATE
+sym(aom_post_proc_down_and_across_mb_row_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -230,11 +230,11 @@
     ret
 %undef flimit
 
-;void vpx_mbpost_proc_down_xmm(unsigned char *dst,
+;void aom_mbpost_proc_down_xmm(unsigned char *dst,
 ;                            int pitch, int rows, int cols,int flimit)
-extern sym(vpx_rv)
-global sym(vpx_mbpost_proc_down_xmm) PRIVATE
-sym(vpx_mbpost_proc_down_xmm):
+extern sym(aom_rv)
+global sym(aom_mbpost_proc_down_xmm) PRIVATE
+sym(aom_mbpost_proc_down_xmm):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -257,7 +257,7 @@
 %define flimit4 [rsp+128]
 
 %if ABI_IS_32BIT=0
-    lea         r8,       [GLOBAL(sym(vpx_rv))]
+    lea         r8,       [GLOBAL(sym(aom_rv))]
 %endif
 
     ;rows +=8;
@@ -403,13 +403,13 @@
             and         rcx,        127
 %if ABI_IS_32BIT=1 && CONFIG_PIC=1
             push        rax
-            lea         rax,        [GLOBAL(sym(vpx_rv))]
-            movdqu      xmm4,       [rax + rcx*2] ;vpx_rv[rcx*2]
+            lea         rax,        [GLOBAL(sym(aom_rv))]
+            movdqu      xmm4,       [rax + rcx*2] ;aom_rv[rcx*2]
             pop         rax
 %elif ABI_IS_32BIT=0
-            movdqu      xmm4,       [r8 + rcx*2] ;vpx_rv[rcx*2]
+            movdqu      xmm4,       [r8 + rcx*2] ;aom_rv[rcx*2]
 %else
-            movdqu      xmm4,       [sym(vpx_rv) + rcx*2]
+            movdqu      xmm4,       [sym(aom_rv) + rcx*2]
 %endif
 
             paddw       xmm1,       xmm4
@@ -462,10 +462,10 @@
 %undef flimit4
 
 
-;void vpx_mbpost_proc_across_ip_xmm(unsigned char *src,
+;void aom_mbpost_proc_across_ip_xmm(unsigned char *src,
 ;                                int pitch, int rows, int cols,int flimit)
-global sym(vpx_mbpost_proc_across_ip_xmm) PRIVATE
-sym(vpx_mbpost_proc_across_ip_xmm):
+global sym(aom_mbpost_proc_across_ip_xmm) PRIVATE
+sym(aom_mbpost_proc_across_ip_xmm):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
diff --git a/aom_dsp/x86/fwd_dct32x32_impl_avx2.h b/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
index 1d129bf..891b952 100644
--- a/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
+++ b/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
@@ -10,7 +10,7 @@
 
 #include <immintrin.h>  // AVX2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/txfm_common.h"
 
 #define pair256_set_epi16(a, b)                                            \
@@ -2939,7 +2939,7 @@
             tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0);
             tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0);
             //           ... and here.
-            //           PS: also change code in vp9/encoder/vp9_dct.c
+            //           PS: also change code in av1/encoder/av1_dct.c
             tr2_0 = _mm256_add_epi16(tr2_0, kOne);
             tr2_1 = _mm256_add_epi16(tr2_1, kOne);
             tr2_2 = _mm256_add_epi16(tr2_2, kOne);
diff --git a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
index 04e3e37..3b1d5ba 100644
--- a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
+++ b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
@@ -21,31 +21,31 @@
 #define ADD_EPI16 _mm_adds_epi16
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
-void vpx_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void aom_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vpx_fdct32(temp_in, temp_out, 0);
+    aom_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
   }
 }
-#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rows_c
 #else
-void vpx_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void aom_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vpx_fdct32(temp_in, temp_out, 1);
+    aom_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
-#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
@@ -3145,7 +3145,7 @@
             tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
             tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
             //           ... and here.
-            //           PS: also change code in vp9/encoder/vp9_dct.c
+            //           PS: also change code in av1/encoder/av1_dct.c
             tr2_0 = _mm_add_epi16(tr2_0, kOne);
             tr2_1 = _mm_add_epi16(tr2_1, kOne);
             tr2_2 = _mm_add_epi16(tr2_2, kOne);
diff --git a/aom_dsp/x86/fwd_txfm_avx2.c b/aom_dsp/x86/fwd_txfm_avx2.c
index 325a5e9..d10e822 100644
--- a/aom_dsp/x86/fwd_txfm_avx2.c
+++ b/aom_dsp/x86/fwd_txfm_avx2.c
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#define FDCT32x32_2D_AVX2 vpx_fdct32x32_rd_avx2
+#define FDCT32x32_2D_AVX2 aom_fdct32x32_rd_avx2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h"
 #undef FDCT32x32_2D_AVX2
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D_AVX2 vpx_fdct32x32_avx2
+#define FDCT32x32_2D_AVX2 aom_fdct32x32_avx2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h"  // NOLINT
 #undef FDCT32x32_2D_AVX2
diff --git a/aom_dsp/x86/fwd_txfm_impl_sse2.h b/aom_dsp/x86/fwd_txfm_impl_sse2.h
index 8b57fab..83c9d3b 100644
--- a/aom_dsp/x86/fwd_txfm_impl_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_impl_sse2.h
@@ -10,7 +10,7 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
@@ -98,7 +98,7 @@
                        _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
   test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
   if (test) {
-    vpx_highbd_fdct4x4_c(input, output, stride);
+    aom_highbd_fdct4x4_c(input, output, stride);
     return;
   }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -169,7 +169,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&x0, &x1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -191,7 +191,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&t0, &t1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -230,7 +230,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&x0, &x1);
       if (overflow) {
-        vpx_highbd_fdct4x4_c(input, output, stride);
+        aom_highbd_fdct4x4_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -313,7 +313,7 @@
       overflow =
           check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
     }
@@ -328,7 +328,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -371,7 +371,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -401,7 +401,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&r0, &r1);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -414,7 +414,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -457,7 +457,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
           if (overflow) {
-            vpx_highbd_fdct8x8_c(input, output, stride);
+            aom_highbd_fdct8x8_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -720,7 +720,7 @@
         overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
                                            &input4, &input5, &input6, &input7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -740,7 +740,7 @@
             check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                     &step1_4, &step1_5, &step1_6, &step1_7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -760,7 +760,7 @@
         overflow =
             check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -774,7 +774,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -796,7 +796,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -817,7 +817,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x2(&r0, &r1);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -830,7 +830,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -853,7 +853,7 @@
               overflow =
                   check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
               if (overflow) {
-                vpx_highbd_fdct16x16_c(input, output, stride);
+                aom_highbd_fdct16x16_c(input, output, stride);
                 return;
               }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -881,7 +881,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -901,7 +901,7 @@
               check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
                                       &step3_4, &step3_5, &step3_6, &step3_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -924,7 +924,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -944,7 +944,7 @@
               check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                       &step1_4, &step1_5, &step1_6, &step1_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -966,7 +966,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -987,7 +987,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
diff --git a/aom_dsp/x86/fwd_txfm_sse2.c b/aom_dsp/x86/fwd_txfm_sse2.c
index d5b2f0d..7b66e28 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/aom_dsp/x86/fwd_txfm_sse2.c
@@ -10,12 +10,12 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 
-void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
   output[0] = (tran_low_t)_mm_cvtsi128_si32(in0);
 }
 
-void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
   __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
   __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,7 +84,7 @@
   output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
 }
 
-void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+void aom_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
                           int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
@@ -153,7 +153,7 @@
   output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
 }
 
-void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
                           int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
@@ -226,47 +226,47 @@
 }
 
 #define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vpx_fdct4x4_sse2
-#define FDCT8x8_2D vpx_fdct8x8_sse2
-#define FDCT16x16_2D vpx_fdct16x16_sse2
+#define FDCT4x4_2D aom_fdct4x4_sse2
+#define FDCT8x8_2D aom_fdct8x8_sse2
+#define FDCT16x16_2D aom_fdct16x16_sse2
 #include "aom_dsp/x86/fwd_txfm_impl_sse2.h"
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vpx_fdct32x32_rd_sse2
+#define FDCT32x32_2D aom_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h"
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vpx_fdct32x32_sse2
+#define FDCT32x32_2D aom_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vpx_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vpx_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vpx_highbd_fdct16x16_sse2
+#define FDCT4x4_2D aom_highbd_fdct4x4_sse2
+#define FDCT8x8_2D aom_highbd_fdct8x8_sse2
+#define FDCT16x16_2D aom_highbd_fdct16x16_sse2
 #include "aom_dsp/x86/fwd_txfm_impl_sse2.h"  // NOLINT
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vpx_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D aom_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vpx_highbd_fdct32x32_sse2
+#define FDCT32x32_2D aom_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/fwd_txfm_sse2.h b/aom_dsp/x86/fwd_txfm_sse2.h
index 5201e76..faf6d52 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_sse2.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_FWD_TXFM_SSE2_H_
-#define VPX_DSP_X86_FWD_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_FWD_TXFM_SSE2_H_
+#define AOM_DSP_X86_FWD_TXFM_SSE2_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -244,7 +244,7 @@
 }
 
 static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
   __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -253,11 +253,11 @@
   _mm_store_si128((__m128i *)(dst_ptr + 4), out1);
 #else
   _mm_store_si128((__m128i *)(dst_ptr), *poutput);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
 static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
   __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -266,7 +266,7 @@
   _mm_storeu_si128((__m128i *)(dst_ptr + 4), out1);
 #else
   _mm_storeu_si128((__m128i *)(dst_ptr), *poutput);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
 static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1,
@@ -368,4 +368,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_DSP_X86_FWD_TXFM_SSE2_H_
+#endif  // AOM_DSP_X86_FWD_TXFM_SSE2_H_
diff --git a/aom_dsp/x86/halfpix_variance_impl_sse2.asm b/aom_dsp/x86/halfpix_variance_impl_sse2.asm
index b91d1dc..66e752e 100644
--- a/aom_dsp/x86/halfpix_variance_impl_sse2.asm
+++ b/aom_dsp/x86/halfpix_variance_impl_sse2.asm
@@ -10,15 +10,15 @@
 
 %include "aom_ports/x86_abi_support.asm"
 
-;void vpx_half_horiz_vert_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_horiz_vert_variance16x_h_sse2(unsigned char *ref,
 ;                                            int ref_stride,
 ;                                            unsigned char *src,
 ;                                            int src_stride,
 ;                                            unsigned int height,
 ;                                            int *sum,
 ;                                            unsigned int *sumsquared)
-global sym(vpx_half_horiz_vert_variance16x_h_sse2) PRIVATE
-sym(vpx_half_horiz_vert_variance16x_h_sse2):
+global sym(aom_half_horiz_vert_variance16x_h_sse2) PRIVATE
+sym(aom_half_horiz_vert_variance16x_h_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -45,7 +45,7 @@
 
         lea             rsi,            [rsi + rax]
 
-vpx_half_horiz_vert_variance16x_h_1:
+aom_half_horiz_vert_variance16x_h_1:
         movdqu          xmm1,           XMMWORD PTR [rsi]     ;
         movdqu          xmm2,           XMMWORD PTR [rsi+1]   ;
         pavgb           xmm1,           xmm2                ;  xmm1 = avg(xmm1,xmm3) horizontal line i+1
@@ -77,7 +77,7 @@
         lea             rdi,            [rdi + rdx]
 
         sub             rcx,            1                   ;
-        jnz             vpx_half_horiz_vert_variance16x_h_1     ;
+        jnz             aom_half_horiz_vert_variance16x_h_1     ;
 
         pxor        xmm1,           xmm1
         pxor        xmm5,           xmm5
@@ -123,15 +123,15 @@
     ret
 
 
-;void vpx_half_vert_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_vert_variance16x_h_sse2(unsigned char *ref,
 ;                                      int ref_stride,
 ;                                      unsigned char *src,
 ;                                      int src_stride,
 ;                                      unsigned int height,
 ;                                      int *sum,
 ;                                      unsigned int *sumsquared)
-global sym(vpx_half_vert_variance16x_h_sse2) PRIVATE
-sym(vpx_half_vert_variance16x_h_sse2):
+global sym(aom_half_vert_variance16x_h_sse2) PRIVATE
+sym(aom_half_vert_variance16x_h_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -154,7 +154,7 @@
         lea             rsi,            [rsi + rax          ]
         pxor            xmm0,           xmm0
 
-vpx_half_vert_variance16x_h_1:
+aom_half_vert_variance16x_h_1:
         movdqu          xmm3,           XMMWORD PTR [rsi]
 
         pavgb           xmm5,           xmm3                ;  xmm5 = avg(xmm1,xmm3)
@@ -182,7 +182,7 @@
         lea             rdi,            [rdi + rdx]
 
         sub             rcx,            1
-        jnz             vpx_half_vert_variance16x_h_1
+        jnz             aom_half_vert_variance16x_h_1
 
         pxor        xmm1,           xmm1
         pxor        xmm5,           xmm5
@@ -228,15 +228,15 @@
     ret
 
 
-;void vpx_half_horiz_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_horiz_variance16x_h_sse2(unsigned char *ref,
 ;                                       int ref_stride
 ;                                       unsigned char *src,
 ;                                       int src_stride,
 ;                                       unsigned int height,
 ;                                       int *sum,
 ;                                       unsigned int *sumsquared)
-global sym(vpx_half_horiz_variance16x_h_sse2) PRIVATE
-sym(vpx_half_horiz_variance16x_h_sse2):
+global sym(aom_half_horiz_variance16x_h_sse2) PRIVATE
+sym(aom_half_horiz_variance16x_h_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 7
@@ -257,7 +257,7 @@
 
         pxor            xmm0,           xmm0                ;
 
-vpx_half_horiz_variance16x_h_1:
+aom_half_horiz_variance16x_h_1:
         movdqu          xmm5,           XMMWORD PTR [rsi]     ;  xmm5 = s0,s1,s2..s15
         movdqu          xmm3,           XMMWORD PTR [rsi+1]   ;  xmm3 = s1,s2,s3..s16
 
@@ -284,7 +284,7 @@
         lea             rdi,            [rdi + rdx]
 
         sub             rcx,            1                   ;
-        jnz             vpx_half_horiz_variance16x_h_1        ;
+        jnz             aom_half_horiz_variance16x_h_1        ;
 
         pxor        xmm1,           xmm1
         pxor        xmm5,           xmm5
@@ -335,7 +335,7 @@
 xmm_bi_rd:
     times 8 dw 64
 align 16
-vpx_bilinear_filters_sse2:
+aom_bilinear_filters_sse2:
     dw 128, 128, 128, 128, 128, 128, 128, 128,  0,  0,  0,  0,  0,  0,  0,  0
     dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
     dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
diff --git a/aom_dsp/x86/halfpix_variance_sse2.c b/aom_dsp/x86/halfpix_variance_sse2.c
index 8d26b75..1de0c43 100644
--- a/aom_dsp/x86/halfpix_variance_sse2.c
+++ b/aom_dsp/x86/halfpix_variance_sse2.c
@@ -10,32 +10,32 @@
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-void vpx_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
+void aom_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
                                             int ref_stride,
                                             const unsigned char *src,
                                             int src_stride, unsigned int height,
                                             int *sum, unsigned int *sumsquared);
-void vpx_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
                                        const unsigned char *src, int src_stride,
                                        unsigned int height, int *sum,
                                        unsigned int *sumsquared);
-void vpx_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
                                       const unsigned char *src, int src_stride,
                                       unsigned int height, int *sum,
                                       unsigned int *sumsquared);
 
-uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
                                              int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
 
-  vpx_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+  aom_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
                                     &xsum0, &xxsum0);
 
   *sse = xxsum0;
@@ -44,13 +44,13 @@
   return (xxsum0 - ((uint32_t)((int64_t)xsum0 * xsum0) >> 8));
 }
 
-uint32_t vpx_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
                                              int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
-  vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
+  aom_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
                                    &xxsum0);
 
   *sse = xxsum0;
@@ -59,14 +59,14 @@
   return (xxsum0 - ((uint32_t)((int64_t)xsum0 * xsum0) >> 8));
 }
 
-uint32_t vpx_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
                                               int src_stride,
                                               const unsigned char *dst,
                                               int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
 
-  vpx_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+  aom_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
                                          &xsum0, &xxsum0);
 
   *sse = xxsum0;
diff --git a/aom_dsp/x86/highbd_loopfilter_sse2.c b/aom_dsp/x86/highbd_loopfilter_sse2.c
index 15b8283..ee4b83e 100644
--- a/aom_dsp/x86/highbd_loopfilter_sse2.c
+++ b/aom_dsp/x86/highbd_loopfilter_sse2.c
@@ -10,7 +10,7 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/emmintrin_compat.h"
 
@@ -48,7 +48,7 @@
 
 // TODO(debargha, peter): Break up large functions into smaller ones
 // in this file.
-void vpx_highbd_lpf_horizontal_edge_8_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_8_sse2(uint16_t *s, int p,
                                            const uint8_t *_blimit,
                                            const uint8_t *_limit,
                                            const uint8_t *_thresh, int bd) {
@@ -475,15 +475,15 @@
   _mm_store_si128((__m128i *)(s - 0 * p), q0);
 }
 
-void vpx_highbd_lpf_horizontal_edge_16_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_16_sse2(uint16_t *s, int p,
                                             const uint8_t *_blimit,
                                             const uint8_t *_limit,
                                             const uint8_t *_thresh, int bd) {
-  vpx_highbd_lpf_horizontal_edge_8_sse2(s, p, _blimit, _limit, _thresh, bd);
-  vpx_highbd_lpf_horizontal_edge_8_sse2(s + 8, p, _blimit, _limit, _thresh, bd);
+  aom_highbd_lpf_horizontal_edge_8_sse2(s, p, _blimit, _limit, _thresh, bd);
+  aom_highbd_lpf_horizontal_edge_8_sse2(s + 8, p, _blimit, _limit, _thresh, bd);
 }
 
-void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
                                       const uint8_t *_blimit,
                                       const uint8_t *_limit,
                                       const uint8_t *_thresh, int bd) {
@@ -640,7 +640,7 @@
   filt = _mm_adds_epi16(filt, work_a);
   filt = _mm_adds_epi16(filt, work_a);
   filt = _mm_adds_epi16(filt, work_a);
-  // (vpx_filter + 3 * (qs0 - ps0)) & mask
+  // (aom_filter + 3 * (qs0 - ps0)) & mask
   filt = signed_char_clamp_bd_sse2(filt, bd);
   filt = _mm_and_si128(filt, mask);
 
@@ -709,15 +709,15 @@
   _mm_store_si128((__m128i *)(s + 2 * p), q2);
 }
 
-void vpx_highbd_lpf_horizontal_8_dual_sse2(
+void aom_highbd_lpf_horizontal_8_dual_sse2(
     uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
     const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
     const uint8_t *_thresh1, int bd) {
-  vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
-  vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+  aom_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
+  aom_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
 }
 
-void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
                                       const uint8_t *_blimit,
                                       const uint8_t *_limit,
                                       const uint8_t *_thresh, int bd) {
@@ -834,7 +834,7 @@
   filt = _mm_adds_epi16(filt, work_a);
   filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);
 
-  // (vpx_filter + 3 * (qs0 - ps0)) & mask
+  // (aom_filter + 3 * (qs0 - ps0)) & mask
   filt = _mm_and_si128(filt, mask);
 
   filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
@@ -879,12 +879,12 @@
   _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
 }
 
-void vpx_highbd_lpf_horizontal_4_dual_sse2(
+void aom_highbd_lpf_horizontal_4_dual_sse2(
     uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
     const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
     const uint8_t *_thresh1, int bd) {
-  vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
-  vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+  aom_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
+  aom_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
 }
 
 static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
@@ -999,7 +999,7 @@
   highbd_transpose(src1, in_p, dest1, out_p, 1);
 }
 
-void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                     const uint8_t *limit, const uint8_t *thresh,
                                     int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
@@ -1013,7 +1013,7 @@
   highbd_transpose(src, p, dst, 8, 1);
 
   // Loop filtering
-  vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
+  aom_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
 
   src[0] = t_dst;
   dst[0] = s - 4;
@@ -1022,7 +1022,7 @@
   highbd_transpose(src, 8, dst, p, 1);
 }
 
-void vpx_highbd_lpf_vertical_4_dual_sse2(
+void aom_highbd_lpf_vertical_4_dual_sse2(
     uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
@@ -1034,7 +1034,7 @@
   highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
 
   // Loop filtering
-  vpx_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
+  aom_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
                                         thresh0, blimit1, limit1, thresh1, bd);
   src[0] = t_dst;
   src[1] = t_dst + 8;
@@ -1045,7 +1045,7 @@
   highbd_transpose(src, 16, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                     const uint8_t *limit, const uint8_t *thresh,
                                     int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
@@ -1059,7 +1059,7 @@
   highbd_transpose(src, p, dst, 8, 1);
 
   // Loop filtering
-  vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
+  aom_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
 
   src[0] = t_dst;
   dst[0] = s - 4;
@@ -1068,7 +1068,7 @@
   highbd_transpose(src, 8, dst, p, 1);
 }
 
-void vpx_highbd_lpf_vertical_8_dual_sse2(
+void aom_highbd_lpf_vertical_8_dual_sse2(
     uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
     const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
     const uint8_t *thresh1, int bd) {
@@ -1080,7 +1080,7 @@
   highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
 
   // Loop filtering
-  vpx_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
+  aom_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
                                         thresh0, blimit1, limit1, thresh1, bd);
   src[0] = t_dst;
   src[1] = t_dst + 8;
@@ -1092,7 +1092,7 @@
   highbd_transpose(src, 16, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                      const uint8_t *limit,
                                      const uint8_t *thresh, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
@@ -1108,7 +1108,7 @@
   highbd_transpose(src, p, dst, 8, 2);
 
   // Loop filtering
-  vpx_highbd_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh,
+  aom_highbd_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh,
                                         bd);
   src[0] = t_dst;
   src[1] = t_dst + 8 * 8;
@@ -1119,7 +1119,7 @@
   highbd_transpose(src, 8, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
                                           const uint8_t *blimit,
                                           const uint8_t *limit,
                                           const uint8_t *thresh, int bd) {
@@ -1130,7 +1130,7 @@
   highbd_transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
 
   //  Loop filtering
-  vpx_highbd_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit,
+  aom_highbd_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit,
                                          thresh, bd);
 
   //  Transpose back
diff --git a/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index ecde8c2..34028d9 100644
--- a/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -10,13 +10,13 @@
 
 #include <emmintrin.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
                                 int skip_block, const int16_t *zbin_ptr,
                                 const int16_t *round_ptr,
                                 const int16_t *quant_ptr,
@@ -92,7 +92,7 @@
   *eob_ptr = eob_i + 1;
 }
 
-void vpx_highbd_quantize_b_32x32_sse2(
+void aom_highbd_quantize_b_32x32_sse2(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
     const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/x86/highbd_sad4d_sse2.asm b/aom_dsp/x86/highbd_sad4d_sse2.asm
index 6c2a61e..54501d1 100644
--- a/aom_dsp/x86/highbd_sad4d_sse2.asm
+++ b/aom_dsp/x86/highbd_sad4d_sse2.asm
@@ -209,7 +209,7 @@
   HIGH_PROCESS_32x2x4  0, %4, %5, (%4 + 32), (%5 + 32), %6
 %endmacro
 
-; void vpx_highbd_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
+; void aom_highbd_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
 ;                         uint8_t *ref[4], int ref_stride,
 ;                         uint32_t res[4]);
 ; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16 or 8x8
diff --git a/aom_dsp/x86/highbd_sad_sse2.asm b/aom_dsp/x86/highbd_sad_sse2.asm
index bc4b28d..2da8c83 100644
--- a/aom_dsp/x86/highbd_sad_sse2.asm
+++ b/aom_dsp/x86/highbd_sad_sse2.asm
@@ -50,7 +50,7 @@
 %endif
 %endmacro
 
-; unsigned int vpx_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride,
 ;                                    uint8_t *ref, int ref_stride);
 %macro HIGH_SAD64XN 1-2 0
   HIGH_SAD_FN 64, %1, 5, %2
@@ -157,7 +157,7 @@
 HIGH_SAD64XN 32, 1 ; highbd_sad64x32_avg_sse2
 
 
-; unsigned int vpx_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
 ;                                    uint8_t *ref, int ref_stride);
 %macro HIGH_SAD32XN 1-2 0
   HIGH_SAD_FN 32, %1, 5, %2
@@ -225,7 +225,7 @@
 HIGH_SAD32XN 32, 1 ; highbd_sad32x32_avg_sse2
 HIGH_SAD32XN 16, 1 ; highbd_sad32x16_avg_sse2
 
-; unsigned int vpx_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
 ;                                    uint8_t *ref, int ref_stride);
 %macro HIGH_SAD16XN 1-2 0
   HIGH_SAD_FN 16, %1, 5, %2
@@ -294,7 +294,7 @@
 HIGH_SAD16XN  8, 1 ; highbd_sad16x8_avg_sse2
 
 
-; unsigned int vpx_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
 ;                                    uint8_t *ref, int ref_stride);
 %macro HIGH_SAD8XN 1-2 0
   HIGH_SAD_FN 8, %1, 7, %2
diff --git a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
index 30ee81b..1175742 100644
--- a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
+++ b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
@@ -30,7 +30,7 @@
 
 SECTION .text
 
-; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
+; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
 ;                               int x_offset, int y_offset,
 ;                               const uint8_t *dst, ptrdiff_t dst_stride,
 ;                               int height, unsigned int *sse);
diff --git a/aom_dsp/x86/highbd_subtract_sse2.c b/aom_dsp/x86/highbd_subtract_sse2.c
index e7d5ac2..23d6630 100644
--- a/aom_dsp/x86/highbd_subtract_sse2.c
+++ b/aom_dsp/x86/highbd_subtract_sse2.c
@@ -12,8 +12,8 @@
 #include <emmintrin.h>
 #include <stddef.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 typedef void (*SubtractWxHFuncType)(int16_t *diff, ptrdiff_t diff_stride,
                                     const uint16_t *src, ptrdiff_t src_stride,
@@ -349,7 +349,7 @@
   return ret_func_ptr;
 }
 
-void vpx_highbd_subtract_block_sse2(int rows, int cols, int16_t *diff,
+void aom_highbd_subtract_block_sse2(int rows, int cols, int16_t *diff,
                                     ptrdiff_t diff_stride, const uint8_t *src8,
                                     ptrdiff_t src_stride, const uint8_t *pred8,
                                     ptrdiff_t pred_stride, int bd) {
diff --git a/aom_dsp/x86/highbd_variance_impl_sse2.asm b/aom_dsp/x86/highbd_variance_impl_sse2.asm
index 1bf3abb..3abb44f 100644
--- a/aom_dsp/x86/highbd_variance_impl_sse2.asm
+++ b/aom_dsp/x86/highbd_variance_impl_sse2.asm
@@ -11,7 +11,7 @@
 
 %include "aom_ports/x86_abi_support.asm"
 
-;unsigned int vpx_highbd_calc16x16var_sse2
+;unsigned int aom_highbd_calc16x16var_sse2
 ;(
 ;    unsigned char   *  src_ptr,
 ;    int             source_stride,
@@ -20,8 +20,8 @@
 ;    unsigned int    *  SSE,
 ;    int             *  Sum
 ;)
-global sym(vpx_highbd_calc16x16var_sse2) PRIVATE
-sym(vpx_highbd_calc16x16var_sse2):
+global sym(aom_highbd_calc16x16var_sse2) PRIVATE
+sym(aom_highbd_calc16x16var_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
@@ -164,7 +164,7 @@
     ret
 
 
-;unsigned int vpx_highbd_calc8x8var_sse2
+;unsigned int aom_highbd_calc8x8var_sse2
 ;(
 ;    unsigned char   *  src_ptr,
 ;    int             source_stride,
@@ -173,8 +173,8 @@
 ;    unsigned int    *  SSE,
 ;    int             *  Sum
 ;)
-global sym(vpx_highbd_calc8x8var_sse2) PRIVATE
-sym(vpx_highbd_calc8x8var_sse2):
+global sym(aom_highbd_calc8x8var_sse2) PRIVATE
+sym(aom_highbd_calc8x8var_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/highbd_variance_sse2.c b/aom_dsp/x86/highbd_variance_sse2.c
index 90ef4d4..d19214c 100644
--- a/aom_dsp/x86/highbd_variance_sse2.c
+++ b/aom_dsp/x86/highbd_variance_sse2.c
@@ -10,8 +10,8 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "aom_ports/mem.h"
 
@@ -19,11 +19,11 @@
                                        const uint16_t *ref, int ref_stride,
                                        uint32_t *sse, int *sum);
 
-uint32_t vpx_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride,
+uint32_t aom_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride,
                                     const uint16_t *ref, int ref_stride,
                                     uint32_t *sse, int *sum);
 
-uint32_t vpx_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride,
+uint32_t aom_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride,
                                       const uint16_t *ref, int ref_stride,
                                       uint32_t *sse, int *sum);
 
@@ -93,32 +93,32 @@
 }
 
 #define HIGH_GET_VAR(S)                                                       \
-  void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
+  void aom_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
                                          const uint8_t *ref8, int ref_stride, \
                                          uint32_t *sse, int *sum) {           \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
-    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+    aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
                                        sum);                                  \
   }                                                                           \
                                                                               \
-  void vpx_highbd_10_get##S##x##S##var_sse2(                                  \
+  void aom_highbd_10_get##S##x##S##var_sse2(                                  \
       const uint8_t *src8, int src_stride, const uint8_t *ref8,               \
       int ref_stride, uint32_t *sse, int *sum) {                              \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
-    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+    aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
                                        sum);                                  \
     *sum = ROUND_POWER_OF_TWO(*sum, 2);                                       \
     *sse = ROUND_POWER_OF_TWO(*sse, 4);                                       \
   }                                                                           \
                                                                               \
-  void vpx_highbd_12_get##S##x##S##var_sse2(                                  \
+  void aom_highbd_12_get##S##x##S##var_sse2(                                  \
       const uint8_t *src8, int src_stride, const uint8_t *ref8,               \
       int ref_stride, uint32_t *sse, int *sum) {                              \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
-    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+    aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
                                        sum);                                  \
     *sum = ROUND_POWER_OF_TWO(*sum, 4);                                       \
     *sse = ROUND_POWER_OF_TWO(*sse, 8);                                       \
@@ -130,7 +130,7 @@
 #undef HIGH_GET_VAR
 
 #define VAR_FN(w, h, block_size, shift)                                    \
-  uint32_t vpx_highbd_8_variance##w##x##h##_sse2(                          \
+  uint32_t aom_highbd_8_variance##w##x##h##_sse2(                          \
       const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
       int ref_stride, uint32_t *sse) {                                     \
     int sum;                                                               \
@@ -138,11 +138,11 @@
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
     highbd_8_variance_sse2(                                                \
         src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
-        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+        aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
     return *sse - (((int64_t)sum * sum) >> shift);                         \
   }                                                                        \
                                                                            \
-  uint32_t vpx_highbd_10_variance##w##x##h##_sse2(                         \
+  uint32_t aom_highbd_10_variance##w##x##h##_sse2(                         \
       const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
       int ref_stride, uint32_t *sse) {                                     \
     int sum;                                                               \
@@ -151,12 +151,12 @@
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
     highbd_10_variance_sse2(                                               \
         src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
-        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+        aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
     var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift);               \
     return (var >= 0) ? (uint32_t)var : 0;                                 \
   }                                                                        \
                                                                            \
-  uint32_t vpx_highbd_12_variance##w##x##h##_sse2(                         \
+  uint32_t aom_highbd_12_variance##w##x##h##_sse2(                         \
       const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
       int ref_stride, uint32_t *sse) {                                     \
     int sum;                                                               \
@@ -165,7 +165,7 @@
     uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
     highbd_12_variance_sse2(                                               \
         src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
-        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+        aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
     var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift);               \
     return (var >= 0) ? (uint32_t)var : 0;                                 \
   }
@@ -183,69 +183,69 @@
 
 #undef VAR_FN
 
-unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride,
                                         const uint8_t *ref8, int ref_stride,
                                         unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
-                         vpx_highbd_calc16x16var_sse2, 16);
+                         aom_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
-unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride,
                                          const uint8_t *ref8, int ref_stride,
                                          unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
-                          vpx_highbd_calc16x16var_sse2, 16);
+                          aom_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
-unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride,
                                          const uint8_t *ref8, int ref_stride,
                                          unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
-                          vpx_highbd_calc16x16var_sse2, 16);
+                          aom_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
-unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride,
                                       const uint8_t *ref8, int ref_stride,
                                       unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
-                         vpx_highbd_calc8x8var_sse2, 8);
+                         aom_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
-unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride,
                                        const uint8_t *ref8, int ref_stride,
                                        unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
-                          vpx_highbd_calc8x8var_sse2, 8);
+                          aom_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
-unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
                                        const uint8_t *ref8, int ref_stride,
                                        unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
   highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
-                          vpx_highbd_calc8x8var_sse2, 8);
+                          aom_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
@@ -253,7 +253,7 @@
 // These definitions are for functions defined in
 // highbd_subpel_variance_impl_sse2.asm
 #define DECL(w, opt)                                                         \
-  int vpx_highbd_sub_pixel_variance##w##xh_##opt(                            \
+  int aom_highbd_sub_pixel_variance##w##xh_##opt(                            \
       const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
       const uint16_t *dst, ptrdiff_t dst_stride, int height,                 \
       unsigned int *sse, void *unused0, void *unused);
@@ -267,29 +267,29 @@
 #undef DECL
 
 #define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
-  uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt(                   \
+  uint32_t aom_highbd_8_sub_pixel_variance##w##x##h##_##opt(                   \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
     uint32_t sse;                                                              \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
-    int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                      \
+    int se = aom_highbd_sub_pixel_variance##wf##xh_##opt(                      \
         src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL,   \
         NULL);                                                                 \
     if (w > wf) {                                                              \
       unsigned int sse2;                                                       \
-      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+      int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                   \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
           &sse2, NULL, NULL);                                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+        se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                     \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+        se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                     \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
@@ -300,29 +300,29 @@
     return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt(                  \
+  uint32_t aom_highbd_10_sub_pixel_variance##w##x##h##_##opt(                  \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
     uint32_t sse;                                                              \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
-    int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                      \
+    int se = aom_highbd_sub_pixel_variance##wf##xh_##opt(                      \
         src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL,   \
         NULL);                                                                 \
     if (w > wf) {                                                              \
       uint32_t sse2;                                                           \
-      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+      int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                   \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
           &sse2, NULL, NULL);                                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+        se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                     \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+        se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                     \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
@@ -335,7 +335,7 @@
     return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt(                  \
+  uint32_t aom_highbd_12_sub_pixel_variance##w##x##h##_##opt(                  \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
     int start_row;                                                             \
@@ -347,27 +347,27 @@
     for (start_row = 0; start_row < h; start_row += 16) {                      \
       uint32_t sse2;                                                           \
       int height = h - start_row < 16 ? h - start_row : 16;                    \
-      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+      int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                   \
           src + (start_row * src_stride), src_stride, x_offset, y_offset,      \
           dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL,     \
           NULL);                                                               \
       se += se2;                                                               \
       long_sse += sse2;                                                        \
       if (w > wf) {                                                            \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+        se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                     \
             src + 16 + (start_row * src_stride), src_stride, x_offset,         \
             y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
         long_sse += sse2;                                                      \
         if (w > wf * 2) {                                                      \
-          se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+          se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                   \
               src + 32 + (start_row * src_stride), src_stride, x_offset,       \
               y_offset, dst + 32 + (start_row * dst_stride), dst_stride,       \
               height, &sse2, NULL, NULL);                                      \
           se += se2;                                                           \
           long_sse += sse2;                                                    \
-          se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+          se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt(                   \
               src + 48 + (start_row * src_stride), src_stride, x_offset,       \
               y_offset, dst + 48 + (start_row * dst_stride), dst_stride,       \
               height, &sse2, NULL, NULL);                                      \
@@ -402,7 +402,7 @@
 
 // The 2 unused parameters are place holders for PIC enabled build.
 #define DECL(w, opt)                                                         \
-  int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt(                        \
+  int aom_highbd_sub_pixel_avg_variance##w##xh_##opt(                        \
       const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
       const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec,        \
       ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,    \
@@ -416,7 +416,7 @@
 #undef DECLS
 
 #define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
-  uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt(               \
+  uint32_t aom_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt(               \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
       const uint8_t *sec8) {                                                   \
@@ -424,23 +424,23 @@
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
     uint16_t *sec = CONVERT_TO_SHORTPTR(sec8);                                 \
-    int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
+    int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
         src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
         NULL, NULL);                                                           \
     if (w > wf) {                                                              \
       uint32_t sse2;                                                           \
-      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+      int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
           sec + 16, w, h, &sse2, NULL, NULL);                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+        se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
             sec + 32, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+        se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
             sec + 48, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
@@ -451,7 +451,7 @@
     return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt(              \
+  uint32_t aom_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt(              \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
       const uint8_t *sec8) {                                                   \
@@ -459,23 +459,23 @@
     uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
     uint16_t *sec = CONVERT_TO_SHORTPTR(sec8);                                 \
-    int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
+    int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
         src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
         NULL, NULL);                                                           \
     if (w > wf) {                                                              \
       uint32_t sse2;                                                           \
-      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+      int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
           sec + 16, w, h, &sse2, NULL, NULL);                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+        se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
             sec + 32, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+        se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
             sec + 48, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
@@ -488,7 +488,7 @@
     return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
   }                                                                            \
                                                                                \
-  uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt(              \
+  uint32_t aom_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt(              \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
       const uint8_t *sec8) {                                                   \
@@ -502,27 +502,27 @@
     for (start_row = 0; start_row < h; start_row += 16) {                      \
       uint32_t sse2;                                                           \
       int height = h - start_row < 16 ? h - start_row : 16;                    \
-      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+      int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
           src + (start_row * src_stride), src_stride, x_offset, y_offset,      \
           dst + (start_row * dst_stride), dst_stride, sec + (start_row * w),   \
           w, height, &sse2, NULL, NULL);                                       \
       se += se2;                                                               \
       long_sse += sse2;                                                        \
       if (w > wf) {                                                            \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+        se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
             src + 16 + (start_row * src_stride), src_stride, x_offset,         \
             y_offset, dst + 16 + (start_row * dst_stride), dst_stride,         \
             sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL);         \
         se += se2;                                                             \
         long_sse += sse2;                                                      \
         if (w > wf * 2) {                                                      \
-          se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+          se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
               src + 32 + (start_row * src_stride), src_stride, x_offset,       \
               y_offset, dst + 32 + (start_row * dst_stride), dst_stride,       \
               sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL);       \
           se += se2;                                                           \
           long_sse += sse2;                                                    \
-          se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+          se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
               src + 48 + (start_row * src_stride), src_stride, x_offset,       \
               y_offset, dst + 48 + (start_row * dst_stride), dst_stride,       \
               sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL);       \
@@ -555,7 +555,7 @@
 #undef FNS
 #undef FN
 
-void vpx_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
+void aom_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
                                     const uint8_t *ref8, int ref_stride) {
   int i, j;
   int stride = ref_stride << 3;
@@ -612,7 +612,7 @@
   }
 }
 
-void vpx_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
+void aom_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
                                              const uint8_t *pred8, int width,
                                              int height, const uint8_t *ref8,
                                              int ref_stride) {
diff --git a/aom_dsp/x86/highbd_variance_sse4.c b/aom_dsp/x86/highbd_variance_sse4.c
index fb4bd8b..75d7038 100644
--- a/aom_dsp/x86/highbd_variance_sse4.c
+++ b/aom_dsp/x86/highbd_variance_sse4.c
@@ -10,11 +10,11 @@
 
 #include <smmintrin.h> /* SSE4.1 */
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "aom_dsp/variance.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 static INLINE void variance4x4_64_sse4_1(const uint8_t *a8, int a_stride,
                                          const uint8_t *b8, int b_stride,
@@ -65,7 +65,7 @@
   *sum = (int64_t)_mm_extract_epi32(y0, 0);
 }
 
-uint32_t vpx_highbd_8_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_8_variance4x4_sse4_1(const uint8_t *a, int a_stride,
                                          const uint8_t *b, int b_stride,
                                          uint32_t *sse) {
   int64_t sum;
@@ -77,7 +77,7 @@
   return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
-uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_10_variance4x4_sse4_1(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
   int64_t sum;
@@ -90,7 +90,7 @@
   return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
-uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_12_variance4x4_sse4_1(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
   int64_t sum;
@@ -104,54 +104,54 @@
 }
 
 // Sub-pixel
-uint32_t vpx_highbd_8_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_8_sub_pixel_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse) {
   uint16_t fdata3[(4 + 1) * 4];
   uint16_t temp2[4 * 4];
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  return vpx_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst, dst_stride,
+  return aom_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst, dst_stride,
                                   sse);
 }
 
-uint32_t vpx_highbd_10_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_10_sub_pixel_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse) {
   uint16_t fdata3[(4 + 1) * 4];
   uint16_t temp2[4 * 4];
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  return vpx_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
+  return aom_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
                                    dst_stride, sse);
 }
 
-uint32_t vpx_highbd_12_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_12_sub_pixel_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse) {
   uint16_t fdata3[(4 + 1) * 4];
   uint16_t temp2[4 * 4];
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  return vpx_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
+  return aom_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
                                    dst_stride, sse);
 }
 
 // Sub-pixel average
 
-uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_8_sub_pixel_avg_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse,
     const uint8_t *second_pred) {
@@ -159,19 +159,19 @@
   uint16_t temp2[4 * 4];
   DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+  aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
                            4);
 
-  return vpx_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst, dst_stride,
+  return aom_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst, dst_stride,
                                   sse);
 }
 
-uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_10_sub_pixel_avg_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse,
     const uint8_t *second_pred) {
@@ -179,19 +179,19 @@
   uint16_t temp2[4 * 4];
   DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+  aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
                            4);
 
-  return vpx_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
+  return aom_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
                                    dst_stride, sse);
 }
 
-uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_12_sub_pixel_avg_variance4x4_sse4_1(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, uint32_t *sse,
     const uint8_t *second_pred) {
@@ -199,14 +199,14 @@
   uint16_t temp2[4 * 4];
   DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
 
-  vpx_highbd_var_filter_block2d_bil_first_pass(
+  aom_highbd_var_filter_block2d_bil_first_pass(
       src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
-  vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+  aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
                                                 bilinear_filters_2t[yoffset]);
 
-  vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+  aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
                            4);
 
-  return vpx_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
+  return aom_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
                                    dst_stride, sse);
 }
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 97ae44b..7504b32 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/x86/inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
@@ -21,7 +21,7 @@
     *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
   }
 
-void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
@@ -152,7 +152,7 @@
   }
 }
 
-void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -448,7 +448,7 @@
     out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
 
-void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -480,7 +480,7 @@
 
   // 2-D
   for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
+    // 8x8 Transpose is copied from aom_fdct8x8_sse2()
     TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                   in4, in5, in6, in7);
 
@@ -518,7 +518,7 @@
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
 
-void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -556,7 +556,7 @@
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
-  // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
+  // 8x8 Transpose is copied from aom_fdct8x8_sse2()
   TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
                 in1, in2, in3, in4, in5, in6, in7);
 
@@ -793,7 +793,7 @@
   in[7] = _mm_sub_epi16(k__const_0, s1);
 }
 
-void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -1163,7 +1163,7 @@
                            stp2_12)                                            \
   }
 
-void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1288,7 +1288,7 @@
   }
 }
 
-void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -2133,7 +2133,7 @@
   iadst16_8col(in1);
 }
 
-void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3007,7 +3007,7 @@
   }
 
 // Only upper-left 8x8 has non-zero coeff
-void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3164,7 +3164,7 @@
   }
 }
 
-void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
                                  int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3438,7 +3438,7 @@
   }
 }
 
-void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -3458,7 +3458,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
   const __m128i zero = _mm_set1_epi16(0);
@@ -3472,7 +3472,7 @@
   return retval;
 }
 
-void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
@@ -3535,7 +3535,7 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vpx_highbd_idct4_c(input, outptr, bd);
+      aom_highbd_idct4_c(input, outptr, bd);
       input += 4;
       outptr += 4;
     }
@@ -3578,7 +3578,7 @@
     // Columns
     for (i = 0; i < 4; ++i) {
       for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-      vpx_highbd_idct4_c(temp_in, temp_out, bd);
+      aom_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3587,7 +3587,7 @@
   }
 }
 
-void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -3652,7 +3652,7 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 8; ++i) {
-      vpx_highbd_idct8_c(input, outptr, bd);
+      aom_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
@@ -3678,7 +3678,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vpx_highbd_idct8_c(temp_in, temp_out, bd);
+      aom_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3687,7 +3687,7 @@
   }
 }
 
-void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
@@ -3755,7 +3755,7 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vpx_highbd_idct8_c(input, outptr, bd);
+      aom_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
@@ -3781,7 +3781,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vpx_highbd_idct8_c(temp_in, temp_out, bd);
+      aom_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3790,7 +3790,7 @@
   }
 }
 
-void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                        int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -3863,7 +3863,7 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 16; ++i) {
-      vpx_highbd_idct16_c(input, outptr, bd);
+      aom_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
@@ -3894,7 +3894,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vpx_highbd_idct16_c(temp_in, temp_out, bd);
+      aom_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3903,7 +3903,7 @@
   }
 }
 
-void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                       int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -3981,7 +3981,7 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vpx_highbd_idct16_c(input, outptr, bd);
+      aom_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
@@ -4012,7 +4012,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vpx_highbd_idct16_c(temp_in, temp_out, bd);
+      aom_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4020,4 +4020,4 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index d7841bb..6edb91d 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
-#define VPX_DSP_X86_INV_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
+#define AOM_DSP_X86_INV_TXFM_SSE2_H_
 
 #include <emmintrin.h>  // SSE2
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
@@ -93,7 +93,7 @@
 // Function to allow 8 bit optimisations to be used when profile 0 is used with
 // highbitdepth enabled
 static INLINE __m128i load_input_data(const tran_low_t *data) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
                         data[6], data[7]);
 #else
@@ -193,4 +193,4 @@
 void iadst8_sse2(__m128i *in);
 void iadst16_sse2(__m128i *in0, __m128i *in1);
 
-#endif  // VPX_DSP_X86_INV_TXFM_SSE2_H_
+#endif  // AOM_DSP_X86_INV_TXFM_SSE2_H_
diff --git a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
index 20baf82..3890926 100644
--- a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
+++ b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
@@ -220,7 +220,7 @@
   mova    m12, [pw_11585x2]
 
   lea      r3, [2 * strideq]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova     m0, [inputq +   0]
   packssdw m0, [inputq +  16]
   mova     m1, [inputq +  32]
@@ -271,7 +271,7 @@
 
   lea        r3, [2 * strideq]
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       m0, [inputq +   0]
   packssdw   m0, [inputq +  16]
   mova       m1, [inputq +  32]
@@ -793,7 +793,7 @@
   lea             r4, [rsp + transposed_in]
 
 idct32x32_34_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1223,7 +1223,7 @@
   mov             r7, 2
 
 idct32x32_135_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1261,7 +1261,7 @@
   mova [r4 + 16 * 6], m6
   mova [r4 + 16 * 7], m7
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   add             r3, 32
 %else
   add             r3, 16
@@ -1272,7 +1272,7 @@
 
   IDCT32X32_135 16*0, 16*32, 16*64, 16*96
   lea            stp, [stp + 16 * 8]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea         inputq, [inputq + 32 * 32]
 %else
   lea         inputq, [inputq + 16 * 32]
@@ -1687,7 +1687,7 @@
   mov             r7, 4
 
 idct32x32_1024_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1725,7 +1725,7 @@
   mova [r4 + 16 * 5], m5
   mova [r4 + 16 * 6], m6
   mova [r4 + 16 * 7], m7
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   add             r3, 32
 %else
   add             r3, 16
@@ -1737,7 +1737,7 @@
   IDCT32X32_1024 16*0, 16*32, 16*64, 16*96
 
   lea            stp, [stp + 16 * 8]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea         inputq, [inputq + 32 * 32]
 %else
   lea         inputq, [inputq + 16 * 32]
diff --git a/aom_dsp/x86/inv_wht_sse2.asm b/aom_dsp/x86/inv_wht_sse2.asm
index fbbcd76..ee80563 100644
--- a/aom_dsp/x86/inv_wht_sse2.asm
+++ b/aom_dsp/x86/inv_wht_sse2.asm
@@ -82,7 +82,7 @@
 
 INIT_XMM sse2
 cglobal iwht4x4_16_add, 3, 3, 7, input, output, stride
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0,        [inputq +  0]
   packssdw        m0,        [inputq + 16]
   mova            m1,        [inputq + 32]
diff --git a/aom_dsp/x86/loopfilter_avx2.c b/aom_dsp/x86/loopfilter_avx2.c
index f444c5d..fd73def 100644
--- a/aom_dsp/x86/loopfilter_avx2.c
+++ b/aom_dsp/x86/loopfilter_avx2.c
@@ -10,10 +10,10 @@
 
 #include <immintrin.h> /* AVX2 */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
-void vpx_lpf_horizontal_edge_8_avx2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_8_avx2(unsigned char *s, int p,
                                     const unsigned char *_blimit,
                                     const unsigned char *_limit,
                                     const unsigned char *_thresh) {
@@ -101,7 +101,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+    /* (aom_filter + 3 * (qs0 - ps0)) & mask */
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
@@ -367,7 +367,7 @@
   8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128
 };
 
-void vpx_lpf_horizontal_edge_16_avx2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_16_avx2(unsigned char *s, int p,
                                      const unsigned char *_blimit,
                                      const unsigned char *_limit,
                                      const unsigned char *_thresh) {
@@ -480,7 +480,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+    /* (aom_filter + 3 * (qs0 - ps0)) & mask */
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
diff --git a/aom_dsp/x86/loopfilter_sse2.c b/aom_dsp/x86/loopfilter_sse2.c
index aaa42f3..3260a7e 100644
--- a/aom_dsp/x86/loopfilter_sse2.c
+++ b/aom_dsp/x86/loopfilter_sse2.c
@@ -10,7 +10,7 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/emmintrin_compat.h"
 
@@ -103,7 +103,7 @@
     ps1ps0 = _mm_xor_si128(ps1ps0, t80); /* ^ 0x80 */                       \
   } while (0)
 
-void vpx_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
+void aom_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
                                const uint8_t *_blimit, const uint8_t *_limit,
                                const uint8_t *_thresh) {
   const __m128i zero = _mm_set1_epi16(0);
@@ -138,7 +138,7 @@
   _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(qs1qs0));  // *oq1
 }
 
-void vpx_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
+void aom_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
                              const uint8_t *_blimit, const uint8_t *_limit,
                              const uint8_t *_thresh) {
   const __m128i zero = _mm_set1_epi16(0);
@@ -229,7 +229,7 @@
   *(int *)(s + 7 * p - 2) = _mm_cvtsi128_si32(qs1qs0);
 }
 
-void vpx_lpf_horizontal_edge_8_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_8_sse2(unsigned char *s, int p,
                                     const unsigned char *_blimit,
                                     const unsigned char *_limit,
                                     const unsigned char *_thresh) {
@@ -309,7 +309,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    // (vpx_filter + 3 * (qs0 - ps0)) & mask
+    // (aom_filter + 3 * (qs0 - ps0)) & mask
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
@@ -591,7 +591,7 @@
   return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result);
 }
 
-void vpx_lpf_horizontal_edge_16_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_16_sse2(unsigned char *s, int p,
                                      const unsigned char *_blimit,
                                      const unsigned char *_limit,
                                      const unsigned char *_thresh) {
@@ -702,7 +702,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    // (vpx_filter + 3 * (qs0 - ps0)) & mask
+    // (aom_filter + 3 * (qs0 - ps0)) & mask
     filt = _mm_and_si128(filt, mask);
     filter1 = _mm_adds_epi8(filt, t4);
     filter2 = _mm_adds_epi8(filt, t3);
@@ -923,7 +923,7 @@
   }
 }
 
-void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_8_sse2(unsigned char *s, int p,
                                const unsigned char *_blimit,
                                const unsigned char *_limit,
                                const unsigned char *_thresh) {
@@ -1066,7 +1066,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    // (vpx_filter + 3 * (qs0 - ps0)) & mask
+    // (aom_filter + 3 * (qs0 - ps0)) & mask
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
@@ -1135,7 +1135,7 @@
   }
 }
 
-void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
+void aom_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
                                     const uint8_t *_limit0,
                                     const uint8_t *_thresh0,
                                     const uint8_t *_blimit1,
@@ -1302,7 +1302,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    // (vpx_filter + 3 * (qs0 - ps0)) & mask
+    // (aom_filter + 3 * (qs0 - ps0)) & mask
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
@@ -1377,7 +1377,7 @@
   }
 }
 
-void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
                                     const unsigned char *_blimit0,
                                     const unsigned char *_limit0,
                                     const unsigned char *_thresh0,
@@ -1471,7 +1471,7 @@
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
     filt = _mm_adds_epi8(filt, work_a);
-    // (vpx_filter + 3 * (qs0 - ps0)) & mask
+    // (aom_filter + 3 * (qs0 - ps0)) & mask
     filt = _mm_and_si128(filt, mask);
 
     filter1 = _mm_adds_epi8(filt, t4);
@@ -1657,7 +1657,7 @@
   } while (++idx8x8 < num_8x8_to_transpose);
 }
 
-void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
                                   const uint8_t *limit0, const uint8_t *thresh0,
                                   const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
@@ -1669,7 +1669,7 @@
   transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
 
   // Loop filtering
-  vpx_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
+  aom_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
                                  blimit1, limit1, thresh1);
   src[0] = t_dst;
   src[1] = t_dst + 8;
@@ -1680,7 +1680,7 @@
   transpose(src, 16, dst, p, 2);
 }
 
-void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_8_sse2(unsigned char *s, int p,
                              const unsigned char *blimit,
                              const unsigned char *limit,
                              const unsigned char *thresh) {
@@ -1695,7 +1695,7 @@
   transpose(src, p, dst, 8, 1);
 
   // Loop filtering
-  vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh);
+  aom_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh);
 
   src[0] = t_dst;
   dst[0] = s - 4;
@@ -1704,7 +1704,7 @@
   transpose(src, 8, dst, p, 1);
 }
 
-void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
                                   const uint8_t *limit0, const uint8_t *thresh0,
                                   const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
@@ -1716,7 +1716,7 @@
   transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
 
   // Loop filtering
-  vpx_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
+  aom_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
                                  blimit1, limit1, thresh1);
   src[0] = t_dst;
   src[1] = t_dst + 8;
@@ -1728,7 +1728,7 @@
   transpose(src, 16, dst, p, 2);
 }
 
-void vpx_lpf_vertical_16_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_16_sse2(unsigned char *s, int p,
                               const unsigned char *blimit,
                               const unsigned char *limit,
                               const unsigned char *thresh) {
@@ -1745,7 +1745,7 @@
   transpose(src, p, dst, 8, 2);
 
   // Loop filtering
-  vpx_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh);
+  aom_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh);
 
   src[0] = t_dst;
   src[1] = t_dst + 8 * 8;
@@ -1756,7 +1756,7 @@
   transpose(src, 8, dst, p, 2);
 }
 
-void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
                                    const uint8_t *blimit, const uint8_t *limit,
                                    const uint8_t *thresh) {
   DECLARE_ALIGNED(16, unsigned char, t_dst[256]);
@@ -1766,7 +1766,7 @@
   transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
 
   // Loop filtering
-  vpx_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh);
+  aom_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh);
 
   // Transpose back
   transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
diff --git a/aom_dsp/x86/masked_sad_intrin_ssse3.c b/aom_dsp/x86/masked_sad_intrin_ssse3.c
index cf1fd76..44d5011 100644
--- a/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -13,8 +13,8 @@
 #include <tmmintrin.h>
 
 #include "aom_ports/mem.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
   __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
@@ -46,7 +46,7 @@
     const uint8_t *m_ptr, int m_stride, int height);
 
 #define MASKSADMXN_SSSE3(m, n)                                                 \
-  unsigned int vpx_masked_sad##m##x##n##_ssse3(                                \
+  unsigned int aom_masked_sad##m##x##n##_ssse3(                                \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,  \
       const uint8_t *msk, int msk_stride) {                                    \
     return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
@@ -68,7 +68,7 @@
 MASKSADMXN_SSSE3(16, 8)
 
 #define MASKSAD8XN_SSSE3(n)                                                   \
-  unsigned int vpx_masked_sad8x##n##_ssse3(                                   \
+  unsigned int aom_masked_sad8x##n##_ssse3(                                   \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk,         \
@@ -80,7 +80,7 @@
 MASKSAD8XN_SSSE3(4)
 
 #define MASKSAD4XN_SSSE3(n)                                                   \
-  unsigned int vpx_masked_sad4x##n##_ssse3(                                   \
+  unsigned int aom_masked_sad4x##n##_ssse3(                                   \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk,         \
@@ -203,7 +203,7 @@
   return (_mm_cvtsi128_si32(res) + 31) >> 6;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
                                                int stride) {
   __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
@@ -220,7 +220,7 @@
     const uint8_t *m_ptr, int m_stride, int height);
 
 #define HIGHBD_MASKSADMXN_SSSE3(m, n)                                         \
-  unsigned int vpx_highbd_masked_sad##m##x##n##_ssse3(                        \
+  unsigned int aom_highbd_masked_sad##m##x##n##_ssse3(                        \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk,     \
@@ -245,7 +245,7 @@
 HIGHBD_MASKSADMXN_SSSE3(8, 4)
 
 #define HIGHBD_MASKSAD4XN_SSSE3(n)                                            \
-  unsigned int vpx_highbd_masked_sad4x##n##_ssse3(                            \
+  unsigned int aom_highbd_masked_sad4x##n##_ssse3(                            \
       const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
       const uint8_t *msk, int msk_stride) {                                   \
     return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk,  \
@@ -330,4 +330,4 @@
   // sad = (sad + 31) >> 6;
   return (_mm_cvtsi128_si32(res) + 31) >> 6;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/masked_variance_intrin_ssse3.c b/aom_dsp/x86/masked_variance_intrin_ssse3.c
index c18f870..2a838a6 100644
--- a/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -13,10 +13,10 @@
 #include <emmintrin.h>
 #include <tmmintrin.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 // Half pixel shift
 #define HALF_PIXEL_OFFSET (BIL_SUBPEL_SHIFTS / 2)
@@ -44,14 +44,14 @@
 #endif
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
   const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
   const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
   const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
   return hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
                                             uint32_t *sse, const int w,
@@ -144,7 +144,7 @@
 }
 
 #define MASKED_VARWXH(W, H)                                                   \
-  unsigned int vpx_masked_variance##W##x##H##_ssse3(                          \
+  unsigned int aom_masked_variance##W##x##H##_ssse3(                          \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,         \
       const uint8_t *m, int m_stride, unsigned int *sse) {                    \
     return masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, m_stride, W, \
@@ -219,7 +219,7 @@
 }
 
 #define MASKED_VAR8XH(H)                                                      \
-  unsigned int vpx_masked_variance8x##H##_ssse3(                              \
+  unsigned int aom_masked_variance8x##H##_ssse3(                              \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,         \
       const uint8_t *m, int m_stride, unsigned int *sse) {                    \
     return masked_variance8xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
@@ -294,7 +294,7 @@
 }
 
 #define MASKED_VAR4XH(H)                                                      \
-  unsigned int vpx_masked_variance4x##H##_ssse3(                              \
+  unsigned int aom_masked_variance4x##H##_ssse3(                              \
       const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,         \
       const uint8_t *m, int m_stride, unsigned int *sse) {                    \
     return masked_variance4xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
@@ -304,7 +304,7 @@
 MASKED_VAR4XH(4)
 MASKED_VAR4XH(8)
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 
 // Main calculation for n*8 wide blocks
 static INLINE void highbd_masked_variance64_ssse3(
@@ -517,7 +517,7 @@
 }
 
 #define HIGHBD_MASKED_VARWXH(W, H)                                         \
-  unsigned int vpx_highbd_masked_variance##W##x##H##_ssse3(                \
+  unsigned int aom_highbd_masked_variance##W##x##H##_ssse3(                \
       const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride,    \
       const uint8_t *m, int m_stride, unsigned int *sse) {                 \
     uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                 \
@@ -526,7 +526,7 @@
                                            m_stride, W, H, sse);           \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_10_masked_variance##W##x##H##_ssse3(             \
+  unsigned int aom_highbd_10_masked_variance##W##x##H##_ssse3(             \
       const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride,    \
       const uint8_t *m, int m_stride, unsigned int *sse) {                 \
     uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                 \
@@ -535,7 +535,7 @@
                                               m_stride, W, H, sse);        \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_12_masked_variance##W##x##H##_ssse3(             \
+  unsigned int aom_highbd_12_masked_variance##W##x##H##_ssse3(             \
       const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride,    \
       const uint8_t *m, int m_stride, unsigned int *sse) {                 \
     uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                 \
@@ -644,7 +644,7 @@
 }
 
 // Functions for width (W) >= 16
-unsigned int vpx_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
                                             int yoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -689,7 +689,7 @@
   }
   return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
 }
-unsigned int vpx_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
                                             int xoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -720,7 +720,7 @@
   }
   return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
 }
-unsigned int vpx_masked_subpel_varWxH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_varWxH_xnonzero_ynonzero(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse, int w, int h, filter_fn_t xfilter_fn,
@@ -780,7 +780,7 @@
 
 // Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
 // xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
                                             int yoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -849,7 +849,7 @@
 }
 
 // Note order in which rows loaded xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int vpx_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
                                             int yoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -904,7 +904,7 @@
 
 // Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
 // xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
                                             int xoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -969,7 +969,7 @@
   return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
 }
 
-unsigned int vpx_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
                                             int xoffset, const uint8_t *dst,
                                             int dst_stride, const uint8_t *msk,
                                             int msk_stride, unsigned int *sse,
@@ -1018,7 +1018,7 @@
 
 // Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
 // xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_var4xH_xnonzero_ynonzero(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse, int h) {
@@ -1117,7 +1117,7 @@
   return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
 }
 
-unsigned int vpx_masked_subpel_var8xH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_var8xH_xnonzero_ynonzero(
     const uint8_t *src, int src_stride, int xoffset, int yoffset,
     const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse, int h) {
@@ -1226,49 +1226,49 @@
 
 // For W >=16
 #define MASK_SUBPIX_VAR_LARGE(W, H)                                            \
-  unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3(                 \
+  unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3(                 \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,            \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,  \
       unsigned int *sse) {                                                     \
     assert(W % 16 == 0);                                                       \
     if (xoffset == 0) {                                                        \
       if (yoffset == 0)                                                        \
-        return vpx_masked_variance##W##x##H##_ssse3(                           \
+        return aom_masked_variance##W##x##H##_ssse3(                           \
             src, src_stride, dst, dst_stride, msk, msk_stride, sse);           \
       else if (yoffset == HALF_PIXEL_OFFSET)                                   \
-        return vpx_masked_subpel_varWxH_xzero(                                 \
+        return aom_masked_subpel_varWxH_xzero(                                 \
             src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk,          \
             msk_stride, sse, W, H, apply_filter_avg);                          \
       else                                                                     \
-        return vpx_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst,   \
+        return aom_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst,   \
                                               dst_stride, msk, msk_stride,     \
                                               sse, W, H, apply_filter);        \
     } else if (yoffset == 0) {                                                 \
       if (xoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_masked_subpel_varWxH_yzero(                                 \
+        return aom_masked_subpel_varWxH_yzero(                                 \
             src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk,          \
             msk_stride, sse, W, H, apply_filter_avg);                          \
       else                                                                     \
-        return vpx_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst,   \
+        return aom_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst,   \
                                               dst_stride, msk, msk_stride,     \
                                               sse, W, H, apply_filter);        \
     } else if (xoffset == HALF_PIXEL_OFFSET) {                                 \
       if (yoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_masked_subpel_varWxH_xnonzero_ynonzero(                     \
+        return aom_masked_subpel_varWxH_xnonzero_ynonzero(                     \
             src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst,        \
             dst_stride, msk, msk_stride, sse, W, H, apply_filter_avg,          \
             apply_filter_avg);                                                 \
       else                                                                     \
-        return vpx_masked_subpel_varWxH_xnonzero_ynonzero(                     \
+        return aom_masked_subpel_varWxH_xnonzero_ynonzero(                     \
             src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
             msk_stride, sse, W, H, apply_filter_avg, apply_filter);            \
     } else {                                                                   \
       if (yoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_masked_subpel_varWxH_xnonzero_ynonzero(                     \
+        return aom_masked_subpel_varWxH_xnonzero_ynonzero(                     \
             src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
             msk_stride, sse, W, H, apply_filter, apply_filter_avg);            \
       else                                                                     \
-        return vpx_masked_subpel_varWxH_xnonzero_ynonzero(                     \
+        return aom_masked_subpel_varWxH_xnonzero_ynonzero(                     \
             src, src_stride, xoffset, yoffset, dst, dst_stride, msk,           \
             msk_stride, sse, W, H, apply_filter, apply_filter);                \
     }                                                                          \
@@ -1276,22 +1276,22 @@
 
 // For W < 16
 #define MASK_SUBPIX_VAR_SMALL(W, H)                                            \
-  unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3(                 \
+  unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3(                 \
       const uint8_t *src, int src_stride, int xoffset, int yoffset,            \
       const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,  \
       unsigned int *sse) {                                                     \
     assert(W == 4 || W == 8);                                                  \
     if (xoffset == 0 && yoffset == 0)                                          \
-      return vpx_masked_variance##W##x##H##_ssse3(                             \
+      return aom_masked_variance##W##x##H##_ssse3(                             \
           src, src_stride, dst, dst_stride, msk, msk_stride, sse);             \
     else if (xoffset == 0)                                                     \
-      return vpx_masked_subpel_var##W##xH_xzero(                               \
+      return aom_masked_subpel_var##W##xH_xzero(                               \
           src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H); \
     else if (yoffset == 0)                                                     \
-      return vpx_masked_subpel_var##W##xH_yzero(                               \
+      return aom_masked_subpel_var##W##xH_yzero(                               \
           src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H); \
     else                                                                       \
-      return vpx_masked_subpel_var##W##xH_xnonzero_ynonzero(                   \
+      return aom_masked_subpel_var##W##xH_xnonzero_ynonzero(                   \
           src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
           sse, H);                                                             \
   }
@@ -1315,7 +1315,7 @@
 MASK_SUBPIX_VAR_LARGE(128, 128)
 #endif  // CONFIG_EXT_PARTITION
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef uint32_t (*highbd_calc_masked_var_t)(__m128i v_sum_d, __m128i v_sse_q,
                                              uint32_t *sse, const int w,
                                              const int h);
@@ -1446,7 +1446,7 @@
 }
 
 // High bit depth functions for width (W) >= 8
-unsigned int vpx_highbd_masked_subpel_varWxH_xzero(
+unsigned int aom_highbd_masked_subpel_varWxH_xzero(
     const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
     int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
     int w, int h, highbd_filter_fn_t filter_fn,
@@ -1491,7 +1491,7 @@
   }
   return calc_var(v_sum_d, v_sse_q, sse, w, h);
 }
-unsigned int vpx_highbd_masked_subpel_varWxH_yzero(
+unsigned int aom_highbd_masked_subpel_varWxH_yzero(
     const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
     int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
     int w, int h, highbd_filter_fn_t filter_fn,
@@ -1523,7 +1523,7 @@
   return calc_var(v_sum_d, v_sse_q, sse, w, h);
 }
 
-unsigned int vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
+unsigned int aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
     const uint16_t *src, int src_stride, int xoffset, int yoffset,
     const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse, int w, int h, highbd_filter_fn_t xfilter_fn,
@@ -1584,7 +1584,7 @@
 }
 
 // Note order in which rows loaded xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int vpx_highbd_masked_subpel_var4xH_xzero(
+unsigned int aom_highbd_masked_subpel_var4xH_xzero(
     const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
     int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
     int h, highbd_calc_masked_var_t calc_var) {
@@ -1635,7 +1635,7 @@
   return calc_var(v_sum_d, v_sse_q, sse, 4, h);
 }
 
-unsigned int vpx_highbd_masked_subpel_var4xH_yzero(
+unsigned int aom_highbd_masked_subpel_var4xH_yzero(
     const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
     int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
     int h, highbd_calc_masked_var_t calc_var) {
@@ -1683,7 +1683,7 @@
   return calc_var(v_sum_d, v_sse_q, sse, 4, h);
 }
 
-unsigned int vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
+unsigned int aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
     const uint16_t *src, int src_stride, int xoffset, int yoffset,
     const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
     unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
@@ -1811,41 +1811,41 @@
         return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
                                       msk_stride, sse);                        \
       else if (yoffset == HALF_PIXEL_OFFSET)                                   \
-        return vpx_highbd_masked_subpel_varWxH_xzero(                          \
+        return aom_highbd_masked_subpel_varWxH_xzero(                          \
             src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk,          \
             msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var);         \
       else                                                                     \
-        return vpx_highbd_masked_subpel_varWxH_xzero(                          \
+        return aom_highbd_masked_subpel_varWxH_xzero(                          \
             src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse,   \
             W, H, highbd_apply_filter, calc_var);                              \
     } else if (yoffset == 0) {                                                 \
       if (xoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_highbd_masked_subpel_varWxH_yzero(                          \
+        return aom_highbd_masked_subpel_varWxH_yzero(                          \
             src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk,          \
             msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var);         \
       else                                                                     \
-        return vpx_highbd_masked_subpel_varWxH_yzero(                          \
+        return aom_highbd_masked_subpel_varWxH_yzero(                          \
             src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse,   \
             W, H, highbd_apply_filter, calc_var);                              \
     } else if (xoffset == HALF_PIXEL_OFFSET) {                                 \
       if (yoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
+        return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
             src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst,        \
             dst_stride, msk, msk_stride, sse, W, H, highbd_apply_filter_avg,   \
             highbd_apply_filter_avg, calc_var);                                \
       else                                                                     \
-        return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
+        return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
             src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
             msk_stride, sse, W, H, highbd_apply_filter_avg,                    \
             highbd_apply_filter, calc_var);                                    \
     } else {                                                                   \
       if (yoffset == HALF_PIXEL_OFFSET)                                        \
-        return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
+        return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
             src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
             msk_stride, sse, W, H, highbd_apply_filter,                        \
             highbd_apply_filter_avg, calc_var);                                \
       else                                                                     \
-        return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
+        return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(              \
             src, src_stride, xoffset, yoffset, dst, dst_stride, msk,           \
             msk_stride, sse, W, H, highbd_apply_filter, highbd_apply_filter,   \
             calc_var);                                                         \
@@ -1866,46 +1866,46 @@
       return full_variance_function(src8, src_stride, dst8, dst_stride, msk,   \
                                     msk_stride, sse);                          \
     else if (xoffset == 0)                                                     \
-      return vpx_highbd_masked_subpel_var4xH_xzero(                            \
+      return aom_highbd_masked_subpel_var4xH_xzero(                            \
           src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H,  \
           calc_var);                                                           \
     else if (yoffset == 0)                                                     \
-      return vpx_highbd_masked_subpel_var4xH_yzero(                            \
+      return aom_highbd_masked_subpel_var4xH_yzero(                            \
           src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H,  \
           calc_var);                                                           \
     else                                                                       \
-      return vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero(                \
+      return aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero(                \
           src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
           sse, H, calc_var);                                                   \
   }
 
 #define HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(W, H)                                  \
-  unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_ssse3(          \
+  unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_ssse3(          \
       const uint8_t *src8, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                     \
     return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(                  \
         src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
         sse, calc_masked_variance,                                             \
-        vpx_highbd_masked_variance##W##x##H##_ssse3);                          \
+        aom_highbd_masked_variance##W##x##H##_ssse3);                          \
   }                                                                            \
-  unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3(       \
+  unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3(       \
       const uint8_t *src8, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                     \
     return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(                  \
         src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
         sse, highbd_10_calc_masked_variance,                                   \
-        vpx_highbd_10_masked_variance##W##x##H##_ssse3);                       \
+        aom_highbd_10_masked_variance##W##x##H##_ssse3);                       \
   }                                                                            \
-  unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3(       \
+  unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3(       \
       const uint8_t *src8, int src_stride, int xoffset, int yoffset,           \
       const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
       unsigned int *sse) {                                                     \
     return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(                  \
         src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
         sse, highbd_12_calc_masked_variance,                                   \
-        vpx_highbd_12_masked_variance##W##x##H##_ssse3);                       \
+        aom_highbd_12_masked_variance##W##x##H##_ssse3);                       \
   }
 
 HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 4)
diff --git a/aom_dsp/x86/obmc_sad_sse4.c b/aom_dsp/x86/obmc_sad_sse4.c
index b4c839b..97943f8 100644
--- a/aom_dsp/x86/obmc_sad_sse4.c
+++ b/aom_dsp/x86/obmc_sad_sse4.c
@@ -11,11 +11,11 @@
 #include <assert.h>
 #include <immintrin.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/synonyms.h"
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -107,7 +107,7 @@
 }
 
 #define OBMCSADWXH(w, h)                                       \
-  unsigned int vpx_obmc_sad##w##x##h##_sse4_1(                 \
+  unsigned int aom_obmc_sad##w##x##h##_sse4_1(                 \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
       const int32_t *msk) {                                    \
     if (w == 4) {                                              \
@@ -140,7 +140,7 @@
 // High bit-depth
 ////////////////////////////////////////////////////////////////////////////////
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE unsigned int hbd_obmc_sad_w4(const uint8_t *pre8,
                                            const int pre_stride,
                                            const int32_t *wsrc,
@@ -230,7 +230,7 @@
 }
 
 #define HBD_OBMCSADWXH(w, h)                                      \
-  unsigned int vpx_highbd_obmc_sad##w##x##h##_sse4_1(             \
+  unsigned int aom_highbd_obmc_sad##w##x##h##_sse4_1(             \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,    \
       const int32_t *mask) {                                      \
     if (w == 4) {                                                 \
@@ -258,4 +258,4 @@
 HBD_OBMCSADWXH(8, 4)
 HBD_OBMCSADWXH(4, 8)
 HBD_OBMCSADWXH(4, 4)
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/obmc_variance_sse4.c b/aom_dsp/x86/obmc_variance_sse4.c
index 71c3c7e..0142551 100644
--- a/aom_dsp/x86/obmc_variance_sse4.c
+++ b/aom_dsp/x86/obmc_variance_sse4.c
@@ -11,13 +11,13 @@
 #include <assert.h>
 #include <immintrin.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/synonyms.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 ////////////////////////////////////////////////////////////////////////////////
 // 8 bit
@@ -114,7 +114,7 @@
 }
 
 #define OBMCVARWXH(W, H)                                               \
-  unsigned int vpx_obmc_variance##W##x##H##_sse4_1(                    \
+  unsigned int aom_obmc_variance##W##x##H##_sse4_1(                    \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,         \
       const int32_t *mask, unsigned int *sse) {                        \
     int sum;                                                           \
@@ -149,7 +149,7 @@
 // High bit-depth
 ////////////////////////////////////////////////////////////////////////////////
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void hbd_obmc_variance_w4(
     const uint8_t *pre8, const int pre_stride, const int32_t *wsrc,
     const int32_t *mask, uint64_t *const sse, int64_t *const sum, const int h) {
@@ -305,7 +305,7 @@
 }
 
 #define HBD_OBMCVARWXH(W, H)                                               \
-  unsigned int vpx_highbd_obmc_variance##W##x##H##_sse4_1(                 \
+  unsigned int aom_highbd_obmc_variance##W##x##H##_sse4_1(                 \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -313,7 +313,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                        \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_10_obmc_variance##W##x##H##_sse4_1(              \
+  unsigned int aom_highbd_10_obmc_variance##W##x##H##_sse4_1(              \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -321,7 +321,7 @@
     return *sse - (((int64_t)sum * sum) / (W * H));                        \
   }                                                                        \
                                                                            \
-  unsigned int vpx_highbd_12_obmc_variance##W##x##H##_sse4_1(              \
+  unsigned int aom_highbd_12_obmc_variance##W##x##H##_sse4_1(              \
       const uint8_t *pre, int pre_stride, const int32_t *wsrc,             \
       const int32_t *mask, unsigned int *sse) {                            \
     int sum;                                                               \
@@ -347,4 +347,4 @@
 HBD_OBMCVARWXH(8, 4)
 HBD_OBMCVARWXH(4, 8)
 HBD_OBMCVARWXH(4, 4)
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/quantize_avx_x86_64.asm b/aom_dsp/x86/quantize_avx_x86_64.asm
index 01c4129..b74d6ea 100644
--- a/aom_dsp/x86/quantize_avx_x86_64.asm
+++ b/aom_dsp/x86/quantize_avx_x86_64.asm
@@ -41,7 +41,7 @@
   mova                            m0, [zbinq]              ; m0 = zbin
 
   ; Get DC and first 15 AC coeffs - in this special case, that is all.
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers but we process them as 16 bit numbers
   mova                            m9, [coeffq]
   packssdw                        m9, [coeffq+16]          ; m9 = c[i]
@@ -73,7 +73,7 @@
   ptest                          m14, m14
   jnz .single_nonzero
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova                       [r1   ], ymm5
   mova                       [r1+32], ymm5
   mova                       [r2   ], ymm5
@@ -121,7 +121,7 @@
   pand                            m8, m7
   pand                           m13, m12
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -142,7 +142,7 @@
   punpckhqdq                      m3, m3
   pmullw                         m13, m3                   ; dqc[i] = qc[i] * q
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -226,7 +226,7 @@
 
   DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                         coeffq, [  coeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -239,7 +239,7 @@
   neg                        ncoeffq
 
   ; get DC and first 15 AC coeffs
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers & require 16bit numbers
   mova                            m9, [coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [coeffq+ncoeffq*4+16]
@@ -261,7 +261,7 @@
   ptest                          m14, m14
   jnz .first_nonzero
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4   ], ymm5
   mova        [qcoeffq+ncoeffq*4+32], ymm5
   mova       [dqcoeffq+ncoeffq*4   ], ymm5
@@ -299,7 +299,7 @@
   pand                            m8, m7
   pand                           m13, m12
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -330,7 +330,7 @@
   psignw                         m13, m10
 %endif
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -360,7 +360,7 @@
 
 .ac_only_loop:
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; pack coeff from 32bit to 16bit array
   mova                            m9, [coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [coeffq+ncoeffq*4+16]
@@ -382,7 +382,7 @@
   ptest                          m14, m14
   jnz .rest_nonzero
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4+ 0], ymm5
   mova        [qcoeffq+ncoeffq*4+32], ymm5
   mova       [dqcoeffq+ncoeffq*4+ 0], ymm5
@@ -421,7 +421,7 @@
   pand                           m14, m7
   pand                           m13, m12
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m14
   punpckhwd                       m6, m14, m6
@@ -451,7 +451,7 @@
   psignw                         m13, m10
 %endif
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m14
   punpckhwd                       m6, m14, m6
@@ -507,7 +507,7 @@
 
 DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
 %else
@@ -519,7 +519,7 @@
   pxor                            m7, m7
 
 .blank_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       [dqcoeffq+ncoeffq*4+ 0], ymm7
   mova       [dqcoeffq+ncoeffq*4+32], ymm7
   mova        [qcoeffq+ncoeffq*4+ 0], ymm7
diff --git a/aom_dsp/x86/quantize_sse2.c b/aom_dsp/x86/quantize_sse2.c
index c6ff06d..f320e4e 100644
--- a/aom_dsp/x86/quantize_sse2.c
+++ b/aom_dsp/x86/quantize_sse2.c
@@ -11,11 +11,11 @@
 #include <emmintrin.h>
 #include <xmmintrin.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 static INLINE __m128i load_coefficients(const tran_low_t *coeff_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1],
                         (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3],
                         (int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5],
@@ -27,7 +27,7 @@
 
 static INLINE void store_coefficients(__m128i coeff_vals,
                                       tran_low_t *coeff_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   __m128i one = _mm_set1_epi16(1);
   __m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
   __m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one);
@@ -40,7 +40,7 @@
 #endif
 }
 
-void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                          int skip_block, const int16_t *zbin_ptr,
                          const int16_t *round_ptr, const int16_t *quant_ptr,
                          const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/x86/quantize_ssse3_x86_64.asm b/aom_dsp/x86/quantize_ssse3_x86_64.asm
index ca21539..4503370 100644
--- a/aom_dsp/x86/quantize_ssse3_x86_64.asm
+++ b/aom_dsp/x86/quantize_ssse3_x86_64.asm
@@ -53,7 +53,7 @@
 %endif
   pxor                            m5, m5                   ; m5 = dedicated zero
   DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                         coeffq, [  coeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -66,7 +66,7 @@
   neg                        ncoeffq
 
   ; get DC and first 15 AC coeffs
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers & require 16bit numbers
   mova                            m9, [  coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [  coeffq+ncoeffq*4+16]
@@ -96,7 +96,7 @@
   psignw                         m13, m10                  ; m13 = reinsert sign
   pand                            m8, m7
   pand                           m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                           m11, m8
   mova                            m6, m8
@@ -131,7 +131,7 @@
   psignw                          m8, m9
   psignw                         m13, m10
 %endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                            m11, m8
   mova                            m6, m8
@@ -166,7 +166,7 @@
   jz .accumulate_eob
 
 .ac_only_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; pack coeff from 32bit to 16bit array
   mova                            m9, [  coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [  coeffq+ncoeffq*4+16]
@@ -198,7 +198,7 @@
   psignw                         m13, m10                  ; m13 = reinsert sign
   pand                           m14, m7
   pand                           m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pxor                           m11, m11
   mova                           m11, m14
@@ -233,7 +233,7 @@
   psignw                         m14, m9
   psignw                         m13, m10
 %endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                           m11, m14
   mova                            m6, m14
@@ -271,7 +271,7 @@
 %ifidn %1, b_32x32
   jmp .accumulate_eob
 .skip_iter:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4+ 0], m5
   mova        [qcoeffq+ncoeffq*4+16], m5
   mova        [qcoeffq+ncoeffq*4+32], m5
@@ -310,7 +310,7 @@
   mov                             r2, qcoeffmp
   mov                             r3, eobmp
   DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
 %else
@@ -320,7 +320,7 @@
   neg                        ncoeffq
   pxor                            m7, m7
 .blank_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       [dqcoeffq+ncoeffq*4+ 0], m7
   mova       [dqcoeffq+ncoeffq*4+16], m7
   mova       [dqcoeffq+ncoeffq*4+32], m7
diff --git a/aom_dsp/x86/sad4d_avx2.c b/aom_dsp/x86/sad4d_avx2.c
index 585d473..9609a94 100644
--- a/aom_dsp/x86/sad4d_avx2.c
+++ b/aom_dsp/x86/sad4d_avx2.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 #include <immintrin.h>  // AVX2
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
+void aom_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
@@ -79,7 +79,7 @@
   }
 }
 
-void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
+void aom_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
diff --git a/aom_dsp/x86/sad4d_sse2.asm b/aom_dsp/x86/sad4d_sse2.asm
index 6d49869..4f4b799 100644
--- a/aom_dsp/x86/sad4d_sse2.asm
+++ b/aom_dsp/x86/sad4d_sse2.asm
@@ -181,7 +181,7 @@
   PROCESS_64x2x4  0, %4, %5, %4 + 64, %5 + 64, %6
 %endmacro
 
-; void vpx_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
+; void aom_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
 ;                         uint8_t *ref[4], int ref_stride,
 ;                         uint32_t res[4]);
 ; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 and 4x4
diff --git a/aom_dsp/x86/sad_avx2.c b/aom_dsp/x86/sad_avx2.c
index c66ab7c..34d8c0d 100644
--- a/aom_dsp/x86/sad_avx2.c
+++ b/aom_dsp/x86/sad_avx2.c
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 #include <immintrin.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
 #define FSAD64_H(h)                                                           \
-  unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
+  unsigned int aom_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
                                     const uint8_t *ref_ptr, int ref_stride) { \
     int i, res;                                                               \
     __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
@@ -40,7 +40,7 @@
   }
 
 #define FSAD32_H(h)                                                           \
-  unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
+  unsigned int aom_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
                                     const uint8_t *ref_ptr, int ref_stride) { \
     int i, res;                                                               \
     __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
@@ -89,7 +89,7 @@
 #undef FSAD32_H
 
 #define FSADAVG64_H(h)                                                        \
-  unsigned int vpx_sad64x##h##_avg_avx2(                                      \
+  unsigned int aom_sad64x##h##_avg_avx2(                                      \
       const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
       int ref_stride, const uint8_t *second_pred) {                           \
     int i, res;                                                               \
@@ -123,7 +123,7 @@
   }
 
 #define FSADAVG32_H(h)                                                        \
-  unsigned int vpx_sad32x##h##_avg_avx2(                                      \
+  unsigned int aom_sad32x##h##_avg_avx2(                                      \
       const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
       int ref_stride, const uint8_t *second_pred) {                           \
     int i, res;                                                               \
diff --git a/aom_dsp/x86/sad_sse2.asm b/aom_dsp/x86/sad_sse2.asm
index edef2a7..01ae405 100644
--- a/aom_dsp/x86/sad_sse2.asm
+++ b/aom_dsp/x86/sad_sse2.asm
@@ -45,7 +45,7 @@
 %endmacro
 
 %if CONFIG_EXT_PARTITION
-; unsigned int vpx_sad128x128_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad128x128_sse2(uint8_t *src, int src_stride,
 ;                                  uint8_t *ref, int ref_stride);
 %macro SAD128XN 1-2 0
   SAD_FN 128, %1, 5, %2
@@ -114,7 +114,7 @@
 %endif
 
 
-; unsigned int vpx_sad64x64_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad64x64_sse2(uint8_t *src, int src_stride,
 ;                                uint8_t *ref, int ref_stride);
 %macro SAD64XN 1-2 0
   SAD_FN 64, %1, 5, %2
@@ -161,7 +161,7 @@
 SAD64XN 64, 1 ; sad64x64_avg_sse2
 SAD64XN 32, 1 ; sad64x32_avg_sse2
 
-; unsigned int vpx_sad32x32_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad32x32_sse2(uint8_t *src, int src_stride,
 ;                                uint8_t *ref, int ref_stride);
 %macro SAD32XN 1-2 0
   SAD_FN 32, %1, 5, %2
@@ -206,7 +206,7 @@
 SAD32XN 32, 1 ; sad32x32_avg_sse2
 SAD32XN 16, 1 ; sad32x16_avg_sse2
 
-; unsigned int vpx_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
 ;                                    uint8_t *ref, int ref_stride);
 %macro SAD16XN 1-2 0
   SAD_FN 16, %1, 7, %2
@@ -252,7 +252,7 @@
 SAD16XN 16, 1 ; sad16x16_avg_sse2
 SAD16XN  8, 1 ; sad16x8_avg_sse2
 
-; unsigned int vpx_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
 ;                                   uint8_t *ref, int ref_stride);
 %macro SAD8XN 1-2 0
   SAD_FN 8, %1, 7, %2
@@ -296,7 +296,7 @@
 SAD8XN  8, 1 ; sad8x8_avg_sse2
 SAD8XN  4, 1 ; sad8x4_avg_sse2
 
-; unsigned int vpx_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
 ;                                   uint8_t *ref, int ref_stride);
 %macro SAD4XN 1-2 0
   SAD_FN 4, %1, 7, %2
diff --git a/aom_dsp/x86/sad_sse3.asm b/aom_dsp/x86/sad_sse3.asm
index 4665fb9..1de346e 100644
--- a/aom_dsp/x86/sad_sse3.asm
+++ b/aom_dsp/x86/sad_sse3.asm
@@ -165,14 +165,14 @@
         paddw           mm7,       mm3
 %endmacro
 
-;void int vpx_sad16x16x3_sse3(
+;void int aom_sad16x16x3_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad16x16x3_sse3) PRIVATE
-sym(vpx_sad16x16x3_sse3):
+global sym(aom_sad16x16x3_sse3) PRIVATE
+sym(aom_sad16x16x3_sse3):
 
     STACK_FRAME_CREATE_X3
 
@@ -207,14 +207,14 @@
 
     STACK_FRAME_DESTROY_X3
 
-;void int vpx_sad16x8x3_sse3(
+;void int aom_sad16x8x3_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad16x8x3_sse3) PRIVATE
-sym(vpx_sad16x8x3_sse3):
+global sym(aom_sad16x8x3_sse3) PRIVATE
+sym(aom_sad16x8x3_sse3):
 
     STACK_FRAME_CREATE_X3
 
@@ -245,14 +245,14 @@
 
     STACK_FRAME_DESTROY_X3
 
-;void int vpx_sad8x16x3_sse3(
+;void int aom_sad8x16x3_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad8x16x3_sse3) PRIVATE
-sym(vpx_sad8x16x3_sse3):
+global sym(aom_sad8x16x3_sse3) PRIVATE
+sym(aom_sad8x16x3_sse3):
 
     STACK_FRAME_CREATE_X3
 
@@ -274,14 +274,14 @@
 
     STACK_FRAME_DESTROY_X3
 
-;void int vpx_sad8x8x3_sse3(
+;void int aom_sad8x8x3_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad8x8x3_sse3) PRIVATE
-sym(vpx_sad8x8x3_sse3):
+global sym(aom_sad8x8x3_sse3) PRIVATE
+sym(aom_sad8x8x3_sse3):
 
     STACK_FRAME_CREATE_X3
 
@@ -299,14 +299,14 @@
 
     STACK_FRAME_DESTROY_X3
 
-;void int vpx_sad4x4x3_sse3(
+;void int aom_sad4x4x3_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad4x4x3_sse3) PRIVATE
-sym(vpx_sad4x4x3_sse3):
+global sym(aom_sad4x4x3_sse3) PRIVATE
+sym(aom_sad4x4x3_sse3):
 
     STACK_FRAME_CREATE_X3
 
diff --git a/aom_dsp/x86/sad_sse4.asm b/aom_dsp/x86/sad_sse4.asm
index 07e28b48a..fd2c70b 100644
--- a/aom_dsp/x86/sad_sse4.asm
+++ b/aom_dsp/x86/sad_sse4.asm
@@ -165,14 +165,14 @@
     movdqa          [rdi + 16],    xmm2
 %endmacro
 
-;void vpx_sad16x16x8_sse4_1(
+;void aom_sad16x16x8_sse4_1(
 ;    const unsigned char *src_ptr,
 ;    int  src_stride,
 ;    const unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    unsigned short *sad_array);
-global sym(vpx_sad16x16x8_sse4_1) PRIVATE
-sym(vpx_sad16x16x8_sse4_1):
+global sym(aom_sad16x16x8_sse4_1) PRIVATE
+sym(aom_sad16x16x8_sse4_1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -205,15 +205,15 @@
     ret
 
 
-;void vpx_sad16x8x8_sse4_1(
+;void aom_sad16x8x8_sse4_1(
 ;    const unsigned char *src_ptr,
 ;    int  src_stride,
 ;    const unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    unsigned short *sad_array
 ;);
-global sym(vpx_sad16x8x8_sse4_1) PRIVATE
-sym(vpx_sad16x8x8_sse4_1):
+global sym(aom_sad16x8x8_sse4_1) PRIVATE
+sym(aom_sad16x8x8_sse4_1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -242,15 +242,15 @@
     ret
 
 
-;void vpx_sad8x8x8_sse4_1(
+;void aom_sad8x8x8_sse4_1(
 ;    const unsigned char *src_ptr,
 ;    int  src_stride,
 ;    const unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    unsigned short *sad_array
 ;);
-global sym(vpx_sad8x8x8_sse4_1) PRIVATE
-sym(vpx_sad8x8x8_sse4_1):
+global sym(aom_sad8x8x8_sse4_1) PRIVATE
+sym(aom_sad8x8x8_sse4_1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -279,15 +279,15 @@
     ret
 
 
-;void vpx_sad8x16x8_sse4_1(
+;void aom_sad8x16x8_sse4_1(
 ;    const unsigned char *src_ptr,
 ;    int  src_stride,
 ;    const unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    unsigned short *sad_array
 ;);
-global sym(vpx_sad8x16x8_sse4_1) PRIVATE
-sym(vpx_sad8x16x8_sse4_1):
+global sym(aom_sad8x16x8_sse4_1) PRIVATE
+sym(aom_sad8x16x8_sse4_1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -320,15 +320,15 @@
     ret
 
 
-;void vpx_sad4x4x8_sse4_1(
+;void aom_sad4x4x8_sse4_1(
 ;    const unsigned char *src_ptr,
 ;    int  src_stride,
 ;    const unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    unsigned short *sad_array
 ;);
-global sym(vpx_sad4x4x8_sse4_1) PRIVATE
-sym(vpx_sad4x4x8_sse4_1):
+global sym(aom_sad4x4x8_sse4_1) PRIVATE
+sym(aom_sad4x4x8_sse4_1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
diff --git a/aom_dsp/x86/sad_ssse3.asm b/aom_dsp/x86/sad_ssse3.asm
index 8315f97..b1c97ea 100644
--- a/aom_dsp/x86/sad_ssse3.asm
+++ b/aom_dsp/x86/sad_ssse3.asm
@@ -146,14 +146,14 @@
 
 %endmacro
 
-;void int vpx_sad16x16x3_ssse3(
+;void int aom_sad16x16x3_ssse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad16x16x3_ssse3) PRIVATE
-sym(vpx_sad16x16x3_ssse3):
+global sym(aom_sad16x16x3_ssse3) PRIVATE
+sym(aom_sad16x16x3_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -169,31 +169,31 @@
         mov             rdx,        0xf
         and             rdx,        rdi
 
-        jmp .vpx_sad16x16x3_ssse3_skiptable
-.vpx_sad16x16x3_ssse3_jumptable:
-        dd .vpx_sad16x16x3_ssse3_aligned_by_0  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_1  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_2  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_3  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_4  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_5  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_6  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_7  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_8  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_9  - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_10 - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_11 - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_12 - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_13 - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_14 - .vpx_sad16x16x3_ssse3_do_jump
-        dd .vpx_sad16x16x3_ssse3_aligned_by_15 - .vpx_sad16x16x3_ssse3_do_jump
-.vpx_sad16x16x3_ssse3_skiptable:
+        jmp .aom_sad16x16x3_ssse3_skiptable
+.aom_sad16x16x3_ssse3_jumptable:
+        dd .aom_sad16x16x3_ssse3_aligned_by_0  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_1  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_2  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_3  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_4  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_5  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_6  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_7  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_8  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_9  - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_10 - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_11 - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_12 - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_13 - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_14 - .aom_sad16x16x3_ssse3_do_jump
+        dd .aom_sad16x16x3_ssse3_aligned_by_15 - .aom_sad16x16x3_ssse3_do_jump
+.aom_sad16x16x3_ssse3_skiptable:
 
-        call .vpx_sad16x16x3_ssse3_do_jump
-.vpx_sad16x16x3_ssse3_do_jump:
+        call .aom_sad16x16x3_ssse3_do_jump
+.aom_sad16x16x3_ssse3_do_jump:
         pop             rcx                         ; get the address of do_jump
-        mov             rax,  .vpx_sad16x16x3_ssse3_jumptable - .vpx_sad16x16x3_ssse3_do_jump
-        add             rax,  rcx  ; get the absolute address of vpx_sad16x16x3_ssse3_jumptable
+        mov             rax,  .aom_sad16x16x3_ssse3_jumptable - .aom_sad16x16x3_ssse3_do_jump
+        add             rax,  rcx  ; get the absolute address of aom_sad16x16x3_ssse3_jumptable
 
         movsxd          rax,  dword [rax + 4*rdx]   ; get the 32 bit offset from the jumptable
         add             rcx,        rax
@@ -203,23 +203,23 @@
 
         jmp             rcx
 
-        PROCESS_16X16X3_OFFSET 0,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 1,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 2,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 3,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 4,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 5,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 6,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 7,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 8,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 9,  .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 10, .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 11, .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 12, .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 13, .vpx_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 14, .vpx_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 0,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 1,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 2,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 3,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 4,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 5,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 6,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 7,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 8,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 9,  .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 10, .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 11, .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 12, .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 13, .aom_sad16x16x3_ssse3
+        PROCESS_16X16X3_OFFSET 14, .aom_sad16x16x3_ssse3
 
-.vpx_sad16x16x3_ssse3_aligned_by_15:
+.aom_sad16x16x3_ssse3_aligned_by_15:
         PROCESS_16X2X3 1
         PROCESS_16X2X3 0
         PROCESS_16X2X3 0
@@ -229,7 +229,7 @@
         PROCESS_16X2X3 0
         PROCESS_16X2X3 0
 
-.vpx_sad16x16x3_ssse3_store_off:
+.aom_sad16x16x3_ssse3_store_off:
         mov             rdi,        arg(4) ;Results
 
         movq            xmm0,       xmm5
@@ -259,14 +259,14 @@
     pop         rbp
     ret
 
-;void int vpx_sad16x8x3_ssse3(
+;void int aom_sad16x8x3_ssse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
 ;    unsigned char *ref_ptr,
 ;    int  ref_stride,
 ;    int  *results)
-global sym(vpx_sad16x8x3_ssse3) PRIVATE
-sym(vpx_sad16x8x3_ssse3):
+global sym(aom_sad16x8x3_ssse3) PRIVATE
+sym(aom_sad16x8x3_ssse3):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 5
@@ -282,31 +282,31 @@
         mov             rdx,        0xf
         and             rdx,        rdi
 
-        jmp .vpx_sad16x8x3_ssse3_skiptable
-.vpx_sad16x8x3_ssse3_jumptable:
-        dd .vpx_sad16x8x3_ssse3_aligned_by_0  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_1  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_2  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_3  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_4  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_5  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_6  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_7  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_8  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_9  - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_10 - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_11 - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_12 - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_13 - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_14 - .vpx_sad16x8x3_ssse3_do_jump
-        dd .vpx_sad16x8x3_ssse3_aligned_by_15 - .vpx_sad16x8x3_ssse3_do_jump
-.vpx_sad16x8x3_ssse3_skiptable:
+        jmp .aom_sad16x8x3_ssse3_skiptable
+.aom_sad16x8x3_ssse3_jumptable:
+        dd .aom_sad16x8x3_ssse3_aligned_by_0  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_1  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_2  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_3  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_4  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_5  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_6  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_7  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_8  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_9  - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_10 - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_11 - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_12 - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_13 - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_14 - .aom_sad16x8x3_ssse3_do_jump
+        dd .aom_sad16x8x3_ssse3_aligned_by_15 - .aom_sad16x8x3_ssse3_do_jump
+.aom_sad16x8x3_ssse3_skiptable:
 
-        call .vpx_sad16x8x3_ssse3_do_jump
-.vpx_sad16x8x3_ssse3_do_jump:
+        call .aom_sad16x8x3_ssse3_do_jump
+.aom_sad16x8x3_ssse3_do_jump:
         pop             rcx                         ; get the address of do_jump
-        mov             rax,  .vpx_sad16x8x3_ssse3_jumptable - .vpx_sad16x8x3_ssse3_do_jump
-        add             rax,  rcx  ; get the absolute address of vpx_sad16x8x3_ssse3_jumptable
+        mov             rax,  .aom_sad16x8x3_ssse3_jumptable - .aom_sad16x8x3_ssse3_do_jump
+        add             rax,  rcx  ; get the absolute address of aom_sad16x8x3_ssse3_jumptable
 
         movsxd          rax,  dword [rax + 4*rdx]   ; get the 32 bit offset from the jumptable
         add             rcx,        rax
@@ -316,30 +316,30 @@
 
         jmp             rcx
 
-        PROCESS_16X8X3_OFFSET 0,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 1,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 2,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 3,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 4,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 5,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 6,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 7,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 8,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 9,  .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 10, .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 11, .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 12, .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 13, .vpx_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 14, .vpx_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 0,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 1,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 2,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 3,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 4,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 5,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 6,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 7,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 8,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 9,  .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 10, .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 11, .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 12, .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 13, .aom_sad16x8x3_ssse3
+        PROCESS_16X8X3_OFFSET 14, .aom_sad16x8x3_ssse3
 
-.vpx_sad16x8x3_ssse3_aligned_by_15:
+.aom_sad16x8x3_ssse3_aligned_by_15:
 
         PROCESS_16X2X3 1
         PROCESS_16X2X3 0
         PROCESS_16X2X3 0
         PROCESS_16X2X3 0
 
-.vpx_sad16x8x3_ssse3_store_off:
+.aom_sad16x8x3_ssse3_store_off:
         mov             rdi,        arg(4) ;Results
 
         movq            xmm0,       xmm5
diff --git a/aom_dsp/x86/ssim_opt_x86_64.asm b/aom_dsp/x86/ssim_opt_x86_64.asm
index fc49c30..ebc3703 100644
--- a/aom_dsp/x86/ssim_opt_x86_64.asm
+++ b/aom_dsp/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vpx_ssim_parms_16x16_sse2) PRIVATE
-sym(vpx_ssim_parms_16x16_sse2):
+global sym(aom_ssim_parms_16x16_sse2) PRIVATE
+sym(aom_ssim_parms_16x16_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vpx_ssim_parms_8x8_sse2) PRIVATE
-sym(vpx_ssim_parms_8x8_sse2):
+global sym(aom_ssim_parms_8x8_sse2) PRIVATE
+sym(aom_ssim_parms_8x8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
diff --git a/aom_dsp/x86/subpel_variance_sse2.asm b/aom_dsp/x86/subpel_variance_sse2.asm
index cee4468..899167a 100644
--- a/aom_dsp/x86/subpel_variance_sse2.asm
+++ b/aom_dsp/x86/subpel_variance_sse2.asm
@@ -39,7 +39,7 @@
 
 SECTION .text
 
-; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
+; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
 ;                               int x_offset, int y_offset,
 ;                               const uint8_t *dst, ptrdiff_t dst_stride,
 ;                               int height, unsigned int *sse);
diff --git a/aom_dsp/x86/subtract_sse2.asm b/aom_dsp/x86/subtract_sse2.asm
index 2225b7c..fe2830e 100644
--- a/aom_dsp/x86/subtract_sse2.asm
+++ b/aom_dsp/x86/subtract_sse2.asm
@@ -12,7 +12,7 @@
 
 SECTION .text
 
-; void vpx_subtract_block(int rows, int cols,
+; void aom_subtract_block(int rows, int cols,
 ;                         int16_t *diff, ptrdiff_t diff_stride,
 ;                         const uint8_t *src, ptrdiff_t src_stride,
 ;                         const uint8_t *pred, ptrdiff_t pred_stride)
diff --git a/aom_dsp/x86/sum_squares_sse2.c b/aom_dsp/x86/sum_squares_sse2.c
index 958493c..eb1d912 100644
--- a/aom_dsp/x86/sum_squares_sse2.c
+++ b/aom_dsp/x86/sum_squares_sse2.c
@@ -14,9 +14,9 @@
 
 #include "aom_dsp/x86/synonyms.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
+static uint64_t aom_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
                                                 int stride) {
   const __m128i v_val_0_w =
       _mm_loadl_epi64((const __m128i *)(src + 0 * stride));
@@ -44,12 +44,12 @@
 
 #ifdef __GNUC__
 // This prevents GCC/Clang from inlining this function into
-// vpx_sum_squares_2d_i16_sse2, which in turn saves some stack
+// aom_sum_squares_2d_i16_sse2, which in turn saves some stack
 // maintenance instructions in the common case of 4x4.
 __attribute__((noinline))
 #endif
 static uint64_t
-vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
+aom_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
   int r, c;
 
   const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
@@ -118,15 +118,15 @@
 #endif
 }
 
-uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int size) {
+uint64_t aom_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int size) {
   // 4 elements per row only requires half an XMM register, so this
   // must be a special case, but also note that over 75% of all calls
   // are with size == 4, so it is also the common case.
   if (LIKELY(size == 4)) {
-    return vpx_sum_squares_2d_i16_4x4_sse2(src, stride);
+    return aom_sum_squares_2d_i16_4x4_sse2(src, stride);
   } else {
     // Generic case
-    return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size);
+    return aom_sum_squares_2d_i16_nxn_sse2(src, stride, size);
   }
 }
 
@@ -134,7 +134,7 @@
 // 1D version
 //////////////////////////////////////////////////////////////////////////////
 
-static uint64_t vpx_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
+static uint64_t aom_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
   const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
   __m128i v_acc0_q = _mm_setzero_si128();
   __m128i v_acc1_q = _mm_setzero_si128();
@@ -192,14 +192,14 @@
 #endif
 }
 
-uint64_t vpx_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
+uint64_t aom_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
   if (n % 64 == 0) {
-    return vpx_sum_squares_i16_64n_sse2(src, n);
+    return aom_sum_squares_i16_64n_sse2(src, n);
   } else if (n > 64) {
     int k = n & ~(64 - 1);
-    return vpx_sum_squares_i16_64n_sse2(src, k) +
-           vpx_sum_squares_i16_c(src + k, n - k);
+    return aom_sum_squares_i16_64n_sse2(src, k) +
+           aom_sum_squares_i16_c(src + k, n - k);
   } else {
-    return vpx_sum_squares_i16_c(src, n);
+    return aom_sum_squares_i16_c(src, n);
   }
 }
diff --git a/aom_dsp/x86/synonyms.h b/aom_dsp/x86/synonyms.h
index e815f7e..b38bb35 100644
--- a/aom_dsp/x86/synonyms.h
+++ b/aom_dsp/x86/synonyms.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_SYNONYS_H_
-#define VPX_DSP_X86_SYNONYS_H_
+#ifndef AOM_DSP_X86_SYNONYMS_H_
+#define AOM_DSP_X86_SYNONYMS_H_
 
 #include <immintrin.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 /**
  * Various reusable shorthands for x86 SIMD intrinsics.
@@ -108,4 +108,4 @@
 }
 #endif  // __SSSE3__
 
-#endif  // VPX_DSP_X86_SYNONYS_H_
+#endif  // AOM_DSP_X86_SYNONYMS_H_
diff --git a/aom_dsp/x86/txfm_common_sse2.h b/aom_dsp/x86/txfm_common_sse2.h
index aed7d4e..6f32d09 100644
--- a/aom_dsp/x86/txfm_common_sse2.h
+++ b/aom_dsp/x86/txfm_common_sse2.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_TXFM_COMMON_SSE2_H_
-#define VPX_DSP_X86_TXFM_COMMON_SSE2_H_
+#ifndef AOM_DSP_X86_TXFM_COMMON_SSE2_H_
+#define AOM_DSP_X86_TXFM_COMMON_SSE2_H_
 
 #include <emmintrin.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #define pair_set_epi16(a, b)                                            \
   _mm_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
@@ -33,4 +33,4 @@
   return _mm_shuffle_epi32(b, 0x4e);
 }
 
-#endif  // VPX_DSP_X86_TXFM_COMMON_SSE2_H_
+#endif  // AOM_DSP_X86_TXFM_COMMON_SSE2_H_
diff --git a/aom_dsp/x86/variance_avx2.c b/aom_dsp/x86/variance_avx2.c
index 7bc2693..e603711 100644
--- a/aom_dsp/x86/variance_avx2.c
+++ b/aom_dsp/x86/variance_avx2.c
@@ -7,13 +7,13 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 typedef void (*get_var_avx2)(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse, int *sum);
 
-void vpx_get32x32var_avx2(const uint8_t *src, int src_stride,
+void aom_get32x32var_avx2(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride, unsigned int *sse,
                           int *sum);
 
@@ -38,104 +38,104 @@
   }
 }
 
-unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance16x16_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
-                vpx_get16x16var_avx2, 16);
+                aom_get16x16var_avx2, 16);
   return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
 }
 
-unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x16_avx2(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                unsigned int *sse) {
   int sum;
-  vpx_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum);
+  aom_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum);
   return *sse;
 }
 
-unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x16_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
-                vpx_get32x32var_avx2, 32);
+                aom_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
-unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x32_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
-                vpx_get32x32var_avx2, 32);
+                aom_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 10);
 }
 
-unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x64_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
-                vpx_get32x32var_avx2, 32);
+                aom_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 12);
 }
 
-unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x32_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
-                vpx_get32x32var_avx2, 32);
+                aom_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
-unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
                                              int x_offset, int y_offset,
                                              const uint8_t *dst, int dst_stride,
                                              int height, unsigned int *sse);
 
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+unsigned int aom_sub_pixel_avg_variance32xh_avx2(
     const uint8_t *src, int src_stride, int x_offset, int y_offset,
     const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
     int height, unsigned int *sseptr);
 
-unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
+unsigned int aom_sub_pixel_variance64x64_avx2(const uint8_t *src,
                                               int src_stride, int x_offset,
                                               int y_offset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
   unsigned int sse1;
-  const int se1 = vpx_sub_pixel_variance32xh_avx2(
+  const int se1 = aom_sub_pixel_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);
   unsigned int sse2;
   const int se2 =
-      vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
+      aom_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
                                       dst + 32, dst_stride, 64, &sse2);
   const int se = se1 + se2;
   *sse = sse1 + sse2;
   return *sse - (((int64_t)se * se) >> 12);
 }
 
-unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
+unsigned int aom_sub_pixel_variance32x32_avx2(const uint8_t *src,
                                               int src_stride, int x_offset,
                                               int y_offset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
-  const int se = vpx_sub_pixel_variance32xh_avx2(
+  const int se = aom_sub_pixel_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
   return *sse - (((int64_t)se * se) >> 10);
 }
 
-unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
+unsigned int aom_sub_pixel_avg_variance64x64_avx2(
     const uint8_t *src, int src_stride, int x_offset, int y_offset,
     const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
   unsigned int sse1;
-  const int se1 = vpx_sub_pixel_avg_variance32xh_avx2(
+  const int se1 = aom_sub_pixel_avg_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);
   unsigned int sse2;
-  const int se2 = vpx_sub_pixel_avg_variance32xh_avx2(
+  const int se2 = aom_sub_pixel_avg_variance32xh_avx2(
       src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32,
       64, 64, &sse2);
   const int se = se1 + se2;
@@ -145,11 +145,11 @@
   return *sse - (((int64_t)se * se) >> 12);
 }
 
-unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
+unsigned int aom_sub_pixel_avg_variance32x32_avx2(
     const uint8_t *src, int src_stride, int x_offset, int y_offset,
     const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
   // Process 32 elements in parallel.
-  const int se = vpx_sub_pixel_avg_variance32xh_avx2(
+  const int se = aom_sub_pixel_avg_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
   return *sse - (((int64_t)se * se) >> 10);
 }
diff --git a/aom_dsp/x86/variance_impl_avx2.c b/aom_dsp/x86/variance_impl_avx2.c
index 3166025..0e2d145 100644
--- a/aom_dsp/x86/variance_impl_avx2.c
+++ b/aom_dsp/x86/variance_impl_avx2.c
@@ -10,7 +10,7 @@
 
 #include <immintrin.h>  // AVX2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
 /* clang-format off */
@@ -34,7 +34,7 @@
 };
 /* clang-format on */
 
-void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
+void aom_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
                           const unsigned char *ref_ptr, int recon_stride,
                           unsigned int *SSE, int *Sum) {
   __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
@@ -140,7 +140,7 @@
   }
 }
 
-void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride,
+void aom_get32x32var_avx2(const unsigned char *src_ptr, int source_stride,
                           const unsigned char *ref_ptr, int recon_stride,
                           unsigned int *SSE, int *Sum) {
   __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
@@ -297,7 +297,7 @@
   sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +               \
         _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
 
-unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
                                              int x_offset, int y_offset,
                                              const uint8_t *dst, int dst_stride,
                                              int height, unsigned int *sse) {
@@ -484,7 +484,7 @@
   return sum;
 }
 
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+unsigned int aom_sub_pixel_avg_variance32xh_avx2(
     const uint8_t *src, int src_stride, int x_offset, int y_offset,
     const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
     int height, unsigned int *sse) {
diff --git a/aom_dsp/x86/variance_sse2.c b/aom_dsp/x86/variance_sse2.c
index 0788850..e0397d3 100644
--- a/aom_dsp/x86/variance_sse2.c
+++ b/aom_dsp/x86/variance_sse2.c
@@ -10,8 +10,8 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "aom_ports/mem.h"
 
@@ -19,7 +19,7 @@
                                const unsigned char *ref, int ref_stride,
                                unsigned int *sse, int *sum);
 
-unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
+unsigned int aom_get_mb_ss_sse2(const int16_t *src) {
   __m128i vsum = _mm_setzero_si128();
   int i;
 
@@ -65,7 +65,7 @@
   *sse = _mm_cvtsi128_si32(vsum);
 }
 
-void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
+void aom_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
                         int ref_stride, unsigned int *sse, int *sum) {
   const __m128i zero = _mm_setzero_si128();
   __m128i vsum = _mm_setzero_si128();
@@ -103,7 +103,7 @@
   *sse = _mm_cvtsi128_si32(vsse);
 }
 
-void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
+void aom_get16x16var_sse2(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride, unsigned int *sse,
                           int *sum) {
   const __m128i zero = _mm_setzero_si128();
@@ -165,7 +165,7 @@
   }
 }
 
-unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance4x4_sse2(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
@@ -173,7 +173,7 @@
   return *sse - ((sum * sum) >> 4);
 }
 
-unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance8x4_sse2(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
@@ -182,7 +182,7 @@
   return *sse - ((sum * sum) >> 5);
 }
 
-unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance4x8_sse2(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
@@ -191,126 +191,126 @@
   return *sse - ((sum * sum) >> 5);
 }
 
-unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance8x8_sse2(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
-  vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
+  aom_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
   return *sse - ((sum * sum) >> 6);
 }
 
-unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance16x8_sse2(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 16, 8, sse, &sum,
-                vpx_get8x8var_sse2, 8);
+                aom_get8x8var_sse2, 8);
   return *sse - ((sum * sum) >> 7);
 }
 
-unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance8x16_sse2(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 8, 16, sse, &sum,
-                vpx_get8x8var_sse2, 8);
+                aom_get8x8var_sse2, 8);
   return *sse - ((sum * sum) >> 7);
 }
 
-unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance16x16_sse2(const unsigned char *src, int src_stride,
                                     const unsigned char *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
+  aom_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
   return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
 }
 
-unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 10);
 }
 
-unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x16_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
-unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance16x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 16, 32, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
-unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x64_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 12);
 }
 
-unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
-unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x64_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
   variance_sse2(src, src_stride, ref, ref_stride, 32, 64, sse, &sum,
-                vpx_get16x16var_sse2, 16);
+                aom_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
-unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse8x8_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
-  vpx_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
+  aom_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
   return *sse;
 }
 
-unsigned int vpx_mse8x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse8x16_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
-  vpx_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
+  aom_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
   return *sse;
 }
 
-unsigned int vpx_mse16x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x8_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
-  vpx_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
+  aom_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
   return *sse;
 }
 
-unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                unsigned int *sse) {
-  vpx_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
+  aom_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
   return *sse;
 }
 
 // The 2 unused parameters are place holders for PIC enabled build.
 // These definitions are for functions defined in subpel_variance.asm
 #define DECL(w, opt)                                                           \
-  int vpx_sub_pixel_variance##w##xh_##opt(                                     \
+  int aom_sub_pixel_variance##w##xh_##opt(                                     \
       const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset,    \
       const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
       void *unused0, void *unused)
@@ -325,27 +325,27 @@
 #undef DECL
 
 #define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                       \
-  unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(                        \
+  unsigned int aom_sub_pixel_variance##w##x##h##_##opt(                        \
       const uint8_t *src, int src_stride, int x_offset, int y_offset,          \
       const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) {             \
     unsigned int sse;                                                          \
-    int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset,   \
+    int se = aom_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset,   \
                                                   y_offset, dst, dst_stride,   \
                                                   h, &sse, NULL, NULL);        \
     if (w > wf) {                                                              \
       unsigned int sse2;                                                       \
-      int se2 = vpx_sub_pixel_variance##wf##xh_##opt(                          \
+      int se2 = aom_sub_pixel_variance##wf##xh_##opt(                          \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
           &sse2, NULL, NULL);                                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                            \
+        se2 = aom_sub_pixel_variance##wf##xh_##opt(                            \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                            \
+        se2 = aom_sub_pixel_variance##wf##xh_##opt(                            \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
             &sse2, NULL, NULL);                                                \
         se += se2;                                                             \
@@ -379,7 +379,7 @@
 
 // The 2 unused parameters are place holders for PIC enabled build.
 #define DECL(w, opt)                                                        \
-  int vpx_sub_pixel_avg_variance##w##xh_##opt(                              \
+  int aom_sub_pixel_avg_variance##w##xh_##opt(                              \
       const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
       const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec,         \
       ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,   \
@@ -395,28 +395,28 @@
 #undef DECLS
 
 #define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                       \
-  unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(                    \
+  unsigned int aom_sub_pixel_avg_variance##w##x##h##_##opt(                    \
       const uint8_t *src, int src_stride, int x_offset, int y_offset,          \
       const uint8_t *dst, int dst_stride, unsigned int *sseptr,                \
       const uint8_t *sec) {                                                    \
     unsigned int sse;                                                          \
-    int se = vpx_sub_pixel_avg_variance##wf##xh_##opt(                         \
+    int se = aom_sub_pixel_avg_variance##wf##xh_##opt(                         \
         src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
         NULL, NULL);                                                           \
     if (w > wf) {                                                              \
       unsigned int sse2;                                                       \
-      int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                      \
+      int se2 = aom_sub_pixel_avg_variance##wf##xh_##opt(                      \
           src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
           sec + 16, w, h, &sse2, NULL, NULL);                                  \
       se += se2;                                                               \
       sse += sse2;                                                             \
       if (w > wf * 2) {                                                        \
-        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                        \
+        se2 = aom_sub_pixel_avg_variance##wf##xh_##opt(                        \
             src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
             sec + 32, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
         sse += sse2;                                                           \
-        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                        \
+        se2 = aom_sub_pixel_avg_variance##wf##xh_##opt(                        \
             src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
             sec + 48, w, h, &sse2, NULL, NULL);                                \
         se += se2;                                                             \
@@ -448,7 +448,7 @@
 #undef FNS
 #undef FN
 
-void vpx_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
+void aom_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
                              const uint8_t *ref, int ref_stride) {
   int i, j;
   int stride = ref_stride << 3;
@@ -536,7 +536,7 @@
   }
 }
 
-void vpx_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
+void aom_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
                                       int width, int height, const uint8_t *ref,
                                       int ref_stride) {
   const __m128i zero = _mm_set1_epi16(0);
diff --git a/aom_mem/vpx_mem.c b/aom_mem/aom_mem.c
similarity index 78%
rename from aom_mem/vpx_mem.c
rename to aom_mem/aom_mem.c
index e8aaf6d..e308b1b 100644
--- a/aom_mem/vpx_mem.c
+++ b/aom_mem/aom_mem.c
@@ -8,14 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "vpx_mem.h"
+#define __AOM_MEM_C__
+
+#include "aom_mem.h"
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include "include/vpx_mem_intrnl.h"
-#include "aom/vpx_integer.h"
+#include "include/aom_mem_intrnl.h"
+#include "aom/aom_integer.h"
 
-void *vpx_memalign(size_t align, size_t size) {
+void *aom_memalign(size_t align, size_t size) {
   void *addr, *x = NULL;
 
   addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
@@ -29,19 +31,19 @@
   return x;
 }
 
-void *vpx_malloc(size_t size) { return vpx_memalign(DEFAULT_ALIGNMENT, size); }
+void *aom_malloc(size_t size) { return aom_memalign(DEFAULT_ALIGNMENT, size); }
 
-void *vpx_calloc(size_t num, size_t size) {
+void *aom_calloc(size_t num, size_t size) {
   void *x;
 
-  x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
+  x = aom_memalign(DEFAULT_ALIGNMENT, num * size);
 
   if (x) memset(x, 0, num * size);
 
   return x;
 }
 
-void *vpx_realloc(void *memblk, size_t size) {
+void *aom_realloc(void *memblk, size_t size) {
   void *addr, *new_addr = NULL;
   int align = DEFAULT_ALIGNMENT;
 
@@ -54,9 +56,9 @@
   not a null pointer, the object pointed to is freed.
   */
   if (!memblk)
-    new_addr = vpx_malloc(size);
+    new_addr = aom_malloc(size);
   else if (!size)
-    vpx_free(memblk);
+    aom_free(memblk);
   else {
     addr = (void *)(((size_t *)memblk)[-1]);
     memblk = NULL;
@@ -77,18 +79,18 @@
   return new_addr;
 }
 
-void vpx_free(void *memblk) {
+void aom_free(void *memblk) {
   if (memblk) {
     void *addr = (void *)(((size_t *)memblk)[-1]);
     free(addr);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void *vpx_memset16(void *dest, int val, size_t length) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void *aom_memset16(void *dest, int val, size_t length) {
   size_t i;
   uint16_t *dest16 = (uint16_t *)dest;
   for (i = 0; i < length; i++) *dest16++ = val;
   return dest;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_mem/aom_mem.h b/aom_mem/aom_mem.h
new file mode 100644
index 0000000..7527fdc
--- /dev/null
+++ b/aom_mem/aom_mem.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AOM_MEM_AOM_MEM_H_
+#define AOM_MEM_AOM_MEM_H_
+
+#include "aom_config.h"
+#if defined(__uClinux__)
+#include <lddk.h>
+#endif
+
+#include <stdlib.h>
+#include <stddef.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void *aom_memalign(size_t align, size_t size);
+void *aom_malloc(size_t size);
+void *aom_calloc(size_t num, size_t size);
+void *aom_realloc(void *memblk, size_t size);
+void aom_free(void *memblk);
+
+#if CONFIG_AOM_HIGHBITDEPTH
+void *aom_memset16(void *dest, int val, size_t length);
+#endif
+
+#include <string.h>
+
+#ifdef AOM_MEM_PLTFRM
+#include AOM_MEM_PLTFRM
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  // AOM_MEM_AOM_MEM_H_
diff --git a/aom_mem/aom_mem.mk b/aom_mem/aom_mem.mk
new file mode 100644
index 0000000..e9162c2
--- /dev/null
+++ b/aom_mem/aom_mem.mk
@@ -0,0 +1,4 @@
+MEM_SRCS-yes += aom_mem.mk
+MEM_SRCS-yes += aom_mem.c
+MEM_SRCS-yes += aom_mem.h
+MEM_SRCS-yes += include/aom_mem_intrnl.h
diff --git a/aom_mem/include/vpx_mem_intrnl.h b/aom_mem/include/aom_mem_intrnl.h
similarity index 77%
rename from aom_mem/include/vpx_mem_intrnl.h
rename to aom_mem/include/aom_mem_intrnl.h
index b62d238..36beaf0 100644
--- a/aom_mem/include/vpx_mem_intrnl.h
+++ b/aom_mem/include/aom_mem_intrnl.h
@@ -8,16 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
-#define VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
-#include "./vpx_config.h"
+#ifndef AOM_MEM_INCLUDE_AOM_MEM_INTRNL_H_
+#define AOM_MEM_INCLUDE_AOM_MEM_INTRNL_H_
+#include "./aom_config.h"
 
 #define ADDRESS_STORAGE_SIZE sizeof(size_t)
 
 #ifndef DEFAULT_ALIGNMENT
 #if defined(VXWORKS)
-/*default addr alignment to use in calls to vpx_* functions other than
- * vpx_memalign*/
+/*default addr alignment to use in calls to aom_* functions other than
+ * aom_memalign*/
 #define DEFAULT_ALIGNMENT 32
 #else
 #define DEFAULT_ALIGNMENT (2 * sizeof(void *)) /* NOLINT */
@@ -28,4 +28,4 @@
 #define align_addr(addr, align) \
   (void *)(((size_t)(addr) + ((align)-1)) & (size_t) - (align))
 
-#endif  // VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
+#endif  // AOM_MEM_INCLUDE_AOM_MEM_INTRNL_H_
diff --git a/aom_mem/vpx_mem.h b/aom_mem/vpx_mem.h
deleted file mode 100644
index c14f288..0000000
--- a/aom_mem/vpx_mem.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VPX_MEM_VPX_MEM_H_
-#define VPX_MEM_VPX_MEM_H_
-
-#include "vpx_config.h"
-#if defined(__uClinux__)
-#include <lddk.h>
-#endif
-
-#include <stdlib.h>
-#include <stddef.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-void *vpx_memalign(size_t align, size_t size);
-void *vpx_malloc(size_t size);
-void *vpx_calloc(size_t num, size_t size);
-void *vpx_realloc(void *memblk, size_t size);
-void vpx_free(void *memblk);
-
-#if CONFIG_VP9_HIGHBITDEPTH
-void *vpx_memset16(void *dest, int val, size_t length);
-#endif
-
-#include <string.h>
-
-#ifdef VPX_MEM_PLTFRM
-#include VPX_MEM_PLTFRM
-#endif
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif  // VPX_MEM_VPX_MEM_H_
diff --git a/aom_mem/vpx_mem.mk b/aom_mem/vpx_mem.mk
deleted file mode 100644
index 7f275ea..0000000
--- a/aom_mem/vpx_mem.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-MEM_SRCS-yes += vpx_mem.mk
-MEM_SRCS-yes += vpx_mem.c
-MEM_SRCS-yes += vpx_mem.h
-MEM_SRCS-yes += include/vpx_mem_intrnl.h
diff --git a/aom_ports/vpx_once.h b/aom_ports/aom_once.h
similarity index 94%
rename from aom_ports/vpx_once.h
rename to aom_ports/aom_once.h
index 7d9fc3b..3dd1650 100644
--- a/aom_ports/vpx_once.h
+++ b/aom_ports/aom_once.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_VPX_ONCE_H_
-#define VPX_PORTS_VPX_ONCE_H_
+#ifndef AOM_PORTS_AOM_ONCE_H_
+#define AOM_PORTS_AOM_ONCE_H_
 
-#include "vpx_config.h"
+#include "aom_config.h"
 
 /* Implement a function wrapper to guarantee initialization
  * thread-safety for library singletons.
@@ -20,18 +20,18 @@
  * used with one common argument per compilation unit. So
  *
  * file1.c:
- *   vpx_once(foo);
+ *   aom_once(foo);
  *   ...
- *   vpx_once(foo);
+ *   aom_once(foo);
  *
  *   file2.c:
- *     vpx_once(bar);
+ *     aom_once(bar);
  *
  * will ensure foo() and bar() are each called only once, but in
  *
  * file1.c:
- *   vpx_once(foo);
- *   vpx_once(bar):
+ *   aom_once(foo);
+ *   aom_once(bar);
  *
  * bar() will never be called because the lock is used up
  * by the call to foo().
@@ -137,4 +137,4 @@
 }
 #endif
 
-#endif  // VPX_PORTS_VPX_ONCE_H_
+#endif  // AOM_PORTS_AOM_ONCE_H_
diff --git a/aom_ports/vpx_ports.mk b/aom_ports/aom_ports.mk
similarity index 92%
rename from aom_ports/vpx_ports.mk
rename to aom_ports/aom_ports.mk
index 36b1493..4afbc9a 100644
--- a/aom_ports/vpx_ports.mk
+++ b/aom_ports/aom_ports.mk
@@ -9,13 +9,13 @@
 ##
 
 
-PORTS_SRCS-yes += vpx_ports.mk
+PORTS_SRCS-yes += aom_ports.mk
 
 PORTS_SRCS-yes += bitops.h
 PORTS_SRCS-yes += mem.h
 PORTS_SRCS-yes += msvc.h
 PORTS_SRCS-yes += system_state.h
-PORTS_SRCS-yes += vpx_timer.h
+PORTS_SRCS-yes += aom_timer.h
 
 ifeq ($(ARCH_X86)$(ARCH_X86_64),yes)
 PORTS_SRCS-yes += emms.asm
diff --git a/aom_ports/vpx_timer.h b/aom_ports/aom_timer.h
similarity index 77%
rename from aom_ports/vpx_timer.h
rename to aom_ports/aom_timer.h
index 6d145da..8735b13 100644
--- a/aom_ports/vpx_timer.h
+++ b/aom_ports/aom_timer.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_VPX_TIMER_H_
-#define VPX_PORTS_VPX_TIMER_H_
+#ifndef AOM_PORTS_AOM_TIMER_H_
+#define AOM_PORTS_AOM_TIMER_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #if CONFIG_OS_SUPPORT
 
@@ -45,7 +45,7 @@
 #endif
 #endif
 
-struct vpx_usec_timer {
+struct aom_usec_timer {
 #if defined(_WIN32)
   LARGE_INTEGER begin, end;
 #else
@@ -53,7 +53,7 @@
 #endif
 };
 
-static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {
+static INLINE void aom_usec_timer_start(struct aom_usec_timer *t) {
 #if defined(_WIN32)
   QueryPerformanceCounter(&t->begin);
 #else
@@ -61,7 +61,7 @@
 #endif
 }
 
-static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {
+static INLINE void aom_usec_timer_mark(struct aom_usec_timer *t) {
 #if defined(_WIN32)
   QueryPerformanceCounter(&t->end);
 #else
@@ -69,7 +69,7 @@
 #endif
 }
 
-static INLINE int64_t vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
+static INLINE int64_t aom_usec_timer_elapsed(struct aom_usec_timer *t) {
 #if defined(_WIN32)
   LARGE_INTEGER freq, diff;
 
@@ -92,16 +92,16 @@
 #define timersub(a, b, result)
 #endif
 
-struct vpx_usec_timer {
+struct aom_usec_timer {
   void *dummy;
 };
 
-static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {}
+static INLINE void aom_usec_timer_start(struct aom_usec_timer *t) {}
 
-static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {}
+static INLINE void aom_usec_timer_mark(struct aom_usec_timer *t) {}
 
-static INLINE int vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; }
+static INLINE int aom_usec_timer_elapsed(struct aom_usec_timer *t) { return 0; }
 
 #endif /* CONFIG_OS_SUPPORT */
 
-#endif  // VPX_PORTS_VPX_TIMER_H_
+#endif  // AOM_PORTS_AOM_TIMER_H_
diff --git a/aom_ports/arm.h b/aom_ports/arm.h
index 7be6104..a4c9af9 100644
--- a/aom_ports/arm.h
+++ b/aom_ports/arm.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_ARM_H_
-#define VPX_PORTS_ARM_H_
+#ifndef AOM_PORTS_ARM_H_
+#define AOM_PORTS_ARM_H_
 #include <stdlib.h>
-#include "vpx_config.h"
+#include "aom_config.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -29,11 +29,11 @@
 // Earlier gcc compilers have issues with some neon intrinsics
 #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 4 && \
     __GNUC_MINOR__ <= 6
-#define VPX_INCOMPATIBLE_GCC
+#define AOM_INCOMPATIBLE_GCC
 #endif
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_PORTS_ARM_H_
+#endif  // AOM_PORTS_ARM_H_
diff --git a/aom_ports/arm_cpudetect.c b/aom_ports/arm_cpudetect.c
index fe98662..c4eb1fa 100644
--- a/aom_ports/arm_cpudetect.c
+++ b/aom_ports/arm_cpudetect.c
@@ -11,7 +11,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include "aom_ports/arm.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #ifdef WINAPI_FAMILY
 #include <winapifamily.h>
@@ -22,7 +22,7 @@
 
 static int arm_cpu_env_flags(int *flags) {
   char *env;
-  env = getenv("VPX_SIMD_CAPS");
+  env = getenv("AOM_SIMD_CAPS");
   if (env && *env) {
     *flags = (int)strtol(env, NULL, 0);
     return 0;
@@ -33,7 +33,7 @@
 
 static int arm_cpu_env_mask(void) {
   char *env;
-  env = getenv("VPX_SIMD_CAPS_MASK");
+  env = getenv("AOM_SIMD_CAPS_MASK");
   return env && *env ? (int)strtol(env, NULL, 0) : ~0;
 }
 
diff --git a/aom_ports/bitops.h b/aom_ports/bitops.h
index 3c76547..eb80b22 100644
--- a/aom_ports/bitops.h
+++ b/aom_ports/bitops.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_BITOPS_H_
-#define VPX_PORTS_BITOPS_H_
+#ifndef AOM_PORTS_BITOPS_H_
+#define AOM_PORTS_BITOPS_H_
 
 #include <assert.h>
 
@@ -72,4 +72,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_PORTS_BITOPS_H_
+#endif  // AOM_PORTS_BITOPS_H_
diff --git a/aom_ports/config.h b/aom_ports/config.h
index 3c1ab99..2abd73a 100644
--- a/aom_ports/config.h
+++ b/aom_ports/config.h
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_CONFIG_H_
-#define VPX_PORTS_CONFIG_H_
+#ifndef AOM_PORTS_CONFIG_H_
+#define AOM_PORTS_CONFIG_H_
 
-#include "vpx_config.h"
+#include "aom_config.h"
 
-#endif  // VPX_PORTS_CONFIG_H_
+#endif  // AOM_PORTS_CONFIG_H_
diff --git a/aom_ports/emmintrin_compat.h b/aom_ports/emmintrin_compat.h
index 903534e..8d707e6 100644
--- a/aom_ports/emmintrin_compat.h
+++ b/aom_ports/emmintrin_compat.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_EMMINTRIN_COMPAT_H_
-#define VPX_PORTS_EMMINTRIN_COMPAT_H_
+#ifndef AOM_PORTS_EMMINTRIN_COMPAT_H_
+#define AOM_PORTS_EMMINTRIN_COMPAT_H_
 
 #if defined(__GNUC__) && __GNUC__ < 4
 /* From emmintrin.h (gcc 4.5.3) */
@@ -52,4 +52,4 @@
 }
 #endif
 
-#endif  // VPX_PORTS_EMMINTRIN_COMPAT_H_
+#endif  // AOM_PORTS_EMMINTRIN_COMPAT_H_
diff --git a/aom_ports/emms.asm b/aom_ports/emms.asm
index e26ec9a..547d582 100644
--- a/aom_ports/emms.asm
+++ b/aom_ports/emms.asm
@@ -12,15 +12,15 @@
 %include "aom_ports/x86_abi_support.asm"
 
 section .text
-global sym(vpx_reset_mmx_state) PRIVATE
-sym(vpx_reset_mmx_state):
+global sym(aom_reset_mmx_state) PRIVATE
+sym(aom_reset_mmx_state):
     emms
     ret
 
 
 %if LIBAOM_YASM_WIN64
-global sym(vpx_winx64_fldcw) PRIVATE
-sym(vpx_winx64_fldcw):
+global sym(aom_winx64_fldcw) PRIVATE
+sym(aom_winx64_fldcw):
     sub   rsp, 8
     mov   [rsp], rcx ; win x64 specific
     fldcw [rsp]
@@ -28,8 +28,8 @@
     ret
 
 
-global sym(vpx_winx64_fstcw) PRIVATE
-sym(vpx_winx64_fstcw):
+global sym(aom_winx64_fstcw) PRIVATE
+sym(aom_winx64_fstcw):
     sub   rsp, 8
     fstcw [rsp]
     mov   rax, [rsp]
diff --git a/aom_ports/mem.h b/aom_ports/mem.h
index 34b7455..dbcba97 100644
--- a/aom_ports/mem.h
+++ b/aom_ports/mem.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_MEM_H_
-#define VPX_PORTS_MEM_H_
+#ifndef AOM_PORTS_MEM_H_
+#define AOM_PORTS_MEM_H_
 
-#include "vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "aom_config.h"
+#include "aom/aom_integer.h"
 
 #if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C)
 #define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
@@ -49,8 +49,8 @@
   (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
 
 #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#endif  // VPX_PORTS_MEM_H_
+#endif  // AOM_PORTS_MEM_H_
diff --git a/aom_ports/mem_ops.h b/aom_ports/mem_ops.h
index 343f275..70e21e0 100644
--- a/aom_ports/mem_ops.h
+++ b/aom_ports/mem_ops.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_MEM_OPS_H_
-#define VPX_PORTS_MEM_OPS_H_
+#ifndef AOM_PORTS_MEM_OPS_H_
+#define AOM_PORTS_MEM_OPS_H_
 
 /* \file
  * \brief Provides portable memory access primitives
@@ -133,7 +133,7 @@
 }
 
 #define mem_get_s_generic(end, sz)                                            \
-  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) { \
+  static AOM_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) { \
     const MAU_T *mem = (const MAU_T *)vmem;                                   \
     signed MEM_VALUE_T val = mem_get_##end##sz(mem);                          \
     return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz); \
@@ -166,7 +166,7 @@
 
 #undef  mem_put_be16
 #define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16)
-static VPX_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >> 8) & 0xff);
@@ -175,7 +175,7 @@
 
 #undef  mem_put_be24
 #define mem_put_be24 mem_ops_wrap_symbol(mem_put_be24)
-static VPX_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >> 16) & 0xff);
@@ -185,7 +185,7 @@
 
 #undef  mem_put_be32
 #define mem_put_be32 mem_ops_wrap_symbol(mem_put_be32)
-static VPX_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >> 24) & 0xff);
@@ -196,7 +196,7 @@
 
 #undef  mem_put_le16
 #define mem_put_le16 mem_ops_wrap_symbol(mem_put_le16)
-static VPX_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >> 0) & 0xff);
@@ -205,7 +205,7 @@
 
 #undef  mem_put_le24
 #define mem_put_le24 mem_ops_wrap_symbol(mem_put_le24)
-static VPX_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >>  0) & 0xff);
@@ -215,7 +215,7 @@
 
 #undef  mem_put_le32
 #define mem_put_le32 mem_ops_wrap_symbol(mem_put_le32)
-static VPX_INLINE void mem_put_le32(void *vmem, MEM_VALUE_T val) {
+static AOM_INLINE void mem_put_le32(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (MAU_T)((val >>  0) & 0xff);
@@ -225,4 +225,4 @@
 }
 /* clang-format on */
 
-#endif  // VPX_PORTS_MEM_OPS_H_
+#endif  // AOM_PORTS_MEM_OPS_H_
diff --git a/aom_ports/mem_ops_aligned.h b/aom_ports/mem_ops_aligned.h
index d352992..a98f85a 100644
--- a/aom_ports/mem_ops_aligned.h
+++ b/aom_ports/mem_ops_aligned.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_MEM_OPS_ALIGNED_H_
-#define VPX_PORTS_MEM_OPS_ALIGNED_H_
+#ifndef AOM_PORTS_MEM_OPS_ALIGNED_H_
+#define AOM_PORTS_MEM_OPS_ALIGNED_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 /* \file
  * \brief Provides portable memory access primitives for operating on aligned
@@ -44,21 +44,21 @@
 #define swap_endian_32_se(val, raw) swap_endian_32(val, raw)
 
 #define mem_get_ne_aligned_generic(end, sz)                           \
-  static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
+  static AOM_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
       const void *vmem) {                                             \
     const uint##sz##_t *mem = (const uint##sz##_t *)vmem;             \
     return *mem;                                                      \
   }
 
 #define mem_get_sne_aligned_generic(end, sz)                         \
-  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
+  static AOM_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
       const void *vmem) {                                            \
     const int##sz##_t *mem = (const int##sz##_t *)vmem;              \
     return *mem;                                                     \
   }
 
 #define mem_get_se_aligned_generic(end, sz)                           \
-  static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
+  static AOM_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
       const void *vmem) {                                             \
     const uint##sz##_t *mem = (const uint##sz##_t *)vmem;             \
     unsigned MEM_VALUE_T val, raw = *mem;                             \
@@ -67,7 +67,7 @@
   }
 
 #define mem_get_sse_aligned_generic(end, sz)                         \
-  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
+  static AOM_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
       const void *vmem) {                                            \
     const int##sz##_t *mem = (const int##sz##_t *)vmem;              \
     unsigned MEM_VALUE_T val, raw = *mem;                            \
@@ -76,21 +76,21 @@
   }
 
 #define mem_put_ne_aligned_generic(end, sz)                             \
-  static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
+  static AOM_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
                                                      MEM_VALUE_T val) { \
     uint##sz##_t *mem = (uint##sz##_t *)vmem;                           \
     *mem = (uint##sz##_t)val;                                           \
   }
 
 #define mem_put_se_aligned_generic(end, sz)                             \
-  static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
+  static AOM_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
                                                      MEM_VALUE_T val) { \
     uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;                      \
     swap_endian_##sz(raw, val);                                         \
     *mem = (uint##sz##_t)raw;                                           \
   }
 
-#include "vpx_config.h"
+#include "aom_config.h"
 #if CONFIG_BIG_ENDIAN
 #define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be, sz)
 #define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be, sz)
@@ -168,4 +168,4 @@
 #undef swap_endian_32_se
 /* clang-format on */
 
-#endif  // VPX_PORTS_MEM_OPS_ALIGNED_H_
+#endif  // AOM_PORTS_MEM_OPS_ALIGNED_H_
diff --git a/aom_ports/msvc.h b/aom_ports/msvc.h
index 6183f90..e7fcb48 100644
--- a/aom_ports/msvc.h
+++ b/aom_ports/msvc.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_MSVC_H_
-#define VPX_PORTS_MSVC_H_
+#ifndef AOM_PORTS_MSVC_H_
+#define AOM_PORTS_MSVC_H_
 #ifdef _MSC_VER
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #if _MSC_VER < 1900  // VS2015 provides snprintf
 #define snprintf _snprintf
@@ -43,4 +43,4 @@
 #endif  // _MSC_VER < 1800
 
 #endif  // _MSC_VER
-#endif  // VPX_PORTS_MSVC_H_
+#endif  // AOM_PORTS_MSVC_H_
diff --git a/aom_ports/system_state.h b/aom_ports/system_state.h
index 086c646..38ab319 100644
--- a/aom_ports/system_state.h
+++ b/aom_ports/system_state.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_SYSTEM_STATE_H_
-#define VPX_PORTS_SYSTEM_STATE_H_
+#ifndef AOM_PORTS_SYSTEM_STATE_H_
+#define AOM_PORTS_SYSTEM_STATE_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #if ARCH_X86 || ARCH_X86_64
-void vpx_reset_mmx_state(void);
-#define vpx_clear_system_state() vpx_reset_mmx_state()
+void aom_reset_mmx_state(void);
+#define aom_clear_system_state() aom_reset_mmx_state()
 #else
-#define vpx_clear_system_state()
+#define aom_clear_system_state()
 #endif  // ARCH_X86 || ARCH_X86_64
-#endif  // VPX_PORTS_SYSTEM_STATE_H_
+#endif  // AOM_PORTS_SYSTEM_STATE_H_
diff --git a/aom_ports/x86.h b/aom_ports/x86.h
index 3b8ba10..82443a5 100644
--- a/aom_ports/x86.h
+++ b/aom_ports/x86.h
@@ -8,39 +8,39 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_PORTS_X86_H_
-#define VPX_PORTS_X86_H_
+#ifndef AOM_PORTS_X86_H_
+#define AOM_PORTS_X86_H_
 #include <stdlib.h>
 
 #if defined(_MSC_VER)
 #include <intrin.h> /* For __cpuidex, __rdtsc */
 #endif
 
-#include "vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 typedef enum {
-  VPX_CPU_UNKNOWN = -1,
-  VPX_CPU_AMD,
-  VPX_CPU_AMD_OLD,
-  VPX_CPU_CENTAUR,
-  VPX_CPU_CYRIX,
-  VPX_CPU_INTEL,
-  VPX_CPU_NEXGEN,
-  VPX_CPU_NSC,
-  VPX_CPU_RISE,
-  VPX_CPU_SIS,
-  VPX_CPU_TRANSMETA,
-  VPX_CPU_TRANSMETA_OLD,
-  VPX_CPU_UMC,
-  VPX_CPU_VIA,
+  AOM_CPU_UNKNOWN = -1,
+  AOM_CPU_AMD,
+  AOM_CPU_AMD_OLD,
+  AOM_CPU_CENTAUR,
+  AOM_CPU_CYRIX,
+  AOM_CPU_INTEL,
+  AOM_CPU_NEXGEN,
+  AOM_CPU_NSC,
+  AOM_CPU_RISE,
+  AOM_CPU_SIS,
+  AOM_CPU_TRANSMETA,
+  AOM_CPU_TRANSMETA_OLD,
+  AOM_CPU_UMC,
+  AOM_CPU_VIA,
 
-  VPX_CPU_LAST
-} vpx_cpu_t;
+  AOM_CPU_LAST
+} aom_cpu_t;
 
 #if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
 #if ARCH_X86_64
@@ -173,11 +173,11 @@
   (void)reg_ebx;
 
   /* See if the CPU capabilities are being overridden by the environment */
-  env = getenv("VPX_SIMD_CAPS");
+  env = getenv("AOM_SIMD_CAPS");
 
   if (env && *env) return (int)strtol(env, NULL, 0);
 
-  env = getenv("VPX_SIMD_CAPS_MASK");
+  env = getenv("AOM_SIMD_CAPS_MASK");
 
   if (env && *env) mask = (unsigned int)strtoul(env, NULL, 0);
 
@@ -291,10 +291,10 @@
 }
 #elif ARCH_X86_64
 /* No fldcw intrinsics on Windows x64, punt to external asm */
-extern void vpx_winx64_fldcw(unsigned short mode);
-extern unsigned short vpx_winx64_fstcw(void);
-#define x87_set_control_word vpx_winx64_fldcw
-#define x87_get_control_word vpx_winx64_fstcw
+extern void aom_winx64_fldcw(unsigned short mode);
+extern unsigned short aom_winx64_fstcw(void);
+#define x87_set_control_word aom_winx64_fldcw
+#define x87_get_control_word aom_winx64_fstcw
 #else
 static void x87_set_control_word(unsigned short mode) {
   __asm { fldcw mode }
@@ -312,10 +312,10 @@
   return mode;
 }
 
-extern void vpx_reset_mmx_state(void);
+extern void aom_reset_mmx_state(void);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPX_PORTS_X86_H_
+#endif  // AOM_PORTS_X86_H_
diff --git a/aom_ports/x86_abi_support.asm b/aom_ports/x86_abi_support.asm
index a2281a9..799cea5 100644
--- a/aom_ports/x86_abi_support.asm
+++ b/aom_ports/x86_abi_support.asm
@@ -9,7 +9,7 @@
 ;
 
 
-%include "vpx_config.asm"
+%include "aom_config.asm"
 
 ; 32/64 bit compatibility macros
 ;
diff --git a/aom_scale/vpx_scale.h b/aom_scale/aom_scale.h
similarity index 82%
rename from aom_scale/vpx_scale.h
rename to aom_scale/aom_scale.h
index 19bb09e..b525b35 100644
--- a/aom_scale/vpx_scale.h
+++ b/aom_scale/aom_scale.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_SCALE_VPX_SCALE_H_
-#define VPX_SCALE_VPX_SCALE_H_
+#ifndef AOM_SCALE_AOM_SCALE_H_
+#define AOM_SCALE_AOM_SCALE_H_
 
 #include "aom_scale/yv12config.h"
 
-extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+extern void aom_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
                             unsigned char *temp_area, unsigned char temp_height,
                             unsigned int hscale, unsigned int hratio,
                             unsigned int vscale, unsigned int vratio,
                             unsigned int interlaced);
 
-#endif  // VPX_SCALE_VPX_SCALE_H_
+#endif  // AOM_SCALE_AOM_SCALE_H_
diff --git a/aom_scale/aom_scale.mk b/aom_scale/aom_scale.mk
new file mode 100644
index 0000000..e3a68cf
--- /dev/null
+++ b/aom_scale/aom_scale.mk
@@ -0,0 +1,16 @@
+SCALE_SRCS-yes += aom_scale.mk
+SCALE_SRCS-yes += yv12config.h
+SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += aom_scale.h
+SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/aom_scale.c
+SCALE_SRCS-yes += generic/yv12config.c
+SCALE_SRCS-yes += generic/yv12extend.c
+SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/gen_scalers.c
+SCALE_SRCS-yes += aom_scale_rtcd.c
+SCALE_SRCS-yes += aom_scale_rtcd.pl
+
+#mips(dspr2)
+SCALE_SRCS-$(HAVE_DSPR2)  += mips/dspr2/yv12extend_dspr2.c
+
+SCALE_SRCS-no += $(SCALE_SRCS_REMOVE-yes)
+
+$(eval $(call rtcd_h_template,aom_scale_rtcd,aom_scale/aom_scale_rtcd.pl))
diff --git a/aom_scale/vpx_scale_rtcd.c b/aom_scale/aom_scale_rtcd.c
similarity index 75%
rename from aom_scale/vpx_scale_rtcd.c
rename to aom_scale/aom_scale_rtcd.c
index ed32e12..96a81e9 100644
--- a/aom_scale/vpx_scale_rtcd.c
+++ b/aom_scale/aom_scale_rtcd.c
@@ -7,9 +7,9 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #define RTCD_C
-#include "./vpx_scale_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./aom_scale_rtcd.h"
+#include "aom_ports/aom_once.h"
 
-void vpx_scale_rtcd() { once(setup_rtcd_internal); }
+void aom_scale_rtcd() { once(setup_rtcd_internal); }
diff --git a/aom_scale/aom_scale_rtcd.pl b/aom_scale/aom_scale_rtcd.pl
new file mode 100644
index 0000000..fd6b577
--- /dev/null
+++ b/aom_scale/aom_scale_rtcd.pl
@@ -0,0 +1,35 @@
+sub aom_scale_forward_decls() {
+print <<EOF
+struct yv12_buffer_config;
+EOF
+}
+forward_decls qw/aom_scale_forward_decls/;
+
+# Scaler functions
+if (aom_config("CONFIG_SPATIAL_RESAMPLING") eq "yes") {
+    add_proto qw/void aom_horizontal_line_5_4_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void aom_vertical_band_5_4_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void aom_horizontal_line_5_3_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void aom_vertical_band_5_3_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void aom_horizontal_line_2_1_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void aom_vertical_band_2_1_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void aom_vertical_band_2_1_scale_i/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+}
+
+add_proto qw/void aom_yv12_extend_frame_borders/, "struct yv12_buffer_config *ybf";
+
+add_proto qw/void aom_yv12_copy_frame/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
+
+add_proto qw/void aom_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
+
+if (aom_config("CONFIG_AV1") eq "yes") {
+    add_proto qw/void aom_extend_frame_borders/, "struct yv12_buffer_config *ybf";
+    specialize qw/aom_extend_frame_borders dspr2/;
+
+    add_proto qw/void aom_extend_frame_inner_borders/, "struct yv12_buffer_config *ybf";
+    specialize qw/aom_extend_frame_inner_borders dspr2/;
+
+    add_proto qw/void aom_extend_frame_borders_y/, "struct yv12_buffer_config *ybf";
+    specialize qw/aom_extend_frame_borders_y/;
+}
+1;
diff --git a/aom_scale/generic/vpx_scale.c b/aom_scale/generic/aom_scale.c
similarity index 96%
rename from aom_scale/generic/vpx_scale.c
rename to aom_scale/generic/aom_scale.c
index 5bf5835..8a99082 100644
--- a/aom_scale/generic/vpx_scale.c
+++ b/aom_scale/generic/aom_scale.c
@@ -19,9 +19,9 @@
 /****************************************************************************
 *  Header Files
 ****************************************************************************/
-#include "./vpx_scale_rtcd.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom_scale/vpx_scale.h"
+#include "./aom_scale_rtcd.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_scale/aom_scale.h"
 #include "aom_scale/yv12config.h"
 
 typedef struct {
@@ -275,15 +275,15 @@
   switch (hratio * 10 / hscale) {
     case 8:
       /* 4-5 Scale in Width direction */
-      horiz_line_scale = vpx_horizontal_line_5_4_scale;
+      horiz_line_scale = aom_horizontal_line_5_4_scale;
       break;
     case 6:
       /* 3-5 Scale in Width direction */
-      horiz_line_scale = vpx_horizontal_line_5_3_scale;
+      horiz_line_scale = aom_horizontal_line_5_3_scale;
       break;
     case 5:
       /* 1-2 Scale in Width direction */
-      horiz_line_scale = vpx_horizontal_line_2_1_scale;
+      horiz_line_scale = aom_horizontal_line_2_1_scale;
       break;
     default:
       /* The ratio is not acceptable now */
@@ -295,13 +295,13 @@
   switch (vratio * 10 / vscale) {
     case 8:
       /* 4-5 Scale in vertical direction */
-      vert_band_scale = vpx_vertical_band_5_4_scale;
+      vert_band_scale = aom_vertical_band_5_4_scale;
       source_band_height = 5;
       dest_band_height = 4;
       break;
     case 6:
       /* 3-5 Scale in vertical direction */
-      vert_band_scale = vpx_vertical_band_5_3_scale;
+      vert_band_scale = aom_vertical_band_5_3_scale;
       source_band_height = 5;
       dest_band_height = 3;
       break;
@@ -310,11 +310,11 @@
 
       if (interlaced) {
         /* if the content is interlaced, point sampling is used */
-        vert_band_scale = vpx_vertical_band_2_1_scale;
+        vert_band_scale = aom_vertical_band_2_1_scale;
       } else {
         interpolation = 1;
         /* if the content is progressive, interplo */
-        vert_band_scale = vpx_vertical_band_2_1_scale_i;
+        vert_band_scale = aom_vertical_band_2_1_scale_i;
       }
 
       source_band_height = 2;
@@ -443,7 +443,7 @@
 
 /****************************************************************************
  *
- *  ROUTINE       : vpx_scale_frame
+ *  ROUTINE       : aom_scale_frame
  *
  *  INPUTS        : YV12_BUFFER_CONFIG *src        : Pointer to frame to be
  *                                                   scaled.
@@ -471,7 +471,7 @@
  *                  caching.
  *
  ****************************************************************************/
-void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+void aom_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
                      unsigned char *temp_area, unsigned char temp_height,
                      unsigned int hscale, unsigned int hratio,
                      unsigned int vscale, unsigned int vratio,
diff --git a/aom_scale/generic/gen_scalers.c b/aom_scale/generic/gen_scalers.c
index c4cf255..2eaee34 100644
--- a/aom_scale/generic/gen_scalers.c
+++ b/aom_scale/generic/gen_scalers.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_scale_rtcd.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_scale_rtcd.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_mem/aom_mem.h"
 /****************************************************************************
 *  Imports
 ****************************************************************************/
@@ -33,7 +33,7 @@
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-void vpx_horizontal_line_5_4_scale_c(const unsigned char *source,
+void aom_horizontal_line_5_4_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -61,7 +61,7 @@
   }
 }
 
-void vpx_vertical_band_5_4_scale_c(unsigned char *source,
+void aom_vertical_band_5_4_scale_c(unsigned char *source,
                                    unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
@@ -89,7 +89,7 @@
 
 /*7***************************************************************************
  *
- *  ROUTINE       : vpx_horizontal_line_3_5_scale_c
+ *  ROUTINE       : aom_horizontal_line_3_5_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
  *                  unsigned int source_width   : Stride of source.
@@ -107,7 +107,7 @@
  *
  *
  ****************************************************************************/
-void vpx_horizontal_line_5_3_scale_c(const unsigned char *source,
+void aom_horizontal_line_5_3_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -134,7 +134,7 @@
   }
 }
 
-void vpx_vertical_band_5_3_scale_c(unsigned char *source,
+void aom_vertical_band_5_3_scale_c(unsigned char *source,
                                    unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
@@ -161,7 +161,7 @@
 
 /****************************************************************************
  *
- *  ROUTINE       : vpx_horizontal_line_1_2_scale_c
+ *  ROUTINE       : aom_horizontal_line_1_2_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
  *                  unsigned int source_width   : Stride of source.
@@ -178,7 +178,7 @@
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-void vpx_horizontal_line_2_1_scale_c(const unsigned char *source,
+void aom_horizontal_line_2_1_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -197,7 +197,7 @@
   }
 }
 
-void vpx_vertical_band_2_1_scale_c(unsigned char *source,
+void aom_vertical_band_2_1_scale_c(unsigned char *source,
                                    unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
@@ -206,7 +206,7 @@
   memcpy(dest, source, dest_width);
 }
 
-void vpx_vertical_band_2_1_scale_i_c(unsigned char *source,
+void aom_vertical_band_2_1_scale_i_c(unsigned char *source,
                                      unsigned int src_pitch,
                                      unsigned char *dest,
                                      unsigned int dest_pitch,
diff --git a/aom_scale/generic/yv12config.c b/aom_scale/generic/yv12config.c
index a2ee28e..0e661f2 100644
--- a/aom_scale/generic/yv12config.c
+++ b/aom_scale/generic/yv12config.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 
 #include "aom_scale/yv12config.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 /****************************************************************************
@@ -24,13 +24,12 @@
 #define yv12_align_addr(addr, align) \
   (void *)(((size_t)(addr) + ((align)-1)) & (size_t) - (align))
 
-#if CONFIG_VP10
-// TODO(jkoleszar): Maybe replace this with struct vpx_image
-
-int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
+int aom_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
   if (ybf) {
+    // If libaom is using frame buffer callbacks then buffer_alloc_sz must
+    // not be set.
     if (ybf->buffer_alloc_sz > 0) {
-      vpx_free(ybf->buffer_alloc);
+      aom_free(ybf->buffer_alloc);
     }
 
     /* buffer_alloc isn't accessed by most functions.  Rather y_buffer,
@@ -44,16 +43,106 @@
   return 0;
 }
 
-int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+int aom_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width,
+                                  int height, int border) {
+  if (ybf) {
+    int aligned_width = (width + 15) & ~15;
+    int aligned_height = (height + 15) & ~15;
+    int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
+    int yplane_size = (aligned_height + 2 * border) * y_stride;
+    int uv_width = aligned_width >> 1;
+    int uv_height = aligned_height >> 1;
+    /** There is currently a bunch of code which assumes
+      *  uv_stride == y_stride/2, so enforce this here. */
+    int uv_stride = y_stride >> 1;
+    int uvplane_size = (uv_height + border) * uv_stride;
+    const int frame_size = yplane_size + 2 * uvplane_size;
+
+    if (!ybf->buffer_alloc) {
+      ybf->buffer_alloc = (uint8_t *)aom_memalign(32, frame_size);
+      ybf->buffer_alloc_sz = frame_size;
+    }
+
+    if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size) return -1;
+
+    /* Only support allocating buffers that have a border that's a multiple
+     * of 32. The border restriction is required to get 16-byte alignment of
+     * the start of the chroma rows without introducing an arbitrary gap
+     * between planes, which would break the semantics of things like
+     * aom_img_set_rect(). */
+    if (border & 0x1f) return -3;
+
+    ybf->y_crop_width = width;
+    ybf->y_crop_height = height;
+    ybf->y_width = aligned_width;
+    ybf->y_height = aligned_height;
+    ybf->y_stride = y_stride;
+
+    ybf->uv_crop_width = (width + 1) / 2;
+    ybf->uv_crop_height = (height + 1) / 2;
+    ybf->uv_width = uv_width;
+    ybf->uv_height = uv_height;
+    ybf->uv_stride = uv_stride;
+
+    ybf->alpha_width = 0;
+    ybf->alpha_height = 0;
+    ybf->alpha_stride = 0;
+
+    ybf->border = border;
+    ybf->frame_size = frame_size;
+
+    ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
+    ybf->u_buffer =
+        ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2;
+    ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
+                    (border / 2 * uv_stride) + border / 2;
+    ybf->alpha_buffer = NULL;
+
+    ybf->corrupted = 0; /* assume not corrupted by errors */
+    return 0;
+  }
+  return -2;
+}
+
+int aom_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                                int border) {
+  if (ybf) {
+    aom_yv12_de_alloc_frame_buffer(ybf);
+    return aom_yv12_realloc_frame_buffer(ybf, width, height, border);
+  }
+  return -2;
+}
+
+#if CONFIG_AV1
+// TODO(jkoleszar): Maybe replace this with struct aom_image
+
+int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
+  if (ybf) {
+    if (ybf->buffer_alloc_sz > 0) {
+      aom_free(ybf->buffer_alloc);
+    }
+
+    /* buffer_alloc isn't accessed by most functions.  Rather y_buffer,
+      u_buffer and v_buffer point to buffer_alloc and are used.  Clear out
+      all of this so that a freed pointer isn't inadvertently used */
+    memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
+  } else {
+    return -1;
+  }
+
+  return 0;
+}
+
+int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                              int ss_x, int ss_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
                              int border, int byte_alignment,
-                             vpx_codec_frame_buffer_t *fb,
-                             vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv) {
+                             aom_codec_frame_buffer_t *fb,
+                             aom_get_frame_buffer_cb_fn_t cb, void *cb_priv) {
   if (ybf) {
-    const int vpx_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
+    const int aom_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
     const int aligned_width = (width + 7) & ~7;
     const int aligned_height = (height + 7) & ~7;
     const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
@@ -67,12 +156,12 @@
     const uint64_t uvplane_size =
         (uv_height + 2 * uv_border_h) * (uint64_t)uv_stride + byte_alignment;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint64_t frame_size =
         (1 + use_highbitdepth) * (yplane_size + 2 * uvplane_size);
 #else
     const uint64_t frame_size = yplane_size + 2 * uvplane_size;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     uint8_t *buf = NULL;
 
@@ -99,18 +188,14 @@
       memset(ybf->buffer_alloc, 0, (int)frame_size);
 #endif
 #endif
-    } else if (frame_size > ybf->buffer_alloc_sz) {
+    } else if (frame_size > (size_t)ybf->buffer_alloc_sz) {
       // Allocation to hold larger frame, or first allocation.
-      vpx_free(ybf->buffer_alloc);
+      aom_free(ybf->buffer_alloc);
       ybf->buffer_alloc = NULL;
 
       if (frame_size != (size_t)frame_size) return -1;
 
-      // TODO(yunqingwang): On 32 bit systems, the maximum resolution supported
-      // in the encoder is 4k(3840x2160). The malloc() would fail if encoding
-      // >4k video on a 32 bit system. Later, maybe disable usage of up-sampled
-      // references to allow >4k video encoding on 32 bit platforms.
-      ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, (size_t)frame_size);
+      ybf->buffer_alloc = (uint8_t *)aom_memalign(32, (size_t)frame_size);
       if (!ybf->buffer_alloc) return -1;
 
       ybf->buffer_alloc_sz = (size_t)frame_size;
@@ -125,7 +210,7 @@
      * of 32. The border restriction is required to get 16-byte alignment of
      * the start of the chroma rows without introducing an arbitrary gap
      * between planes, which would break the semantics of things like
-     * vpx_img_set_rect(). */
+     * aom_img_set_rect(). */
     if (border & 0x1f) return -3;
 
     ybf->y_crop_width = width;
@@ -146,7 +231,7 @@
     ybf->subsampling_y = ss_y;
 
     buf = ybf->buffer_alloc;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_highbitdepth) {
       // Store uint16 addresses when using 16bit framebuffers
       buf = CONVERT_TO_BYTEPTR(ybf->buffer_alloc);
@@ -154,17 +239,17 @@
     } else {
       ybf->flags = 0;
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     ybf->y_buffer = (uint8_t *)yv12_align_addr(
-        buf + (border * y_stride) + border, vpx_byte_align);
+        buf + (border * y_stride) + border, aom_byte_align);
     ybf->u_buffer = (uint8_t *)yv12_align_addr(
         buf + yplane_size + (uv_border_h * uv_stride) + uv_border_w,
-        vpx_byte_align);
+        aom_byte_align);
     ybf->v_buffer =
         (uint8_t *)yv12_align_addr(buf + yplane_size + uvplane_size +
                                        (uv_border_h * uv_stride) + uv_border_w,
-                                   vpx_byte_align);
+                                   aom_byte_align);
 
     ybf->corrupted = 0; /* assume not corrupted by errors */
     return 0;
@@ -172,16 +257,16 @@
   return -2;
 }
 
-int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                            int ss_x, int ss_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
                            int border, int byte_alignment) {
   if (ybf) {
-    vpx_free_frame_buffer(ybf);
-    return vpx_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+    aom_free_frame_buffer(ybf);
+    return aom_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
+#if CONFIG_AOM_HIGHBITDEPTH
                                     use_highbitdepth,
 #endif
                                     border, byte_alignment, NULL, NULL, NULL);
diff --git a/aom_scale/generic/yv12extend.c b/aom_scale/generic/yv12extend.c
index fec6ec4..8e9a697 100644
--- a/aom_scale/generic/yv12extend.c
+++ b/aom_scale/generic/yv12extend.c
@@ -9,10 +9,10 @@
  */
 
 #include <assert.h>
-#include "./vpx_config.h"
-#include "./vpx_scale_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "./aom_scale_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_scale/yv12config.h"
 
@@ -56,7 +56,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void extend_plane_high(uint8_t *const src8, int src_stride, int width,
                               int height, int extend_top, int extend_left,
                               int extend_bottom, int extend_right) {
@@ -71,8 +71,8 @@
   uint16_t *dst_ptr2 = src + width;
 
   for (i = 0; i < height; ++i) {
-    vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
-    vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
+    aom_memset16(dst_ptr1, src_ptr1[0], extend_left);
+    aom_memset16(dst_ptr2, src_ptr2[0], extend_right);
     src_ptr1 += src_stride;
     src_ptr2 += src_stride;
     dst_ptr1 += src_stride;
@@ -99,7 +99,7 @@
 }
 #endif
 
-void vpx_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+void aom_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
   const int uv_border = ybf->border / 2;
 
   assert(ybf->border % 2 == 0);
@@ -108,7 +108,7 @@
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
     extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
                       ybf->y_crop_height, ybf->border, ybf->border,
@@ -143,7 +143,7 @@
                uv_border + ybf->uv_width - ybf->uv_crop_width);
 }
 
-#if CONFIG_VP10
+#if CONFIG_AV1
 static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
   const int c_w = ybf->uv_crop_width;
   const int c_h = ybf->uv_crop_height;
@@ -159,7 +159,7 @@
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
     extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
                       ybf->y_crop_height, ext_size, ext_size,
@@ -182,25 +182,25 @@
   extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 }
 
-void vpx_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+void aom_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
   extend_frame(ybf, ybf->border);
 }
 
-void vpx_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) {
-  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS)
-                           ? VPXINNERBORDERINPIXELS
+void aom_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) {
+  const int inner_bw = (ybf->border > AOMINNERBORDERINPIXELS)
+                           ? AOMINNERBORDERINPIXELS
                            : ybf->border;
   extend_frame(ybf, inner_bw);
 }
 
-void vpx_extend_frame_borders_y_c(YV12_BUFFER_CONFIG *ybf) {
+void aom_extend_frame_borders_y_c(YV12_BUFFER_CONFIG *ybf) {
   int ext_size = ybf->border;
   assert(ybf->y_height - ybf->y_crop_height < 16);
   assert(ybf->y_width - ybf->y_crop_width < 16);
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
     extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
                       ybf->y_crop_height, ext_size, ext_size,
@@ -214,20 +214,20 @@
                ext_size + ybf->y_height - ybf->y_crop_height,
                ext_size + ybf->y_width - ybf->y_crop_width);
 }
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AV1
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   memcpy(dst, src, num * sizeof(uint16_t));
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Copies the source image into the destination image and updates the
 // destination's UMV borders.
 // Note: The frames are assumed to be identical in size.
-void vpx_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
+void aom_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
                            YV12_BUFFER_CONFIG *dst_ybc) {
   int row;
   const uint8_t *src = src_ybc->y_buffer;
@@ -241,7 +241,7 @@
   assert(src_ybc->y_height == dst_ybc->y_height);
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
     assert(dst_ybc->flags & YV12_FLAG_HIGHBITDEPTH);
     for (row = 0; row < src_ybc->y_height; ++row) {
@@ -268,7 +268,7 @@
       dst += dst_ybc->uv_stride;
     }
 
-    vpx_yv12_extend_frame_borders_c(dst_ybc);
+    aom_yv12_extend_frame_borders_c(dst_ybc);
     return;
   } else {
     assert(!(dst_ybc->flags & YV12_FLAG_HIGHBITDEPTH));
@@ -299,16 +299,16 @@
     dst += dst_ybc->uv_stride;
   }
 
-  vpx_yv12_extend_frame_borders_c(dst_ybc);
+  aom_yv12_extend_frame_borders_c(dst_ybc);
 }
 
-void vpx_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
+void aom_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
                        YV12_BUFFER_CONFIG *dst_ybc) {
   int row;
   const uint8_t *src = src_ybc->y_buffer;
   uint8_t *dst = dst_ybc->y_buffer;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
     const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
     uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
diff --git a/aom_scale/mips/dspr2/yv12extend_dspr2.c b/aom_scale/mips/dspr2/yv12extend_dspr2.c
index 27a2605..378d609 100644
--- a/aom_scale/mips/dspr2/yv12extend_dspr2.c
+++ b/aom_scale/mips/dspr2/yv12extend_dspr2.c
@@ -10,10 +10,10 @@
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_scale/yv12config.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_mem/aom_mem.h"
+#include "aom_scale/aom_scale.h"
 
 #if HAVE_DSPR2
 static void extend_plane(uint8_t *const src, int src_stride, int width,
@@ -125,13 +125,13 @@
   extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 }
 
-void vpx_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
+void aom_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
   extend_frame(ybf, ybf->border);
 }
 
-void vpx_extend_frame_inner_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
-  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS)
-                           ? VPXINNERBORDERINPIXELS
+void aom_extend_frame_inner_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
+  const int inner_bw = (ybf->border > AOMINNERBORDERINPIXELS)
+                           ? AOMINNERBORDERINPIXELS
                            : ybf->border;
   extend_frame(ybf, inner_bw);
 }
diff --git a/aom_scale/vpx_scale.mk b/aom_scale/vpx_scale.mk
deleted file mode 100644
index 2ae0d84..0000000
--- a/aom_scale/vpx_scale.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-SCALE_SRCS-yes += vpx_scale.mk
-SCALE_SRCS-yes += yv12config.h
-SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += vpx_scale.h
-SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/vpx_scale.c
-SCALE_SRCS-yes += generic/yv12config.c
-SCALE_SRCS-yes += generic/yv12extend.c
-SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/gen_scalers.c
-SCALE_SRCS-yes += vpx_scale_rtcd.c
-SCALE_SRCS-yes += vpx_scale_rtcd.pl
-
-#mips(dspr2)
-SCALE_SRCS-$(HAVE_DSPR2)  += mips/dspr2/yv12extend_dspr2.c
-
-SCALE_SRCS-no += $(SCALE_SRCS_REMOVE-yes)
-
-$(eval $(call rtcd_h_template,vpx_scale_rtcd,aom_scale/vpx_scale_rtcd.pl))
diff --git a/aom_scale/vpx_scale_rtcd.pl b/aom_scale/vpx_scale_rtcd.pl
deleted file mode 100644
index 2e5d54d..0000000
--- a/aom_scale/vpx_scale_rtcd.pl
+++ /dev/null
@@ -1,35 +0,0 @@
-sub vpx_scale_forward_decls() {
-print <<EOF
-struct yv12_buffer_config;
-EOF
-}
-forward_decls qw/vpx_scale_forward_decls/;
-
-# Scaler functions
-if (vpx_config("CONFIG_SPATIAL_RESAMPLING") eq "yes") {
-    add_proto qw/void vpx_horizontal_line_5_4_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vpx_vertical_band_5_4_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vpx_horizontal_line_5_3_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vpx_vertical_band_5_3_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vpx_horizontal_line_2_1_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vpx_vertical_band_2_1_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vpx_vertical_band_2_1_scale_i/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-}
-
-add_proto qw/void vpx_yv12_extend_frame_borders/, "struct yv12_buffer_config *ybf";
-
-add_proto qw/void vpx_yv12_copy_frame/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
-
-add_proto qw/void vpx_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
-
-if (vpx_config("CONFIG_VP10") eq "yes") {
-    add_proto qw/void vpx_extend_frame_borders/, "struct yv12_buffer_config *ybf";
-    specialize qw/vpx_extend_frame_borders dspr2/;
-
-    add_proto qw/void vpx_extend_frame_inner_borders/, "struct yv12_buffer_config *ybf";
-    specialize qw/vpx_extend_frame_inner_borders dspr2/;
-
-    add_proto qw/void vpx_extend_frame_borders_y/, "struct yv12_buffer_config *ybf";
-    specialize qw/vpx_extend_frame_borders_y/;
-}
-1;
diff --git a/aom_scale/yv12config.h b/aom_scale/yv12config.h
index cd7822a..07d0ce7 100644
--- a/aom_scale/yv12config.h
+++ b/aom_scale/yv12config.h
@@ -8,27 +8,27 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_SCALE_YV12CONFIG_H_
-#define VPX_SCALE_YV12CONFIG_H_
+#ifndef AOM_SCALE_YV12CONFIG_H_
+#define AOM_SCALE_YV12CONFIG_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include "./vpx_config.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_frame_buffer.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_frame_buffer.h"
+#include "aom/aom_integer.h"
 
 #define VP8BORDERINPIXELS 32
 #if CONFIG_EXT_PARTITION
-#define VPXINNERBORDERINPIXELS 160
+#define AOMINNERBORDERINPIXELS 160
 #else
-#define VPXINNERBORDERINPIXELS 96
+#define AOMINNERBORDERINPIXELS 96
 #endif  // CONFIG_EXT_PARTITION
-#define VPX_INTERP_EXTEND 4
-#define VPX_ENC_BORDER_IN_PIXELS 160
-#define VPX_DEC_BORDER_IN_PIXELS 160
+#define AOM_INTERP_EXTEND 4
+#define AOM_ENC_BORDER_IN_PIXELS 160
+#define AOM_DEC_BORDER_IN_PIXELS 160
 
 typedef struct yv12_buffer_config {
   int y_width;
@@ -59,8 +59,8 @@
   int subsampling_x;
   int subsampling_y;
   unsigned int bit_depth;
-  vpx_color_space_t color_space;
-  vpx_color_range_t color_range;
+  aom_color_space_t color_space;
+  aom_color_range_t color_range;
   int render_width;
   int render_height;
 
@@ -70,9 +70,15 @@
 
 #define YV12_FLAG_HIGHBITDEPTH 8
 
-int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+int aom_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                                int border);
+int aom_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width,
+                                  int height, int border);
+int aom_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+
+int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                            int ss_x, int ss_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
                            int border, int byte_alignment);
@@ -84,18 +90,18 @@
 // to decode the current frame. If cb is NULL, libaom will allocate memory
 // internally to decode the current frame. Returns 0 on success. Returns < 0
 // on failure.
-int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                              int ss_x, int ss_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
                              int border, int byte_alignment,
-                             vpx_codec_frame_buffer_t *fb,
-                             vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv);
-int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+                             aom_codec_frame_buffer_t *fb,
+                             aom_get_frame_buffer_cb_fn_t cb, void *cb_priv);
+int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif  // VPX_SCALE_YV12CONFIG_H_
+#endif  // AOM_SCALE_YV12CONFIG_H_
diff --git a/aom_util/vpx_thread.c b/aom_util/aom_thread.c
similarity index 84%
rename from aom_util/vpx_thread.c
rename to aom_util/aom_thread.c
index 01930b6..346fa6f 100644
--- a/aom_util/vpx_thread.c
+++ b/aom_util/aom_thread.c
@@ -14,12 +14,12 @@
 
 #include <assert.h>
 #include <string.h>  // for memset()
-#include "./vpx_thread.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_thread.h"
+#include "aom_mem/aom_mem.h"
 
 #if CONFIG_MULTITHREAD
 
-struct VPxWorkerImpl {
+struct AVxWorkerImpl {
   pthread_mutex_t mutex_;
   pthread_cond_t condition_;
   pthread_t thread_;
@@ -27,10 +27,10 @@
 
 //------------------------------------------------------------------------------
 
-static void execute(VPxWorker *const worker);  // Forward declaration.
+static void execute(AVxWorker *const worker);  // Forward declaration.
 
 static THREADFN thread_loop(void *ptr) {
-  VPxWorker *const worker = (VPxWorker *)ptr;
+  AVxWorker *const worker = (AVxWorker *)ptr;
   int done = 0;
   while (!done) {
     pthread_mutex_lock(&worker->impl_->mutex_);
@@ -51,7 +51,7 @@
 }
 
 // main thread state control
-static void change_state(VPxWorker *const worker, VPxWorkerStatus new_status) {
+static void change_state(AVxWorker *const worker, AVxWorkerStatus new_status) {
   // No-op when attempting to change state on a thread that didn't come up.
   // Checking status_ without acquiring the lock first would result in a data
   // race.
@@ -76,12 +76,12 @@
 
 //------------------------------------------------------------------------------
 
-static void init(VPxWorker *const worker) {
+static void init(AVxWorker *const worker) {
   memset(worker, 0, sizeof(*worker));
   worker->status_ = NOT_OK;
 }
 
-static int sync(VPxWorker *const worker) {
+static int sync(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   change_state(worker, OK);
 #endif
@@ -89,12 +89,12 @@
   return !worker->had_error;
 }
 
-static int reset(VPxWorker *const worker) {
+static int reset(AVxWorker *const worker) {
   int ok = 1;
   worker->had_error = 0;
   if (worker->status_ < OK) {
 #if CONFIG_MULTITHREAD
-    worker->impl_ = (VPxWorkerImpl *)vpx_calloc(1, sizeof(*worker->impl_));
+    worker->impl_ = (AVxWorkerImpl *)aom_calloc(1, sizeof(*worker->impl_));
     if (worker->impl_ == NULL) {
       return 0;
     }
@@ -113,7 +113,7 @@
       pthread_mutex_destroy(&worker->impl_->mutex_);
       pthread_cond_destroy(&worker->impl_->condition_);
     Error:
-      vpx_free(worker->impl_);
+      aom_free(worker->impl_);
       worker->impl_ = NULL;
       return 0;
     }
@@ -127,13 +127,13 @@
   return ok;
 }
 
-static void execute(VPxWorker *const worker) {
+static void execute(AVxWorker *const worker) {
   if (worker->hook != NULL) {
     worker->had_error |= !worker->hook(worker->data1, worker->data2);
   }
 }
 
-static void launch(VPxWorker *const worker) {
+static void launch(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   change_state(worker, WORK);
 #else
@@ -141,14 +141,14 @@
 #endif
 }
 
-static void end(VPxWorker *const worker) {
+static void end(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   if (worker->impl_ != NULL) {
     change_state(worker, NOT_OK);
     pthread_join(worker->impl_->thread_, NULL);
     pthread_mutex_destroy(&worker->impl_->mutex_);
     pthread_cond_destroy(&worker->impl_->condition_);
-    vpx_free(worker->impl_);
+    aom_free(worker->impl_);
     worker->impl_ = NULL;
   }
 #else
@@ -160,10 +160,10 @@
 
 //------------------------------------------------------------------------------
 
-static VPxWorkerInterface g_worker_interface = { init,   reset,   sync,
+static AVxWorkerInterface g_worker_interface = { init,   reset,   sync,
                                                  launch, execute, end };
 
-int vpx_set_worker_interface(const VPxWorkerInterface *const winterface) {
+int aom_set_worker_interface(const AVxWorkerInterface *const winterface) {
   if (winterface == NULL || winterface->init == NULL ||
       winterface->reset == NULL || winterface->sync == NULL ||
       winterface->launch == NULL || winterface->execute == NULL ||
@@ -174,7 +174,7 @@
   return 1;
 }
 
-const VPxWorkerInterface *vpx_get_worker_interface(void) {
+const AVxWorkerInterface *aom_get_worker_interface(void) {
   return &g_worker_interface;
 }
 
diff --git a/aom_util/vpx_thread.h b/aom_util/aom_thread.h
similarity index 94%
rename from aom_util/vpx_thread.h
rename to aom_util/aom_thread.h
index 3b852b2..799c9ff 100644
--- a/aom_util/vpx_thread.h
+++ b/aom_util/aom_thread.h
@@ -12,10 +12,10 @@
 // Original source:
 //  https://chromium.googlesource.com/webm/libwebp
 
-#ifndef VPX_THREAD_H_
-#define VPX_THREAD_H_
+#ifndef AOM_THREAD_H_
+#define AOM_THREAD_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -347,59 +347,59 @@
   NOT_OK = 0,  // object is unusable
   OK,          // ready to work
   WORK         // busy finishing the current task
-} VPxWorkerStatus;
+} AVxWorkerStatus;
 
 // Function to be called by the worker thread. Takes two opaque pointers as
 // arguments (data1 and data2), and should return false in case of error.
-typedef int (*VPxWorkerHook)(void *, void *);
+typedef int (*AVxWorkerHook)(void *, void *);
 
 // Platform-dependent implementation details for the worker.
-typedef struct VPxWorkerImpl VPxWorkerImpl;
+typedef struct AVxWorkerImpl AVxWorkerImpl;
 
 // Synchronization object used to launch job in the worker thread
 typedef struct {
-  VPxWorkerImpl *impl_;
-  VPxWorkerStatus status_;
-  VPxWorkerHook hook;  // hook to call
+  AVxWorkerImpl *impl_;
+  AVxWorkerStatus status_;
+  AVxWorkerHook hook;  // hook to call
   void *data1;         // first argument passed to 'hook'
   void *data2;         // second argument passed to 'hook'
   int had_error;       // return value of the last call to 'hook'
-} VPxWorker;
+} AVxWorker;
 
 // The interface for all thread-worker related functions. All these functions
 // must be implemented.
 typedef struct {
   // Must be called first, before any other method.
-  void (*init)(VPxWorker *const worker);
+  void (*init)(AVxWorker *const worker);
   // Must be called to initialize the object and spawn the thread. Re-entrant.
   // Will potentially launch the thread. Returns false in case of error.
-  int (*reset)(VPxWorker *const worker);
+  int (*reset)(AVxWorker *const worker);
   // Makes sure the previous work is finished. Returns true if worker->had_error
   // was not set and no error condition was triggered by the working thread.
-  int (*sync)(VPxWorker *const worker);
+  int (*sync)(AVxWorker *const worker);
   // Triggers the thread to call hook() with data1 and data2 arguments. These
   // hook/data1/data2 values can be changed at any time before calling this
   // function, but not be changed afterward until the next call to Sync().
-  void (*launch)(VPxWorker *const worker);
+  void (*launch)(AVxWorker *const worker);
   // This function is similar to launch() except that it calls the
   // hook directly instead of using a thread. Convenient to bypass the thread
-  // mechanism while still using the VPxWorker structs. sync() must
+  // mechanism while still using the AVxWorker structs. sync() must
   // still be called afterward (for error reporting).
-  void (*execute)(VPxWorker *const worker);
+  void (*execute)(AVxWorker *const worker);
   // Kill the thread and terminate the object. To use the object again, one
   // must call reset() again.
-  void (*end)(VPxWorker *const worker);
-} VPxWorkerInterface;
+  void (*end)(AVxWorker *const worker);
+} AVxWorkerInterface;
 
 // Install a new set of threading functions, overriding the defaults. This
 // should be done before any workers are started, i.e., before any encoding or
 // decoding takes place. The contents of the interface struct are copied, it
 // is safe to free the corresponding memory after this call. This function is
 // not thread-safe. Return false in case of invalid pointer or methods.
-int vpx_set_worker_interface(const VPxWorkerInterface *const winterface);
+int aom_set_worker_interface(const AVxWorkerInterface *const winterface);
 
 // Retrieve the currently set thread worker interface.
-const VPxWorkerInterface *vpx_get_worker_interface(void);
+const AVxWorkerInterface *aom_get_worker_interface(void);
 
 //------------------------------------------------------------------------------
 
@@ -407,4 +407,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_THREAD_H_
+#endif  // AOM_THREAD_H_
diff --git a/aom_util/vpx_util.mk b/aom_util/aom_util.mk
similarity index 84%
rename from aom_util/vpx_util.mk
rename to aom_util/aom_util.mk
index 480e61f..b634f7f 100644
--- a/aom_util/vpx_util.mk
+++ b/aom_util/aom_util.mk
@@ -8,9 +8,9 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-UTIL_SRCS-yes += vpx_util.mk
-UTIL_SRCS-yes += vpx_thread.c
-UTIL_SRCS-yes += vpx_thread.h
+UTIL_SRCS-yes += aom_util.mk
+UTIL_SRCS-yes += aom_thread.c
+UTIL_SRCS-yes += aom_thread.h
 UTIL_SRCS-yes += debug_util.c
 UTIL_SRCS-yes += debug_util.h
 UTIL_SRCS-yes += endian_inl.h
diff --git a/aom_util/debug_util.h b/aom_util/debug_util.h
index 0438be4..aa6896c 100644
--- a/aom_util/debug_util.h
+++ b/aom_util/debug_util.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_UTIL_DEBUG_UTIL_H_
-#define VPX_UTIL_DEBUG_UTIL_H_
+#ifndef AOM_UTIL_DEBUG_UTIL_H_
+#define AOM_UTIL_DEBUG_UTIL_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -40,4 +40,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPX_UTIL_DEBUG_UTIL_H_
+#endif  // AOM_UTIL_DEBUG_UTIL_H_
diff --git a/aom_util/endian_inl.h b/aom_util/endian_inl.h
index 36b8138..9eb0e35 100644
--- a/aom_util/endian_inl.h
+++ b/aom_util/endian_inl.h
@@ -9,12 +9,12 @@
 //
 // Endian related functions.
 
-#ifndef VPX_UTIL_ENDIAN_INL_H_
-#define VPX_UTIL_ENDIAN_INL_H_
+#ifndef AOM_UTIL_ENDIAN_INL_H_
+#define AOM_UTIL_ENDIAN_INL_H_
 
 #include <stdlib.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #if defined(__GNUC__)
 #define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
@@ -62,7 +62,7 @@
 
 #if HAVE_MIPS32 && defined(__mips__) && !defined(__mips64) && \
     defined(__mips_isa_rev) && (__mips_isa_rev >= 2) && (__mips_isa_rev < 6)
-#define VPX_USE_MIPS32_R2
+#define AOM_USE_MIPS32_R2
 #endif
 
 static INLINE uint16_t BSwap16(uint16_t x) {
@@ -77,7 +77,7 @@
 }
 
 static INLINE uint32_t BSwap32(uint32_t x) {
-#if defined(VPX_USE_MIPS32_R2)
+#if defined(AOM_USE_MIPS32_R2)
   uint32_t ret;
   __asm__ volatile(
       "wsbh   %[ret], %[x]          \n\t"
@@ -115,4 +115,4 @@
 #endif  // HAVE_BUILTIN_BSWAP64
 }
 
-#endif  // VPX_UTIL_ENDIAN_INL_H_
+#endif  // AOM_UTIL_ENDIAN_INL_H_
diff --git a/vpxdec.c b/aomdec.c
similarity index 78%
rename from vpxdec.c
rename to aomdec.c
index 5650049..1c6df344 100644
--- a/vpxdec.c
+++ b/aomdec.c
@@ -15,7 +15,7 @@
 #include <string.h>
 #include <limits.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #if CONFIG_LIBYUV
 #include "third_party/libyuv/include/libyuv/scale.h"
@@ -24,12 +24,12 @@
 #include "./args.h"
 #include "./ivfdec.h"
 
-#include "aom/vpx_decoder.h"
+#include "aom/aom_decoder.h"
 #include "aom_ports/mem_ops.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 
-#if CONFIG_VP10_DECODER
-#include "aom/vp8dx.h"
+#if CONFIG_AV1_DECODER
+#include "aom/aomdx.h"
 #endif
 
 #include "./md5_utils.h"
@@ -42,8 +42,8 @@
 
 static const char *exec_name;
 
-struct VpxDecInputContext {
-  struct VpxInputContext *vpx_input_ctx;
+struct AvxDecInputContext {
+  struct AvxInputContext *aom_input_ctx;
   struct WebmInputContext *webm_ctx;
 };
 
@@ -88,7 +88,7 @@
     ARG_DEF(NULL, "frame-buffers", 1, "Number of frame buffers to use");
 static const arg_def_t md5arg =
     ARG_DEF(NULL, "md5", 0, "Compute the MD5 sum of the decoded frame");
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const arg_def_t outbitdeptharg =
     ARG_DEF(NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames");
 #endif
@@ -121,7 +121,7 @@
                                        &md5arg,
                                        &error_concealment,
                                        &continuearg,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                        &outbitdeptharg,
 #endif
 #if CONFIG_EXT_TILE
@@ -131,29 +131,29 @@
                                        NULL };
 
 #if CONFIG_LIBYUV
-static INLINE int libyuv_scale(vpx_image_t *src, vpx_image_t *dst,
+static INLINE int libyuv_scale(aom_image_t *src, aom_image_t *dst,
                                FilterModeEnum mode) {
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (src->fmt == VPX_IMG_FMT_I42016) {
-    assert(dst->fmt == VPX_IMG_FMT_I42016);
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (src->fmt == AOM_IMG_FMT_I42016) {
+    assert(dst->fmt == AOM_IMG_FMT_I42016);
     return I420Scale_16(
-        (uint16_t *)src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y] / 2,
-        (uint16_t *)src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U] / 2,
-        (uint16_t *)src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V] / 2,
-        src->d_w, src->d_h, (uint16_t *)dst->planes[VPX_PLANE_Y],
-        dst->stride[VPX_PLANE_Y] / 2, (uint16_t *)dst->planes[VPX_PLANE_U],
-        dst->stride[VPX_PLANE_U] / 2, (uint16_t *)dst->planes[VPX_PLANE_V],
-        dst->stride[VPX_PLANE_V] / 2, dst->d_w, dst->d_h, mode);
+        (uint16_t *)src->planes[AOM_PLANE_Y], src->stride[AOM_PLANE_Y] / 2,
+        (uint16_t *)src->planes[AOM_PLANE_U], src->stride[AOM_PLANE_U] / 2,
+        (uint16_t *)src->planes[AOM_PLANE_V], src->stride[AOM_PLANE_V] / 2,
+        src->d_w, src->d_h, (uint16_t *)dst->planes[AOM_PLANE_Y],
+        dst->stride[AOM_PLANE_Y] / 2, (uint16_t *)dst->planes[AOM_PLANE_U],
+        dst->stride[AOM_PLANE_U] / 2, (uint16_t *)dst->planes[AOM_PLANE_V],
+        dst->stride[AOM_PLANE_V] / 2, dst->d_w, dst->d_h, mode);
   }
 #endif
-  assert(src->fmt == VPX_IMG_FMT_I420);
-  assert(dst->fmt == VPX_IMG_FMT_I420);
-  return I420Scale(src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y],
-                   src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U],
-                   src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V], src->d_w,
-                   src->d_h, dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y],
-                   dst->planes[VPX_PLANE_U], dst->stride[VPX_PLANE_U],
-                   dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V], dst->d_w,
+  assert(src->fmt == AOM_IMG_FMT_I420);
+  assert(dst->fmt == AOM_IMG_FMT_I420);
+  return I420Scale(src->planes[AOM_PLANE_Y], src->stride[AOM_PLANE_Y],
+                   src->planes[AOM_PLANE_U], src->stride[AOM_PLANE_U],
+                   src->planes[AOM_PLANE_V], src->stride[AOM_PLANE_V], src->d_w,
+                   src->d_h, dst->planes[AOM_PLANE_Y], dst->stride[AOM_PLANE_Y],
+                   dst->planes[AOM_PLANE_U], dst->stride[AOM_PLANE_U],
+                   dst->planes[AOM_PLANE_V], dst->stride[AOM_PLANE_V], dst->d_w,
                    dst->d_h, mode);
 }
 #endif
@@ -182,10 +182,10 @@
           "not specified, the output will be\n  directed to stdout.\n");
   fprintf(stderr, "\nIncluded decoders:\n\n");
 
-  for (i = 0; i < get_vpx_decoder_count(); ++i) {
-    const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
+  for (i = 0; i < get_aom_decoder_count(); ++i) {
+    const AvxInterface *const decoder = get_aom_decoder_by_index(i);
     fprintf(stderr, "    %-6s - %s\n", decoder->name,
-            vpx_codec_iface_name(decoder->codec_interface()));
+            aom_codec_iface_name(decoder->codec_interface()));
   }
 
   exit(EXIT_FAILURE);
@@ -236,24 +236,24 @@
   return 0;
 }
 
-static int read_frame(struct VpxDecInputContext *input, uint8_t **buf,
+static int read_frame(struct AvxDecInputContext *input, uint8_t **buf,
                       size_t *bytes_in_buffer, size_t *buffer_size) {
-  switch (input->vpx_input_ctx->file_type) {
+  switch (input->aom_input_ctx->file_type) {
 #if CONFIG_WEBM_IO
     case FILE_TYPE_WEBM:
       return webm_read_frame(input->webm_ctx, buf, bytes_in_buffer);
 #endif
     case FILE_TYPE_RAW:
-      return raw_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer,
+      return raw_read_frame(input->aom_input_ctx->file, buf, bytes_in_buffer,
                             buffer_size);
     case FILE_TYPE_IVF:
-      return ivf_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer,
+      return ivf_read_frame(input->aom_input_ctx->file, buf, bytes_in_buffer,
                             buffer_size);
     default: return 1;
   }
 }
 
-static void update_image_md5(const vpx_image_t *img, const int planes[3],
+static void update_image_md5(const aom_image_t *img, const int planes[3],
                              MD5Context *md5) {
   int i, y;
 
@@ -261,9 +261,9 @@
     const int plane = planes[i];
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
-    const int w = vpx_img_plane_width(img, plane) *
-                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
-    const int h = vpx_img_plane_height(img, plane);
+    const int w = aom_img_plane_width(img, plane) *
+                  ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+    const int h = aom_img_plane_height(img, plane);
 
     for (y = 0; y < h; ++y) {
       MD5Update(md5, buf, w);
@@ -272,11 +272,11 @@
   }
 }
 
-static void write_image_file(const vpx_image_t *img, const int planes[3],
+static void write_image_file(const aom_image_t *img, const int planes[3],
                              FILE *file) {
   int i, y;
-#if CONFIG_VP9_HIGHBITDEPTH
-  const int bytes_per_sample = ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+#if CONFIG_AOM_HIGHBITDEPTH
+  const int bytes_per_sample = ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
 #else
   const int bytes_per_sample = 1;
 #endif
@@ -285,8 +285,8 @@
     const int plane = planes[i];
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
-    const int w = vpx_img_plane_width(img, plane);
-    const int h = vpx_img_plane_height(img, plane);
+    const int w = aom_img_plane_width(img, plane);
+    const int h = aom_img_plane_height(img, plane);
 
     for (y = 0; y < h; ++y) {
       fwrite(buf, bytes_per_sample, w, file);
@@ -295,10 +295,10 @@
   }
 }
 
-static int file_is_raw(struct VpxInputContext *input) {
+static int file_is_raw(struct AvxInputContext *input) {
   uint8_t buf[32];
   int is_raw = 0;
-  vpx_codec_stream_info_t si;
+  aom_codec_stream_info_t si;
 
   si.sz = sizeof(si);
 
@@ -306,9 +306,9 @@
     int i;
 
     if (mem_get_le32(buf) < 256 * 1024 * 1024) {
-      for (i = 0; i < get_vpx_decoder_count(); ++i) {
-        const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
-        if (!vpx_codec_peek_stream_info(decoder->codec_interface(), buf + 4,
+      for (i = 0; i < get_aom_decoder_count(); ++i) {
+        const AvxInterface *const decoder = get_aom_decoder_by_index(i);
+        if (!aom_codec_peek_stream_info(decoder->codec_interface(), buf + 4,
                                         32 - 4, &si)) {
           is_raw = 1;
           input->fourcc = decoder->fourcc;
@@ -348,8 +348,8 @@
 // Application private data passed into the set function. |min_size| is the
 // minimum size in bytes needed to decode the next frame. |fb| pointer to the
 // frame buffer.
-static int get_vp9_frame_buffer(void *cb_priv, size_t min_size,
-                                vpx_codec_frame_buffer_t *fb) {
+static int get_av1_frame_buffer(void *cb_priv, size_t min_size,
+                                aom_codec_frame_buffer_t *fb) {
   int i;
   struct ExternalFrameBufferList *const ext_fb_list =
       (struct ExternalFrameBufferList *)cb_priv;
@@ -382,8 +382,8 @@
 // Callback used by libaom when there are no references to the frame buffer.
 // |cb_priv| user private data passed into the set function. |fb| pointer
 // to the frame buffer.
-static int release_vp9_frame_buffer(void *cb_priv,
-                                    vpx_codec_frame_buffer_t *fb) {
+static int release_av1_frame_buffer(void *cb_priv,
+                                    aom_codec_frame_buffer_t *fb) {
   struct ExternalFrameBuffer *const ext_fb =
       (struct ExternalFrameBuffer *)fb->priv;
   (void)cb_priv;
@@ -476,17 +476,17 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static int img_shifted_realloc_required(const vpx_image_t *img,
-                                        const vpx_image_t *shifted,
-                                        vpx_img_fmt_t required_fmt) {
+#if CONFIG_AOM_HIGHBITDEPTH
+static int img_shifted_realloc_required(const aom_image_t *img,
+                                        const aom_image_t *shifted,
+                                        aom_img_fmt_t required_fmt) {
   return img->d_w != shifted->d_w || img->d_h != shifted->d_h ||
          required_fmt != shifted->fmt;
 }
 #endif
 
 static int main_loop(int argc, const char **argv_) {
-  vpx_codec_ctx_t decoder;
+  aom_codec_ctx_t decoder;
   char *fn = NULL;
   int i;
   uint8_t *buf = NULL;
@@ -498,8 +498,8 @@
   int arg_skip = 0;
   int ec_enabled = 0;
   int keep_going = 0;
-  const VpxInterface *interface = NULL;
-  const VpxInterface *fourcc_interface = NULL;
+  const AvxInterface *interface = NULL;
+  const AvxInterface *fourcc_interface = NULL;
   uint64_t dx_time = 0;
   struct arg arg;
   char **argv, **argi, **argj;
@@ -508,8 +508,8 @@
   int use_y4m = 1;
   int opt_yv12 = 0;
   int opt_i420 = 0;
-  vpx_codec_dec_cfg_t cfg = { 0, 0, 0 };
-#if CONFIG_VP9_HIGHBITDEPTH
+  aom_codec_dec_cfg_t cfg = { 0, 0, 0 };
+#if CONFIG_AOM_HIGHBITDEPTH
   unsigned int output_bit_depth = 0;
 #endif
 #if CONFIG_EXT_TILE
@@ -519,9 +519,9 @@
   int frames_corrupted = 0;
   int dec_flags = 0;
   int do_scale = 0;
-  vpx_image_t *scaled_img = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
-  vpx_image_t *img_shifted = NULL;
+  aom_image_t *scaled_img = NULL;
+#if CONFIG_AOM_HIGHBITDEPTH
+  aom_image_t *img_shifted = NULL;
 #endif
   int frame_avail, got_data, flush_decoder = 0;
   int num_external_frame_buffers = 0;
@@ -534,14 +534,14 @@
   MD5Context md5_ctx;
   unsigned char md5_digest[16];
 
-  struct VpxDecInputContext input = { NULL, NULL };
-  struct VpxInputContext vpx_input_ctx;
+  struct AvxDecInputContext input = { NULL, NULL };
+  struct AvxInputContext aom_input_ctx;
 #if CONFIG_WEBM_IO
   struct WebmInputContext webm_ctx;
   memset(&(webm_ctx), 0, sizeof(webm_ctx));
   input.webm_ctx = &webm_ctx;
 #endif
-  input.vpx_input_ctx = &vpx_input_ctx;
+  input.aom_input_ctx = &aom_input_ctx;
 
   /* Parse command line */
   exec_name = argv_[0];
@@ -552,7 +552,7 @@
     arg.argv_step = 1;
 
     if (arg_match(&arg, &codecarg, argi)) {
-      interface = get_vpx_decoder_by_name(arg.val);
+      interface = get_aom_decoder_by_name(arg.val);
       if (!interface)
         die("Error: Unrecognized argument (%s) to --codec\n", arg.val);
     } else if (arg_match(&arg, &looparg, argi)) {
@@ -587,7 +587,7 @@
       summary = 1;
     else if (arg_match(&arg, &threadsarg, argi))
       cfg.threads = arg_parse_uint(&arg);
-#if CONFIG_VP9_DECODER || CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
     else if (arg_match(&arg, &frameparallelarg, argi))
       frame_parallel = 1;
 #endif
@@ -599,7 +599,7 @@
       num_external_frame_buffers = arg_parse_uint(&arg);
     else if (arg_match(&arg, &continuearg, argi))
       keep_going = 1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     else if (arg_match(&arg, &outbitdeptharg, argi)) {
       output_bit_depth = arg_parse_uint(&arg);
     }
@@ -641,19 +641,19 @@
     return EXIT_FAILURE;
   }
 #endif
-  input.vpx_input_ctx->file = infile;
-  if (file_is_ivf(input.vpx_input_ctx))
-    input.vpx_input_ctx->file_type = FILE_TYPE_IVF;
+  input.aom_input_ctx->file = infile;
+  if (file_is_ivf(input.aom_input_ctx))
+    input.aom_input_ctx->file_type = FILE_TYPE_IVF;
 #if CONFIG_WEBM_IO
-  else if (file_is_webm(input.webm_ctx, input.vpx_input_ctx))
-    input.vpx_input_ctx->file_type = FILE_TYPE_WEBM;
+  else if (file_is_webm(input.webm_ctx, input.aom_input_ctx))
+    input.aom_input_ctx->file_type = FILE_TYPE_WEBM;
 #endif
-  else if (file_is_raw(input.vpx_input_ctx))
-    input.vpx_input_ctx->file_type = FILE_TYPE_RAW;
+  else if (file_is_raw(input.aom_input_ctx))
+    input.aom_input_ctx->file_type = FILE_TYPE_RAW;
   else {
     fprintf(stderr, "Unrecognized input file type.\n");
 #if !CONFIG_WEBM_IO
-    fprintf(stderr, "vpxdec was built without WebM container support.\n");
+    fprintf(stderr, "aomdec was built without WebM container support.\n");
 #endif
     return EXIT_FAILURE;
   }
@@ -663,7 +663,7 @@
 
   if (!noblit && single_file) {
     generate_filename(outfile_pattern, outfile_name, PATH_MAX,
-                      vpx_input_ctx.width, vpx_input_ctx.height, 0);
+                      aom_input_ctx.width, aom_input_ctx.height, 0);
     if (do_md5)
       MD5Init(&md5_ctx);
     else
@@ -679,8 +679,8 @@
     }
 
 #if CONFIG_WEBM_IO
-    if (vpx_input_ctx.file_type == FILE_TYPE_WEBM) {
-      if (webm_guess_framerate(input.webm_ctx, input.vpx_input_ctx)) {
+    if (aom_input_ctx.file_type == FILE_TYPE_WEBM) {
+      if (webm_guess_framerate(input.webm_ctx, input.aom_input_ctx)) {
         fprintf(stderr,
                 "Failed to guess framerate -- error parsing "
                 "webm file?\n");
@@ -690,37 +690,37 @@
 #endif
   }
 
-  fourcc_interface = get_vpx_decoder_by_fourcc(vpx_input_ctx.fourcc);
+  fourcc_interface = get_aom_decoder_by_fourcc(aom_input_ctx.fourcc);
   if (interface && fourcc_interface && interface != fourcc_interface)
     warn("Header indicates codec: %s\n", fourcc_interface->name);
   else
     interface = fourcc_interface;
 
-  if (!interface) interface = get_vpx_decoder_by_index(0);
+  if (!interface) interface = get_aom_decoder_by_index(0);
 
-  dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
-              (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0) |
-              (frame_parallel ? VPX_CODEC_USE_FRAME_THREADING : 0);
-  if (vpx_codec_dec_init(&decoder, interface->codec_interface(), &cfg,
+  dec_flags = (postproc ? AOM_CODEC_USE_POSTPROC : 0) |
+              (ec_enabled ? AOM_CODEC_USE_ERROR_CONCEALMENT : 0) |
+              (frame_parallel ? AOM_CODEC_USE_FRAME_THREADING : 0);
+  if (aom_codec_dec_init(&decoder, interface->codec_interface(), &cfg,
                          dec_flags)) {
     fprintf(stderr, "Failed to initialize decoder: %s\n",
-            vpx_codec_error(&decoder));
+            aom_codec_error(&decoder));
     return EXIT_FAILURE;
   }
 
   if (!quiet) fprintf(stderr, "%s\n", decoder.name);
 
-#if CONFIG_VP10_DECODER && CONFIG_EXT_TILE
-  if (strncmp(decoder.name, "WebM Project VP10", 17) == 0) {
-    if (vpx_codec_control(&decoder, VP10_SET_DECODE_TILE_ROW, tile_row)) {
+#if CONFIG_AV1_DECODER && CONFIG_EXT_TILE
+  if (strncmp(decoder.name, "WebM Project AV1", 16) == 0) {
+    if (aom_codec_control(&decoder, AV1_SET_DECODE_TILE_ROW, tile_row)) {
       fprintf(stderr, "Failed to set decode_tile_row: %s\n",
-              vpx_codec_error(&decoder));
+              aom_codec_error(&decoder));
       return EXIT_FAILURE;
     }
 
-    if (vpx_codec_control(&decoder, VP10_SET_DECODE_TILE_COL, tile_col)) {
+    if (aom_codec_control(&decoder, AV1_SET_DECODE_TILE_COL, tile_col)) {
       fprintf(stderr, "Failed to set decode_tile_col: %s\n",
-              vpx_codec_error(&decoder));
+              aom_codec_error(&decoder));
       return EXIT_FAILURE;
     }
   }
@@ -736,11 +736,11 @@
     ext_fb_list.num_external_frame_buffers = num_external_frame_buffers;
     ext_fb_list.ext_fb = (struct ExternalFrameBuffer *)calloc(
         num_external_frame_buffers, sizeof(*ext_fb_list.ext_fb));
-    if (vpx_codec_set_frame_buffer_functions(&decoder, get_vp9_frame_buffer,
-                                             release_vp9_frame_buffer,
+    if (aom_codec_set_frame_buffer_functions(&decoder, get_av1_frame_buffer,
+                                             release_av1_frame_buffer,
                                              &ext_fb_list)) {
       fprintf(stderr, "Failed to configure external frame buffers: %s\n",
-              vpx_codec_error(&decoder));
+              aom_codec_error(&decoder));
       return EXIT_FAILURE;
     }
   }
@@ -750,9 +750,9 @@
 
   /* Decode file */
   while (frame_avail || got_data) {
-    vpx_codec_iter_t iter = NULL;
-    vpx_image_t *img;
-    struct vpx_usec_timer timer;
+    aom_codec_iter_t iter = NULL;
+    aom_image_t *img;
+    struct aom_usec_timer timer;
     int corrupted = 0;
 
     frame_avail = 0;
@@ -761,20 +761,20 @@
         frame_avail = 1;
         frame_in++;
 
-        vpx_usec_timer_start(&timer);
+        aom_usec_timer_start(&timer);
 
-        if (vpx_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer, NULL,
+        if (aom_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer, NULL,
                              0)) {
-          const char *detail = vpx_codec_error_detail(&decoder);
+          const char *detail = aom_codec_error_detail(&decoder);
           warn("Failed to decode frame %d: %s", frame_in,
-               vpx_codec_error(&decoder));
+               aom_codec_error(&decoder));
 
           if (detail) warn("Additional information: %s", detail);
           if (!keep_going) goto fail;
         }
 
-        vpx_usec_timer_mark(&timer);
-        dx_time += vpx_usec_timer_elapsed(&timer);
+        aom_usec_timer_mark(&timer);
+        dx_time += aom_usec_timer_elapsed(&timer);
       } else {
         flush_decoder = 1;
       }
@@ -782,27 +782,27 @@
       flush_decoder = 1;
     }
 
-    vpx_usec_timer_start(&timer);
+    aom_usec_timer_start(&timer);
 
     if (flush_decoder) {
       // Flush the decoder in frame parallel decode.
-      if (vpx_codec_decode(&decoder, NULL, 0, NULL, 0)) {
-        warn("Failed to flush decoder: %s", vpx_codec_error(&decoder));
+      if (aom_codec_decode(&decoder, NULL, 0, NULL, 0)) {
+        warn("Failed to flush decoder: %s", aom_codec_error(&decoder));
       }
     }
 
     got_data = 0;
-    if ((img = vpx_codec_get_frame(&decoder, &iter))) {
+    if ((img = aom_codec_get_frame(&decoder, &iter))) {
       ++frame_out;
       got_data = 1;
     }
 
-    vpx_usec_timer_mark(&timer);
-    dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+    aom_usec_timer_mark(&timer);
+    dx_time += (unsigned int)aom_usec_timer_elapsed(&timer);
 
     if (!frame_parallel &&
-        vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
-      warn("Failed VP8_GET_FRAME_CORRUPTED: %s", vpx_codec_error(&decoder));
+        aom_codec_control(&decoder, AOMD_GET_FRAME_CORRUPTED, &corrupted)) {
+      warn("Failed AOMD_GET_FRAME_CORRUPTED: %s", aom_codec_error(&decoder));
       if (!keep_going) goto fail;
     }
     frames_corrupted += corrupted;
@@ -810,8 +810,8 @@
     if (progress) show_progress(frame_in, frame_out, dx_time);
 
     if (!noblit && img) {
-      const int PLANES_YUV[] = { VPX_PLANE_Y, VPX_PLANE_U, VPX_PLANE_V };
-      const int PLANES_YVU[] = { VPX_PLANE_Y, VPX_PLANE_V, VPX_PLANE_U };
+      const int PLANES_YUV[] = { AOM_PLANE_Y, AOM_PLANE_U, AOM_PLANE_V };
+      const int PLANES_YVU[] = { AOM_PLANE_Y, AOM_PLANE_V, AOM_PLANE_U };
       const int *planes = flipuv ? PLANES_YVU : PLANES_YUV;
 
       if (do_scale) {
@@ -821,11 +821,11 @@
           // these is set to 0, use the display size set in the first frame
           // header. If that is unavailable, use the raw decoded size of the
           // first decoded frame.
-          int render_width = vpx_input_ctx.width;
-          int render_height = vpx_input_ctx.height;
+          int render_width = aom_input_ctx.width;
+          int render_height = aom_input_ctx.height;
           if (!render_width || !render_height) {
             int render_size[2];
-            if (vpx_codec_control(&decoder, VP9D_GET_DISPLAY_SIZE,
+            if (aom_codec_control(&decoder, AV1D_GET_DISPLAY_SIZE,
                                   render_size)) {
               // As last resort use size of first frame as display size.
               render_width = img->d_w;
@@ -836,7 +836,7 @@
             }
           }
           scaled_img =
-              vpx_img_alloc(NULL, img->fmt, render_width, render_height, 16);
+              aom_img_alloc(NULL, img->fmt, render_width, render_height, 16);
           scaled_img->bit_depth = img->bit_depth;
         }
 
@@ -849,36 +849,36 @@
                   "Failed  to scale output frame: %s.\n"
                   "Scaling is disabled in this configuration. "
                   "To enable scaling, configure with --enable-libyuv\n",
-                  vpx_codec_error(&decoder));
+                  aom_codec_error(&decoder));
           return EXIT_FAILURE;
 #endif
         }
       }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       // Default to codec bit depth if output bit depth not set
       if (!output_bit_depth && single_file && !do_md5) {
         output_bit_depth = img->bit_depth;
       }
       // Shift up or down if necessary
       if (output_bit_depth != 0 && output_bit_depth != img->bit_depth) {
-        const vpx_img_fmt_t shifted_fmt =
+        const aom_img_fmt_t shifted_fmt =
             output_bit_depth == 8
-                ? img->fmt ^ (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH)
-                : img->fmt | VPX_IMG_FMT_HIGHBITDEPTH;
+                ? img->fmt ^ (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH)
+                : img->fmt | AOM_IMG_FMT_HIGHBITDEPTH;
         if (img_shifted &&
             img_shifted_realloc_required(img, img_shifted, shifted_fmt)) {
-          vpx_img_free(img_shifted);
+          aom_img_free(img_shifted);
           img_shifted = NULL;
         }
         if (!img_shifted) {
           img_shifted =
-              vpx_img_alloc(NULL, shifted_fmt, img->d_w, img->d_h, 16);
+              aom_img_alloc(NULL, shifted_fmt, img->d_w, img->d_h, 16);
           img_shifted->bit_depth = output_bit_depth;
         }
         if (output_bit_depth > img->bit_depth) {
-          vpx_img_upshift(img_shifted, img, output_bit_depth - img->bit_depth);
+          aom_img_upshift(img_shifted, img, output_bit_depth - img->bit_depth);
         } else {
-          vpx_img_downshift(img_shifted, img,
+          aom_img_downshift(img_shifted, img,
                             img->bit_depth - output_bit_depth);
         }
         img = img_shifted;
@@ -886,23 +886,23 @@
 #endif
 
 #if CONFIG_EXT_TILE
-      vpx_input_ctx.width = img->d_w;
-      vpx_input_ctx.height = img->d_h;
+      aom_input_ctx.width = img->d_w;
+      aom_input_ctx.height = img->d_h;
 #endif  // CONFIG_EXT_TILE
 
       if (single_file) {
         if (use_y4m) {
           char buf[Y4M_BUFFER_SIZE] = { 0 };
           size_t len = 0;
-          if (img->fmt == VPX_IMG_FMT_I440 || img->fmt == VPX_IMG_FMT_I44016) {
+          if (img->fmt == AOM_IMG_FMT_I440 || img->fmt == AOM_IMG_FMT_I44016) {
             fprintf(stderr, "Cannot produce y4m output for 440 sampling.\n");
             goto fail;
           }
           if (frame_out == 1) {
             // Y4M file header
             len = y4m_write_file_header(
-                buf, sizeof(buf), vpx_input_ctx.width, vpx_input_ctx.height,
-                &vpx_input_ctx.framerate, img->fmt, img->bit_depth);
+                buf, sizeof(buf), aom_input_ctx.width, aom_input_ctx.height,
+                &aom_input_ctx.framerate, img->fmt, img->bit_depth);
             if (do_md5) {
               MD5Update(&md5_ctx, (md5byte *)buf, (unsigned int)len);
             } else {
@@ -922,15 +922,15 @@
             // Check if --yv12 or --i420 options are consistent with the
             // bit-stream decoded
             if (opt_i420) {
-              if (img->fmt != VPX_IMG_FMT_I420 &&
-                  img->fmt != VPX_IMG_FMT_I42016) {
+              if (img->fmt != AOM_IMG_FMT_I420 &&
+                  img->fmt != AOM_IMG_FMT_I42016) {
                 fprintf(stderr, "Cannot produce i420 output for bit-stream.\n");
                 goto fail;
               }
             }
             if (opt_yv12) {
-              if ((img->fmt != VPX_IMG_FMT_I420 &&
-                   img->fmt != VPX_IMG_FMT_YV12) ||
+              if ((img->fmt != AOM_IMG_FMT_I420 &&
+                   img->fmt != AOM_IMG_FMT_YV12) ||
                   img->bit_depth != 8) {
                 fprintf(stderr, "Cannot produce yv12 output for bit-stream.\n");
                 goto fail;
@@ -971,9 +971,9 @@
 
 fail:
 
-  if (vpx_codec_destroy(&decoder)) {
+  if (aom_codec_destroy(&decoder)) {
     fprintf(stderr, "Failed to destroy decoder: %s\n",
-            vpx_codec_error(&decoder));
+            aom_codec_error(&decoder));
     return EXIT_FAILURE;
   }
 
@@ -987,15 +987,15 @@
   }
 
 #if CONFIG_WEBM_IO
-  if (input.vpx_input_ctx->file_type == FILE_TYPE_WEBM)
+  if (input.aom_input_ctx->file_type == FILE_TYPE_WEBM)
     webm_free(input.webm_ctx);
 #endif
 
-  if (input.vpx_input_ctx->file_type != FILE_TYPE_WEBM) free(buf);
+  if (input.aom_input_ctx->file_type != FILE_TYPE_WEBM) free(buf);
 
-  if (scaled_img) vpx_img_free(scaled_img);
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (img_shifted) vpx_img_free(img_shifted);
+  if (scaled_img) aom_img_free(scaled_img);
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (img_shifted) aom_img_free(img_shifted);
 #endif
 
   for (i = 0; i < ext_fb_list.num_external_frame_buffers; ++i) {
diff --git a/vpxenc.c b/aomenc.c
similarity index 78%
rename from vpxenc.c
rename to aomenc.c
index 32cb12f..e1093a2 100644
--- a/vpxenc.c
+++ b/aomenc.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpxenc.h"
-#include "./vpx_config.h"
+#include "./aomenc.h"
+#include "./aom_config.h"
 
 #include <assert.h>
 #include <limits.h>
@@ -23,27 +23,27 @@
 #include "third_party/libyuv/include/libyuv/scale.h"
 #endif
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 #if CONFIG_DECODERS
-#include "aom/vpx_decoder.h"
+#include "aom/aom_decoder.h"
 #endif
 
 #include "./args.h"
 #include "./ivfenc.h"
 #include "./tools_common.h"
 
-#if CONFIG_VP10_ENCODER
-#include "aom/vp8cx.h"
+#if CONFIG_AV1_ENCODER
+#include "aom/aomcx.h"
 #endif
-#if CONFIG_VP10_DECODER
-#include "aom/vp8dx.h"
+#if CONFIG_AV1_DECODER
+#include "aom/aomdx.h"
 #endif
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem_ops.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 #include "./rate_hist.h"
-#include "./vpxstats.h"
+#include "./aomstats.h"
 #include "./warnings.h"
 #if CONFIG_WEBM_IO
 #include "./webmenc.h"
@@ -64,13 +64,13 @@
 
 static const char *exec_name;
 
-static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal,
+static void warn_or_exit_on_errorv(aom_codec_ctx_t *ctx, int fatal,
                                    const char *s, va_list ap) {
   if (ctx->err) {
-    const char *detail = vpx_codec_error_detail(ctx);
+    const char *detail = aom_codec_error_detail(ctx);
 
     vfprintf(stderr, s, ap);
-    fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
+    fprintf(stderr, ": %s\n", aom_codec_error(ctx));
 
     if (detail) fprintf(stderr, "    %s\n", detail);
 
@@ -78,7 +78,7 @@
   }
 }
 
-static void ctx_exit_on_error(vpx_codec_ctx_t *ctx, const char *s, ...) {
+static void ctx_exit_on_error(aom_codec_ctx_t *ctx, const char *s, ...) {
   va_list ap;
 
   va_start(ap, s);
@@ -86,7 +86,7 @@
   va_end(ap);
 }
 
-static void warn_or_exit_on_error(vpx_codec_ctx_t *ctx, int fatal,
+static void warn_or_exit_on_error(aom_codec_ctx_t *ctx, int fatal,
                                   const char *s, ...) {
   va_list ap;
 
@@ -95,7 +95,7 @@
   va_end(ap);
 }
 
-static int read_frame(struct VpxInputContext *input_ctx, vpx_image_t *img) {
+static int read_frame(struct AvxInputContext *input_ctx, aom_image_t *img) {
   FILE *f = input_ctx->file;
   y4m_input *y4m = &input_ctx->y4m;
   int shortread = 0;
@@ -194,7 +194,7 @@
     ARG_DEF("y", "disable-warning-prompt", 0,
             "Display warnings, but do not prompt user to continue.");
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const arg_def_t test16bitinternalarg = ARG_DEF(
     NULL, "test-16bit-internal", 0, "Force use of 16 bit internal buffer");
 #endif
@@ -267,7 +267,7 @@
                                           &timebase,
                                           &framerate,
                                           &error_resilient,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                           &test16bitinternalarg,
 #endif
                                           &lag_in_frames,
@@ -285,10 +285,10 @@
     ARG_DEF(NULL, "resize-up", 1, "Upscale threshold (buf %)");
 static const arg_def_t resize_down_thresh =
     ARG_DEF(NULL, "resize-down", 1, "Downscale threshold (buf %)");
-static const struct arg_enum_list end_usage_enum[] = { { "vbr", VPX_VBR },
-                                                       { "cbr", VPX_CBR },
-                                                       { "cq", VPX_CQ },
-                                                       { "q", VPX_Q },
+static const struct arg_enum_list end_usage_enum[] = { { "vbr", AOM_VBR },
+                                                       { "cbr", AOM_CBR },
+                                                       { "cq", AOM_CQ },
+                                                       { "q", AOM_Q },
                                                        { NULL, 0 } };
 static const arg_def_t end_usage =
     ARG_DEF_ENUM(NULL, "end-usage", 1, "Rate control mode", end_usage_enum);
@@ -347,7 +347,7 @@
     ARG_DEF(NULL, "arnr-strength", 1, "AltRef filter strength (0..6)");
 static const arg_def_t arnr_type = ARG_DEF(NULL, "arnr-type", 1, "AltRef type");
 static const struct arg_enum_list tuning_enum[] = {
-  { "psnr", VPX_TUNE_PSNR }, { "ssim", VPX_TUNE_SSIM }, { NULL, 0 }
+  { "psnr", AOM_TUNE_PSNR }, { "ssim", AOM_TUNE_SSIM }, { NULL, 0 }
 };
 static const arg_def_t tune_ssim =
     ARG_DEF_ENUM(NULL, "tune", 1, "Material to favor", tuning_enum);
@@ -356,8 +356,8 @@
 static const arg_def_t max_intra_rate_pct =
     ARG_DEF(NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)");
 
-#if CONFIG_VP10_ENCODER
-static const arg_def_t cpu_used_vp9 =
+#if CONFIG_AV1_ENCODER
+static const arg_def_t cpu_used_av1 =
     ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-8..8)");
 static const arg_def_t tile_cols =
     ARG_DEF(NULL, "tile-columns", 1, "Number of tile columns to use, log2");
@@ -396,14 +396,14 @@
     "max gf/arf frame interval (default 0, indicating in-built behavior)");
 
 static const struct arg_enum_list color_space_enum[] = {
-  { "unknown", VPX_CS_UNKNOWN },
-  { "bt601", VPX_CS_BT_601 },
-  { "bt709", VPX_CS_BT_709 },
-  { "smpte170", VPX_CS_SMPTE_170 },
-  { "smpte240", VPX_CS_SMPTE_240 },
-  { "bt2020", VPX_CS_BT_2020 },
-  { "reserved", VPX_CS_RESERVED },
-  { "sRGB", VPX_CS_SRGB },
+  { "unknown", AOM_CS_UNKNOWN },
+  { "bt601", AOM_CS_BT_601 },
+  { "bt709", AOM_CS_BT_709 },
+  { "smpte170", AOM_CS_SMPTE_170 },
+  { "smpte240", AOM_CS_SMPTE_240 },
+  { "bt2020", AOM_CS_BT_2020 },
+  { "reserved", AOM_CS_RESERVED },
+  { "sRGB", AOM_CS_SRGB },
   { NULL, 0 }
 };
 
@@ -411,9 +411,9 @@
     ARG_DEF_ENUM(NULL, "color-space", 1, "The color space of input content:",
                  color_space_enum);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const struct arg_enum_list bitdepth_enum[] = {
-  { "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 }
+  { "8", AOM_BITS_8 }, { "10", AOM_BITS_10 }, { "12", AOM_BITS_12 }, { NULL, 0 }
 };
 
 static const arg_def_t bitdeptharg = ARG_DEF_ENUM(
@@ -425,8 +425,8 @@
 #endif
 
 static const struct arg_enum_list tune_content_enum[] = {
-  { "default", VPX_CONTENT_DEFAULT },
-  { "screen", VPX_CONTENT_SCREEN },
+  { "default", AOM_CONTENT_DEFAULT },
+  { "screen", AOM_CONTENT_SCREEN },
   { NULL, 0 }
 };
 
@@ -434,83 +434,83 @@
     NULL, "tune-content", 1, "Tune content type", tune_content_enum);
 #endif
 
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 #if CONFIG_EXT_PARTITION
 static const struct arg_enum_list superblock_size_enum[] = {
-  { "dynamic", VPX_SUPERBLOCK_SIZE_DYNAMIC },
-  { "64", VPX_SUPERBLOCK_SIZE_64X64 },
-  { "128", VPX_SUPERBLOCK_SIZE_128X128 },
+  { "dynamic", AOM_SUPERBLOCK_SIZE_DYNAMIC },
+  { "64", AOM_SUPERBLOCK_SIZE_64X64 },
+  { "128", AOM_SUPERBLOCK_SIZE_128X128 },
   { NULL, 0 }
 };
 static const arg_def_t superblock_size = ARG_DEF_ENUM(
     NULL, "sb-size", 1, "Superblock size to use", superblock_size_enum);
 #endif  // CONFIG_EXT_PARTITION
 
-static const arg_def_t *vp10_args[] = { &cpu_used_vp9,
-                                        &auto_altref,
-                                        &sharpness,
-                                        &static_thresh,
-                                        &tile_cols,
-                                        &tile_rows,
-                                        &arnr_maxframes,
-                                        &arnr_strength,
-                                        &arnr_type,
-                                        &tune_ssim,
-                                        &cq_level,
-                                        &max_intra_rate_pct,
-                                        &max_inter_rate_pct,
-                                        &gf_cbr_boost_pct,
-                                        &lossless,
-                                        &frame_parallel_decoding,
-                                        &aq_mode,
-                                        &frame_periodic_boost,
-                                        &noise_sens,
-                                        &tune_content,
-                                        &input_color_space,
-                                        &min_gf_interval,
-                                        &max_gf_interval,
+static const arg_def_t *av1_args[] = { &cpu_used_av1,
+                                       &auto_altref,
+                                       &sharpness,
+                                       &static_thresh,
+                                       &tile_cols,
+                                       &tile_rows,
+                                       &arnr_maxframes,
+                                       &arnr_strength,
+                                       &arnr_type,
+                                       &tune_ssim,
+                                       &cq_level,
+                                       &max_intra_rate_pct,
+                                       &max_inter_rate_pct,
+                                       &gf_cbr_boost_pct,
+                                       &lossless,
+                                       &frame_parallel_decoding,
+                                       &aq_mode,
+                                       &frame_periodic_boost,
+                                       &noise_sens,
+                                       &tune_content,
+                                       &input_color_space,
+                                       &min_gf_interval,
+                                       &max_gf_interval,
 #if CONFIG_EXT_PARTITION
-                                        &superblock_size,
+                                       &superblock_size,
 #endif  // CONFIG_EXT_PARTITION
-#if CONFIG_VP9_HIGHBITDEPTH
-                                        &bitdeptharg,
-                                        &inbitdeptharg,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                                        NULL };
-static const int vp10_arg_ctrl_map[] = { VP8E_SET_CPUUSED,
-                                         VP8E_SET_ENABLEAUTOALTREF,
-                                         VP8E_SET_SHARPNESS,
-                                         VP8E_SET_STATIC_THRESHOLD,
-                                         VP9E_SET_TILE_COLUMNS,
-                                         VP9E_SET_TILE_ROWS,
-                                         VP8E_SET_ARNR_MAXFRAMES,
-                                         VP8E_SET_ARNR_STRENGTH,
-                                         VP8E_SET_ARNR_TYPE,
-                                         VP8E_SET_TUNING,
-                                         VP8E_SET_CQ_LEVEL,
-                                         VP8E_SET_MAX_INTRA_BITRATE_PCT,
-                                         VP9E_SET_MAX_INTER_BITRATE_PCT,
-                                         VP9E_SET_GF_CBR_BOOST_PCT,
-                                         VP9E_SET_LOSSLESS,
-                                         VP9E_SET_FRAME_PARALLEL_DECODING,
-                                         VP9E_SET_AQ_MODE,
-                                         VP9E_SET_FRAME_PERIODIC_BOOST,
-                                         VP9E_SET_NOISE_SENSITIVITY,
-                                         VP9E_SET_TUNE_CONTENT,
-                                         VP9E_SET_COLOR_SPACE,
-                                         VP9E_SET_MIN_GF_INTERVAL,
-                                         VP9E_SET_MAX_GF_INTERVAL,
+#if CONFIG_AOM_HIGHBITDEPTH
+                                       &bitdeptharg,
+                                       &inbitdeptharg,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                                       NULL };
+static const int av1_arg_ctrl_map[] = { AOME_SET_CPUUSED,
+                                        AOME_SET_ENABLEAUTOALTREF,
+                                        AOME_SET_SHARPNESS,
+                                        AOME_SET_STATIC_THRESHOLD,
+                                        AV1E_SET_TILE_COLUMNS,
+                                        AV1E_SET_TILE_ROWS,
+                                        AOME_SET_ARNR_MAXFRAMES,
+                                        AOME_SET_ARNR_STRENGTH,
+                                        AOME_SET_ARNR_TYPE,
+                                        AOME_SET_TUNING,
+                                        AOME_SET_CQ_LEVEL,
+                                        AOME_SET_MAX_INTRA_BITRATE_PCT,
+                                        AV1E_SET_MAX_INTER_BITRATE_PCT,
+                                        AV1E_SET_GF_CBR_BOOST_PCT,
+                                        AV1E_SET_LOSSLESS,
+                                        AV1E_SET_FRAME_PARALLEL_DECODING,
+                                        AV1E_SET_AQ_MODE,
+                                        AV1E_SET_FRAME_PERIODIC_BOOST,
+                                        AV1E_SET_NOISE_SENSITIVITY,
+                                        AV1E_SET_TUNE_CONTENT,
+                                        AV1E_SET_COLOR_SPACE,
+                                        AV1E_SET_MIN_GF_INTERVAL,
+                                        AV1E_SET_MAX_GF_INTERVAL,
 #if CONFIG_EXT_PARTITION
-                                         VP10E_SET_SUPERBLOCK_SIZE,
+                                        AV1E_SET_SUPERBLOCK_SIZE,
 #endif  // CONFIG_EXT_PARTITION
-                                         0 };
+                                        0 };
 #endif
 
 static const arg_def_t *no_args[] = { NULL };
 
 void usage_exit(void) {
   int i;
-  const int num_encoder = get_vpx_encoder_count();
+  const int num_encoder = get_aom_encoder_count();
 
   fprintf(stderr, "Usage: %s <options> -o dst_filename src_filename \n",
           exec_name);
@@ -525,9 +525,9 @@
   arg_show_usage(stderr, rc_twopass_args);
   fprintf(stderr, "\nKeyframe Placement Options:\n");
   arg_show_usage(stderr, kf_args);
-#if CONFIG_VP10_ENCODER
-  fprintf(stderr, "\nVP10 Specific Options:\n");
-  arg_show_usage(stderr, vp10_args);
+#if CONFIG_AV1_ENCODER
+  fprintf(stderr, "\nAV1 Specific Options:\n");
+  arg_show_usage(stderr, av1_args);
 #endif
   fprintf(stderr,
           "\nStream timebase (--timebase):\n"
@@ -536,10 +536,10 @@
   fprintf(stderr, "\nIncluded encoders:\n\n");
 
   for (i = 0; i < num_encoder; ++i) {
-    const VpxInterface *const encoder = get_vpx_encoder_by_index(i);
+    const AvxInterface *const encoder = get_aom_encoder_by_index(i);
     const char *defstr = (i == (num_encoder - 1)) ? "(default)" : "";
     fprintf(stderr, "    %-6s - %s %s\n", encoder->name,
-            vpx_codec_iface_name(encoder->codec_interface()), defstr);
+            aom_codec_iface_name(encoder->codec_interface()), defstr);
   }
   fprintf(stderr, "\n        ");
   fprintf(stderr, "Use --codec to switch to a non-default encoder.\n\n");
@@ -549,9 +549,9 @@
 
 #define mmin(a, b) ((a) < (b) ? (a) : (b))
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static void find_mismatch_high(const vpx_image_t *const img1,
-                               const vpx_image_t *const img2, int yloc[4],
+#if CONFIG_AOM_HIGHBITDEPTH
+static void find_mismatch_high(const aom_image_t *const img1,
+                               const aom_image_t *const img2, int yloc[4],
                                int uloc[4], int vloc[4]) {
   uint16_t *plane1, *plane2;
   uint32_t stride1, stride2;
@@ -565,10 +565,10 @@
   int match = 1;
   uint32_t i, j;
   yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1;
-  plane1 = (uint16_t *)img1->planes[VPX_PLANE_Y];
-  plane2 = (uint16_t *)img2->planes[VPX_PLANE_Y];
-  stride1 = img1->stride[VPX_PLANE_Y] / 2;
-  stride2 = img2->stride[VPX_PLANE_Y] / 2;
+  plane1 = (uint16_t *)img1->planes[AOM_PLANE_Y];
+  plane2 = (uint16_t *)img2->planes[AOM_PLANE_Y];
+  stride1 = img1->stride[AOM_PLANE_Y] / 2;
+  stride2 = img2->stride[AOM_PLANE_Y] / 2;
   for (i = 0, match = 1; match && i < img1->d_h; i += bsize) {
     for (j = 0; match && j < img1->d_w; j += bsize) {
       int k, l;
@@ -591,10 +591,10 @@
   }
 
   uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1;
-  plane1 = (uint16_t *)img1->planes[VPX_PLANE_U];
-  plane2 = (uint16_t *)img2->planes[VPX_PLANE_U];
-  stride1 = img1->stride[VPX_PLANE_U] / 2;
-  stride2 = img2->stride[VPX_PLANE_U] / 2;
+  plane1 = (uint16_t *)img1->planes[AOM_PLANE_U];
+  plane2 = (uint16_t *)img2->planes[AOM_PLANE_U];
+  stride1 = img1->stride[AOM_PLANE_U] / 2;
+  stride2 = img2->stride[AOM_PLANE_U] / 2;
   for (i = 0, match = 1; match && i < c_h; i += bsizey) {
     for (j = 0; match && j < c_w; j += bsizex) {
       int k, l;
@@ -617,10 +617,10 @@
   }
 
   vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1;
-  plane1 = (uint16_t *)img1->planes[VPX_PLANE_V];
-  plane2 = (uint16_t *)img2->planes[VPX_PLANE_V];
-  stride1 = img1->stride[VPX_PLANE_V] / 2;
-  stride2 = img2->stride[VPX_PLANE_V] / 2;
+  plane1 = (uint16_t *)img1->planes[AOM_PLANE_V];
+  plane2 = (uint16_t *)img2->planes[AOM_PLANE_V];
+  stride1 = img1->stride[AOM_PLANE_V] / 2;
+  stride2 = img2->stride[AOM_PLANE_V] / 2;
   for (i = 0, match = 1; match && i < c_h; i += bsizey) {
     for (j = 0; match && j < c_w; j += bsizex) {
       int k, l;
@@ -644,8 +644,8 @@
 }
 #endif
 
-static void find_mismatch(const vpx_image_t *const img1,
-                          const vpx_image_t *const img2, int yloc[4],
+static void find_mismatch(const aom_image_t *const img1,
+                          const aom_image_t *const img2, int yloc[4],
                           int uloc[4], int vloc[4]) {
   const uint32_t bsize = 64;
   const uint32_t bsizey = bsize >> img1->y_chroma_shift;
@@ -664,16 +664,16 @@
       const int sj = mmin(j + bsize, img1->d_w) - j;
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_Y] +
-                (i + k) * img1->stride[VPX_PLANE_Y] + j + l) !=
-              *(img2->planes[VPX_PLANE_Y] +
-                (i + k) * img2->stride[VPX_PLANE_Y] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_Y] +
+                (i + k) * img1->stride[AOM_PLANE_Y] + j + l) !=
+              *(img2->planes[AOM_PLANE_Y] +
+                (i + k) * img2->stride[AOM_PLANE_Y] + j + l)) {
             yloc[0] = i + k;
             yloc[1] = j + l;
-            yloc[2] = *(img1->planes[VPX_PLANE_Y] +
-                        (i + k) * img1->stride[VPX_PLANE_Y] + j + l);
-            yloc[3] = *(img2->planes[VPX_PLANE_Y] +
-                        (i + k) * img2->stride[VPX_PLANE_Y] + j + l);
+            yloc[2] = *(img1->planes[AOM_PLANE_Y] +
+                        (i + k) * img1->stride[AOM_PLANE_Y] + j + l);
+            yloc[3] = *(img2->planes[AOM_PLANE_Y] +
+                        (i + k) * img2->stride[AOM_PLANE_Y] + j + l);
             match = 0;
             break;
           }
@@ -690,16 +690,16 @@
       const int sj = mmin(j + bsizex, c_w - j);
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_U] +
-                (i + k) * img1->stride[VPX_PLANE_U] + j + l) !=
-              *(img2->planes[VPX_PLANE_U] +
-                (i + k) * img2->stride[VPX_PLANE_U] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_U] +
+                (i + k) * img1->stride[AOM_PLANE_U] + j + l) !=
+              *(img2->planes[AOM_PLANE_U] +
+                (i + k) * img2->stride[AOM_PLANE_U] + j + l)) {
             uloc[0] = i + k;
             uloc[1] = j + l;
-            uloc[2] = *(img1->planes[VPX_PLANE_U] +
-                        (i + k) * img1->stride[VPX_PLANE_U] + j + l);
-            uloc[3] = *(img2->planes[VPX_PLANE_U] +
-                        (i + k) * img2->stride[VPX_PLANE_U] + j + l);
+            uloc[2] = *(img1->planes[AOM_PLANE_U] +
+                        (i + k) * img1->stride[AOM_PLANE_U] + j + l);
+            uloc[3] = *(img2->planes[AOM_PLANE_U] +
+                        (i + k) * img2->stride[AOM_PLANE_U] + j + l);
             match = 0;
             break;
           }
@@ -715,16 +715,16 @@
       const int sj = mmin(j + bsizex, c_w - j);
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_V] +
-                (i + k) * img1->stride[VPX_PLANE_V] + j + l) !=
-              *(img2->planes[VPX_PLANE_V] +
-                (i + k) * img2->stride[VPX_PLANE_V] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_V] +
+                (i + k) * img1->stride[AOM_PLANE_V] + j + l) !=
+              *(img2->planes[AOM_PLANE_V] +
+                (i + k) * img2->stride[AOM_PLANE_V] + j + l)) {
             vloc[0] = i + k;
             vloc[1] = j + l;
-            vloc[2] = *(img1->planes[VPX_PLANE_V] +
-                        (i + k) * img1->stride[VPX_PLANE_V] + j + l);
-            vloc[3] = *(img2->planes[VPX_PLANE_V] +
-                        (i + k) * img2->stride[VPX_PLANE_V] + j + l);
+            vloc[2] = *(img1->planes[AOM_PLANE_V] +
+                        (i + k) * img1->stride[AOM_PLANE_V] + j + l);
+            vloc[3] = *(img2->planes[AOM_PLANE_V] +
+                        (i + k) * img2->stride[AOM_PLANE_V] + j + l);
             match = 0;
             break;
           }
@@ -734,8 +734,8 @@
   }
 }
 
-static int compare_img(const vpx_image_t *const img1,
-                       const vpx_image_t *const img2) {
+static int compare_img(const aom_image_t *const img1,
+                       const aom_image_t *const img2) {
   uint32_t l_w = img1->d_w;
   uint32_t c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
   const uint32_t c_h =
@@ -746,34 +746,34 @@
   match &= (img1->fmt == img2->fmt);
   match &= (img1->d_w == img2->d_w);
   match &= (img1->d_h == img2->d_h);
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (img1->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (img1->fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
     l_w *= 2;
     c_w *= 2;
   }
 #endif
 
   for (i = 0; i < img1->d_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
-                     img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
+    match &= (memcmp(img1->planes[AOM_PLANE_Y] + i * img1->stride[AOM_PLANE_Y],
+                     img2->planes[AOM_PLANE_Y] + i * img2->stride[AOM_PLANE_Y],
                      l_w) == 0);
 
   for (i = 0; i < c_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
-                     img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
+    match &= (memcmp(img1->planes[AOM_PLANE_U] + i * img1->stride[AOM_PLANE_U],
+                     img2->planes[AOM_PLANE_U] + i * img2->stride[AOM_PLANE_U],
                      c_w) == 0);
 
   for (i = 0; i < c_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
-                     img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
+    match &= (memcmp(img1->planes[AOM_PLANE_V] + i * img1->stride[AOM_PLANE_V],
+                     img2->planes[AOM_PLANE_V] + i * img2->stride[AOM_PLANE_V],
                      c_w) == 0);
 
   return match;
 }
 
 #define NELEMENTS(x) (sizeof(x) / sizeof(x[0]))
-#if CONFIG_VP10_ENCODER
-#define ARG_CTRL_CNT_MAX NELEMENTS(vp10_arg_ctrl_map)
+#if CONFIG_AV1_ENCODER
+#define ARG_CTRL_CNT_MAX NELEMENTS(av1_arg_ctrl_map)
 #endif
 
 #if !CONFIG_WEBM_IO
@@ -785,7 +785,7 @@
 
 /* Per-stream configuration */
 struct stream_config {
-  struct vpx_codec_enc_cfg cfg;
+  struct aom_codec_enc_cfg cfg;
   const char *out_fn;
   const char *stats_fn;
 #if CONFIG_FP_MB_STATS
@@ -795,7 +795,7 @@
   int arg_ctrls[ARG_CTRL_CNT_MAX][2];
   int arg_ctrl_cnt;
   int write_webm;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   // whether to use 16bit internal buffers
   int use_16bit_internal;
 #endif
@@ -813,7 +813,7 @@
   double psnr_totals[4];
   int psnr_count;
   int counts[64];
-  vpx_codec_ctx_t encoder;
+  aom_codec_ctx_t encoder;
   unsigned int frames_out;
   uint64_t cx_time;
   size_t nbytes;
@@ -821,13 +821,13 @@
 #if CONFIG_FP_MB_STATS
   stats_io_t fpmb_stats;
 #endif
-  struct vpx_image *img;
-  vpx_codec_ctx_t decoder;
+  struct aom_image *img;
+  aom_codec_ctx_t decoder;
   int mismatch_seen;
 };
 
 static void validate_positive_rational(const char *msg,
-                                       struct vpx_rational *rat) {
+                                       struct aom_rational *rat) {
   if (rat->den < 0) {
     rat->num *= -1;
     rat->den *= -1;
@@ -838,26 +838,26 @@
   if (!rat->den) die("Error: %s has zero denominator\n", msg);
 }
 
-static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
+static void parse_global_config(struct AvxEncoderConfig *global, char **argv) {
   char **argi, **argj;
   struct arg arg;
-  const int num_encoder = get_vpx_encoder_count();
+  const int num_encoder = get_aom_encoder_count();
 
   if (num_encoder < 1) die("Error: no valid encoder available\n");
 
   /* Initialize default parameters */
   memset(global, 0, sizeof(*global));
-  global->codec = get_vpx_encoder_by_index(num_encoder - 1);
+  global->codec = get_aom_encoder_by_index(num_encoder - 1);
   global->passes = 0;
   global->color_type = I420;
   /* Assign default deadline to good quality */
-  global->deadline = VPX_DL_GOOD_QUALITY;
+  global->deadline = AOM_DL_GOOD_QUALITY;
 
   for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
     arg.argv_step = 1;
 
     if (arg_match(&arg, &codecarg, argi)) {
-      global->codec = get_vpx_encoder_by_name(arg.val);
+      global->codec = get_aom_encoder_by_name(arg.val);
       if (!global->codec)
         die("Error: Unrecognized argument (%s) to --codec\n", arg.val);
     } else if (arg_match(&arg, &passes, argi)) {
@@ -875,11 +875,11 @@
     else if (arg_match(&arg, &deadline, argi))
       global->deadline = arg_parse_uint(&arg);
     else if (arg_match(&arg, &best_dl, argi))
-      global->deadline = VPX_DL_BEST_QUALITY;
+      global->deadline = AOM_DL_BEST_QUALITY;
     else if (arg_match(&arg, &good_dl, argi))
-      global->deadline = VPX_DL_GOOD_QUALITY;
+      global->deadline = AOM_DL_GOOD_QUALITY;
     else if (arg_match(&arg, &rt_dl, argi))
-      global->deadline = VPX_DL_REALTIME;
+      global->deadline = AOM_DL_REALTIME;
     else if (arg_match(&arg, &use_yv12, argi))
       global->color_type = YV12;
     else if (arg_match(&arg, &use_i420, argi))
@@ -932,12 +932,12 @@
   }
   /* Validate global config */
   if (global->passes == 0) {
-#if CONFIG_VP10_ENCODER
-    // Make default VP9 passes = 2 until there is a better quality 1-pass
+#if CONFIG_AV1_ENCODER
+    // Make default AV1 passes = 2 until there is a better quality 1-pass
     // encoder
     if (global->codec != NULL && global->codec->name != NULL)
-      global->passes = (strcmp(global->codec->name, "vp9") == 0 &&
-                        global->deadline != VPX_DL_REALTIME)
+      global->passes = (strcmp(global->codec->name, "av1") == 0 &&
+                        global->deadline != AOM_DL_REALTIME)
                            ? 2
                            : 1;
 #else
@@ -945,13 +945,13 @@
 #endif
   }
 
-  if (global->deadline == VPX_DL_REALTIME && global->passes > 1) {
+  if (global->deadline == AOM_DL_REALTIME && global->passes > 1) {
     warn("Enforcing one-pass encoding in realtime mode\n");
     global->passes = 1;
   }
 }
 
-static void open_input_file(struct VpxInputContext *input) {
+static void open_input_file(struct AvxInputContext *input) {
   /* Parse certain options from the input file, if possible */
   input->file = strcmp(input->filename, "-") ? fopen(input->filename, "rb")
                                              : set_binary_mode(stdin);
@@ -986,7 +986,7 @@
       input->pixel_aspect_ratio.denominator = input->y4m.par_d;
       input->framerate.numerator = input->y4m.fps_n;
       input->framerate.denominator = input->y4m.fps_d;
-      input->fmt = input->y4m.vpx_fmt;
+      input->fmt = input->y4m.aom_fmt;
       input->bit_depth = input->y4m.bit_depth;
     } else
       fatal("Unsupported Y4M stream.");
@@ -997,12 +997,12 @@
   }
 }
 
-static void close_input_file(struct VpxInputContext *input) {
+static void close_input_file(struct AvxInputContext *input) {
   fclose(input->file);
   if (input->file_type == FILE_TYPE_Y4M) y4m_input_close(&input->y4m);
 }
 
-static struct stream_state *new_stream(struct VpxEncoderConfig *global,
+static struct stream_state *new_stream(struct AvxEncoderConfig *global,
                                        struct stream_state *prev) {
   struct stream_state *stream;
 
@@ -1016,12 +1016,12 @@
     stream->index++;
     prev->next = stream;
   } else {
-    vpx_codec_err_t res;
+    aom_codec_err_t res;
 
     /* Populate encoder configuration */
-    res = vpx_codec_enc_config_default(global->codec->codec_interface(),
+    res = aom_codec_enc_config_default(global->codec->codec_interface(),
                                        &stream->config.cfg, global->usage);
-    if (res) fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
+    if (res) fatal("Failed to get config: %s\n", aom_codec_err_to_string(res));
 
     /* Change the default timebase to a high enough value so that the
      * encoder will always create strictly increasing timestamps.
@@ -1047,7 +1047,7 @@
     stream->webm_ctx.debug = global->debug;
 
     /* Default lag_in_frames is 0 in realtime mode */
-    if (global->deadline == VPX_DL_REALTIME)
+    if (global->deadline == AOM_DL_REALTIME)
       stream->config.cfg.g_lag_in_frames = 0;
   }
 
@@ -1058,7 +1058,7 @@
   return stream;
 }
 
-static int parse_stream_params(struct VpxEncoderConfig *global,
+static int parse_stream_params(struct AvxEncoderConfig *global,
                                struct stream_state *stream, char **argv) {
   char **argi, **argj;
   struct arg arg;
@@ -1066,18 +1066,18 @@
   static const int *ctrl_args_map = NULL;
   struct stream_config *config = &stream->config;
   int eos_mark_found = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int test_16bit_internal = 0;
 #endif
 
   // Handle codec specific options
   if (0) {
-#if CONFIG_VP10_ENCODER
-  } else if (strcmp(global->codec->name, "vp10") == 0) {
-    // TODO(jingning): Reuse VP9 specific encoder configuration parameters.
-    // Consider to expand this set for VP10 encoder control.
-    ctrl_args = vp10_args;
-    ctrl_args_map = vp10_arg_ctrl_map;
+#if CONFIG_AV1_ENCODER
+  } else if (strcmp(global->codec->name, "av1") == 0) {
+    // TODO(jingning): Consider expanding this set of encoder configuration
+    // parameters for finer AV1 encoder control.
+    ctrl_args = av1_args;
+    ctrl_args_map = av1_arg_ctrl_map;
 #endif
   }
 
@@ -1119,7 +1119,7 @@
       config->cfg.g_w = arg_parse_uint(&arg);
     } else if (arg_match(&arg, &height, argi)) {
       config->cfg.g_h = arg_parse_uint(&arg);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else if (arg_match(&arg, &bitdeptharg, argi)) {
       config->cfg.g_bit_depth = arg_parse_enum_or_int(&arg);
     } else if (arg_match(&arg, &inbitdeptharg, argi)) {
@@ -1136,7 +1136,7 @@
       config->cfg.g_error_resilient = arg_parse_uint(&arg);
     } else if (arg_match(&arg, &lag_in_frames, argi)) {
       config->cfg.g_lag_in_frames = arg_parse_uint(&arg);
-      if (global->deadline == VPX_DL_REALTIME &&
+      if (global->deadline == AOM_DL_REALTIME &&
           config->cfg.g_lag_in_frames != 0) {
         warn("non-zero %s option ignored in realtime mode.\n", arg.name);
         config->cfg.g_lag_in_frames = 0;
@@ -1190,11 +1190,11 @@
     } else if (arg_match(&arg, &kf_max_dist, argi)) {
       config->cfg.kf_max_dist = arg_parse_uint(&arg);
     } else if (arg_match(&arg, &kf_disabled, argi)) {
-      config->cfg.kf_mode = VPX_KF_DISABLED;
-#if CONFIG_VP9_HIGHBITDEPTH
+      config->cfg.kf_mode = AOM_KF_DISABLED;
+#if CONFIG_AOM_HIGHBITDEPTH
     } else if (arg_match(&arg, &test16bitinternalarg, argi)) {
-      if (strcmp(global->codec->name, "vp9") == 0 ||
-          strcmp(global->codec->name, "vp10") == 0) {
+      // vp9/vp10 were both renamed to "av1", so a single check suffices.
+      if (strcmp(global->codec->name, "av1") == 0) {
         test_16bit_internal = 1;
       }
 #endif
@@ -1225,9 +1225,9 @@
       if (!match) argj++;
     }
   }
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (strcmp(global->codec->name, "vp9") == 0 ||
-      strcmp(global->codec->name, "vp10") == 0) {
+#if CONFIG_AOM_HIGHBITDEPTH
+  // vp9/vp10 were both renamed to "av1", so a single check suffices.
+  if (strcmp(global->codec->name, "av1") == 0) {
     config->use_16bit_internal =
         test_16bit_internal | (config->cfg.g_profile > 1);
   }
@@ -1244,7 +1244,7 @@
   } while (0)
 
 static void validate_stream_config(const struct stream_state *stream,
-                                   const struct VpxEncoderConfig *global) {
+                                   const struct AvxEncoderConfig *global) {
   const struct stream_state *streami;
   (void)global;
 
@@ -1320,30 +1320,30 @@
   }
 }
 
-static const char *image_format_to_string(vpx_img_fmt_t f) {
+static const char *image_format_to_string(aom_img_fmt_t f) {
   switch (f) {
-    case VPX_IMG_FMT_I420: return "I420";
-    case VPX_IMG_FMT_I422: return "I422";
-    case VPX_IMG_FMT_I444: return "I444";
-    case VPX_IMG_FMT_I440: return "I440";
-    case VPX_IMG_FMT_YV12: return "YV12";
-    case VPX_IMG_FMT_I42016: return "I42016";
-    case VPX_IMG_FMT_I42216: return "I42216";
-    case VPX_IMG_FMT_I44416: return "I44416";
-    case VPX_IMG_FMT_I44016: return "I44016";
+    case AOM_IMG_FMT_I420: return "I420";
+    case AOM_IMG_FMT_I422: return "I422";
+    case AOM_IMG_FMT_I444: return "I444";
+    case AOM_IMG_FMT_I440: return "I440";
+    case AOM_IMG_FMT_YV12: return "YV12";
+    case AOM_IMG_FMT_I42016: return "I42016";
+    case AOM_IMG_FMT_I42216: return "I42216";
+    case AOM_IMG_FMT_I44416: return "I44416";
+    case AOM_IMG_FMT_I44016: return "I44016";
     default: return "Other";
   }
 }
 
 static void show_stream_config(struct stream_state *stream,
-                               struct VpxEncoderConfig *global,
-                               struct VpxInputContext *input) {
+                               struct AvxEncoderConfig *global,
+                               struct AvxInputContext *input) {
 #define SHOW(field) \
   fprintf(stderr, "    %-28s = %d\n", #field, stream->config.cfg.field)
 
   if (stream->index == 0) {
     fprintf(stderr, "Codec: %s\n",
-            vpx_codec_iface_name(global->codec->codec_interface()));
+            aom_codec_iface_name(global->codec->codec_interface()));
     fprintf(stderr, "Source file: %s File Type: %s Format: %s\n",
             input->filename, file_type_to_string(input->file_type),
             image_format_to_string(input->fmt));
@@ -1389,12 +1389,12 @@
 }
 
 static void open_output_file(struct stream_state *stream,
-                             struct VpxEncoderConfig *global,
-                             const struct VpxRational *pixel_aspect_ratio) {
+                             struct AvxEncoderConfig *global,
+                             const struct AvxRational *pixel_aspect_ratio) {
   const char *fn = stream->config.out_fn;
-  const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg;
+  const struct aom_codec_enc_cfg *const cfg = &stream->config.cfg;
 
-  if (cfg->g_pass == VPX_RC_FIRST_PASS) return;
+  if (cfg->g_pass == AOM_RC_FIRST_PASS) return;
 
   stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
 
@@ -1421,9 +1421,9 @@
 
 static void close_output_file(struct stream_state *stream,
                               unsigned int fourcc) {
-  const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg;
+  const struct aom_codec_enc_cfg *const cfg = &stream->config.cfg;
 
-  if (cfg->g_pass == VPX_RC_FIRST_PASS) return;
+  if (cfg->g_pass == AOM_RC_FIRST_PASS) return;
 
 #if CONFIG_WEBM_IO
   if (stream->config.write_webm) {
@@ -1441,7 +1441,7 @@
 }
 
 static void setup_pass(struct stream_state *stream,
-                       struct VpxEncoderConfig *global, int pass) {
+                       struct AvxEncoderConfig *global, int pass) {
   if (stream->config.stats_fn) {
     if (!stats_open_file(&stream->stats, stream->config.stats_fn, pass))
       fatal("Failed to open statistics store");
@@ -1462,8 +1462,8 @@
 #endif
 
   stream->config.cfg.g_pass = global->passes == 2
-                                  ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
-                                  : VPX_RC_ONE_PASS;
+                                  ? pass ? AOM_RC_LAST_PASS : AOM_RC_FIRST_PASS
+                                  : AOM_RC_ONE_PASS;
   if (pass) {
     stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
 #if CONFIG_FP_MB_STATS
@@ -1478,29 +1478,29 @@
 }
 
 static void initialize_encoder(struct stream_state *stream,
-                               struct VpxEncoderConfig *global) {
+                               struct AvxEncoderConfig *global) {
   int i;
   int flags = 0;
 
-  flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
-  flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
-#if CONFIG_VP9_HIGHBITDEPTH
-  flags |= stream->config.use_16bit_internal ? VPX_CODEC_USE_HIGHBITDEPTH : 0;
+  flags |= global->show_psnr ? AOM_CODEC_USE_PSNR : 0;
+  flags |= global->out_part ? AOM_CODEC_USE_OUTPUT_PARTITION : 0;
+#if CONFIG_AOM_HIGHBITDEPTH
+  flags |= stream->config.use_16bit_internal ? AOM_CODEC_USE_HIGHBITDEPTH : 0;
 #endif
 
   /* Construct Encoder Context */
-  vpx_codec_enc_init(&stream->encoder, global->codec->codec_interface(),
+  aom_codec_enc_init(&stream->encoder, global->codec->codec_interface(),
                      &stream->config.cfg, flags);
   ctx_exit_on_error(&stream->encoder, "Failed to initialize encoder");
 
-  /* Note that we bypass the vpx_codec_control wrapper macro because
+  /* Note that we bypass the aom_codec_control wrapper macro because
    * we're being clever to store the control IDs in an array. Real
    * applications will want to make use of the enumerations directly
    */
   for (i = 0; i < stream->config.arg_ctrl_cnt; i++) {
     int ctrl = stream->config.arg_ctrls[i][0];
     int value = stream->config.arg_ctrls[i][1];
-    if (vpx_codec_control_(&stream->encoder, ctrl, value))
+    if (aom_codec_control_(&stream->encoder, ctrl, value))
       fprintf(stderr, "Error: Tried to set control %d = %d\n", ctrl, value);
 
     ctx_exit_on_error(&stream->encoder, "Failed to control codec");
@@ -1508,16 +1508,16 @@
 
 #if CONFIG_DECODERS
   if (global->test_decode != TEST_DECODE_OFF) {
-    const VpxInterface *decoder = get_vpx_decoder_by_name(global->codec->name);
-    vpx_codec_dec_cfg_t cfg = { 0, 0, 0 };
-    vpx_codec_dec_init(&stream->decoder, decoder->codec_interface(), &cfg, 0);
+    const AvxInterface *decoder = get_aom_decoder_by_name(global->codec->name);
+    aom_codec_dec_cfg_t cfg = { 0, 0, 0 };
+    aom_codec_dec_init(&stream->decoder, decoder->codec_interface(), &cfg, 0);
 
-#if CONFIG_VP10_DECODER && CONFIG_EXT_TILE
-    if (strcmp(global->codec->name, "vp10") == 0) {
-      vpx_codec_control(&stream->decoder, VP10_SET_DECODE_TILE_ROW, -1);
+#if CONFIG_AV1_DECODER && CONFIG_EXT_TILE
+    if (strcmp(global->codec->name, "av1") == 0) {
+      aom_codec_control(&stream->decoder, AV1_SET_DECODE_TILE_ROW, -1);
       ctx_exit_on_error(&stream->decoder, "Failed to set decode_tile_row");
 
-      vpx_codec_control(&stream->decoder, VP10_SET_DECODE_TILE_COL, -1);
+      aom_codec_control(&stream->decoder, AV1_SET_DECODE_TILE_COL, -1);
       ctx_exit_on_error(&stream->decoder, "Failed to set decode_tile_col");
     }
 #endif
@@ -1526,11 +1526,11 @@
 }
 
 static void encode_frame(struct stream_state *stream,
-                         struct VpxEncoderConfig *global, struct vpx_image *img,
+                         struct AvxEncoderConfig *global, struct aom_image *img,
                          unsigned int frames_in) {
-  vpx_codec_pts_t frame_start, next_frame_start;
-  struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
-  struct vpx_usec_timer timer;
+  aom_codec_pts_t frame_start, next_frame_start;
+  struct aom_codec_enc_cfg *cfg = &stream->config.cfg;
+  struct aom_usec_timer timer;
 
   frame_start =
       (cfg->g_timebase.den * (int64_t)(frames_in - 1) * global->framerate.den) /
@@ -1540,29 +1540,29 @@
       cfg->g_timebase.num / global->framerate.num;
 
 /* Scale if necessary */
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (img) {
-    if ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) &&
+    if ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) &&
         (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
-      if (img->fmt != VPX_IMG_FMT_I42016) {
+      if (img->fmt != AOM_IMG_FMT_I42016) {
         fprintf(stderr, "%s can only scale 4:2:0 inputs\n", exec_name);
         exit(EXIT_FAILURE);
       }
 #if CONFIG_LIBYUV
       if (!stream->img) {
         stream->img =
-            vpx_img_alloc(NULL, VPX_IMG_FMT_I42016, cfg->g_w, cfg->g_h, 16);
+            aom_img_alloc(NULL, AOM_IMG_FMT_I42016, cfg->g_w, cfg->g_h, 16);
       }
       I420Scale_16(
-          (uint16 *)img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y] / 2,
-          (uint16 *)img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U] / 2,
-          (uint16 *)img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V] / 2,
-          img->d_w, img->d_h, (uint16 *)stream->img->planes[VPX_PLANE_Y],
-          stream->img->stride[VPX_PLANE_Y] / 2,
-          (uint16 *)stream->img->planes[VPX_PLANE_U],
-          stream->img->stride[VPX_PLANE_U] / 2,
-          (uint16 *)stream->img->planes[VPX_PLANE_V],
-          stream->img->stride[VPX_PLANE_V] / 2, stream->img->d_w,
+          (uint16 *)img->planes[AOM_PLANE_Y], img->stride[AOM_PLANE_Y] / 2,
+          (uint16 *)img->planes[AOM_PLANE_U], img->stride[AOM_PLANE_U] / 2,
+          (uint16 *)img->planes[AOM_PLANE_V], img->stride[AOM_PLANE_V] / 2,
+          img->d_w, img->d_h, (uint16 *)stream->img->planes[AOM_PLANE_Y],
+          stream->img->stride[AOM_PLANE_Y] / 2,
+          (uint16 *)stream->img->planes[AOM_PLANE_U],
+          stream->img->stride[AOM_PLANE_U] / 2,
+          (uint16 *)stream->img->planes[AOM_PLANE_V],
+          stream->img->stride[AOM_PLANE_V] / 2, stream->img->d_w,
           stream->img->d_h, kFilterBox);
       img = stream->img;
 #else
@@ -1577,21 +1577,21 @@
   }
 #endif
   if (img && (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
-    if (img->fmt != VPX_IMG_FMT_I420 && img->fmt != VPX_IMG_FMT_YV12) {
+    if (img->fmt != AOM_IMG_FMT_I420 && img->fmt != AOM_IMG_FMT_YV12) {
       fprintf(stderr, "%s can only scale 4:2:0 8bpp inputs\n", exec_name);
       exit(EXIT_FAILURE);
     }
 #if CONFIG_LIBYUV
     if (!stream->img)
       stream->img =
-          vpx_img_alloc(NULL, VPX_IMG_FMT_I420, cfg->g_w, cfg->g_h, 16);
+          aom_img_alloc(NULL, AOM_IMG_FMT_I420, cfg->g_w, cfg->g_h, 16);
     I420Scale(
-        img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
-        img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
-        img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], img->d_w, img->d_h,
-        stream->img->planes[VPX_PLANE_Y], stream->img->stride[VPX_PLANE_Y],
-        stream->img->planes[VPX_PLANE_U], stream->img->stride[VPX_PLANE_U],
-        stream->img->planes[VPX_PLANE_V], stream->img->stride[VPX_PLANE_V],
+        img->planes[AOM_PLANE_Y], img->stride[AOM_PLANE_Y],
+        img->planes[AOM_PLANE_U], img->stride[AOM_PLANE_U],
+        img->planes[AOM_PLANE_V], img->stride[AOM_PLANE_V], img->d_w, img->d_h,
+        stream->img->planes[AOM_PLANE_Y], stream->img->stride[AOM_PLANE_Y],
+        stream->img->planes[AOM_PLANE_U], stream->img->stride[AOM_PLANE_U],
+        stream->img->planes[AOM_PLANE_V], stream->img->stride[AOM_PLANE_V],
         stream->img->d_w, stream->img->d_h, kFilterBox);
     img = stream->img;
 #else
@@ -1604,40 +1604,40 @@
 #endif
   }
 
-  vpx_usec_timer_start(&timer);
-  vpx_codec_encode(&stream->encoder, img, frame_start,
+  aom_usec_timer_start(&timer);
+  aom_codec_encode(&stream->encoder, img, frame_start,
                    (unsigned long)(next_frame_start - frame_start), 0,
                    global->deadline);
-  vpx_usec_timer_mark(&timer);
-  stream->cx_time += vpx_usec_timer_elapsed(&timer);
+  aom_usec_timer_mark(&timer);
+  stream->cx_time += aom_usec_timer_elapsed(&timer);
   ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
                     stream->index);
 }
 
 static void update_quantizer_histogram(struct stream_state *stream) {
-  if (stream->config.cfg.g_pass != VPX_RC_FIRST_PASS) {
+  if (stream->config.cfg.g_pass != AOM_RC_FIRST_PASS) {
     int q;
 
-    vpx_codec_control(&stream->encoder, VP8E_GET_LAST_QUANTIZER_64, &q);
+    aom_codec_control(&stream->encoder, AOME_GET_LAST_QUANTIZER_64, &q);
     ctx_exit_on_error(&stream->encoder, "Failed to read quantizer");
     stream->counts[q]++;
   }
 }
 
 static void get_cx_data(struct stream_state *stream,
-                        struct VpxEncoderConfig *global, int *got_data) {
-  const vpx_codec_cx_pkt_t *pkt;
-  const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
-  vpx_codec_iter_t iter = NULL;
+                        struct AvxEncoderConfig *global, int *got_data) {
+  const aom_codec_cx_pkt_t *pkt;
+  const struct aom_codec_enc_cfg *cfg = &stream->config.cfg;
+  aom_codec_iter_t iter = NULL;
 
   *got_data = 0;
-  while ((pkt = vpx_codec_get_cx_data(&stream->encoder, &iter))) {
+  while ((pkt = aom_codec_get_cx_data(&stream->encoder, &iter))) {
     static size_t fsize = 0;
     static int64_t ivf_header_pos = 0;
 
     switch (pkt->kind) {
-      case VPX_CODEC_CX_FRAME_PKT:
-        if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+      case AOM_CODEC_CX_FRAME_PKT:
+        if (!(pkt->data.frame.flags & AOM_FRAME_IS_FRAGMENT)) {
           stream->frames_out++;
         }
         if (!global->quiet)
@@ -1658,7 +1658,7 @@
           } else {
             fsize += pkt->data.frame.sz;
 
-            if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+            if (!(pkt->data.frame.flags & AOM_FRAME_IS_FRAGMENT)) {
               const int64_t currpos = ftello(stream->file);
               fseeko(stream->file, ivf_header_pos, SEEK_SET);
               ivf_write_frame_size(stream->file, fsize);
@@ -1674,7 +1674,7 @@
         *got_data = 1;
 #if CONFIG_DECODERS
         if (global->test_decode != TEST_DECODE_OFF && !stream->mismatch_seen) {
-          vpx_codec_decode(&stream->decoder, pkt->data.frame.buf,
+          aom_codec_decode(&stream->decoder, pkt->data.frame.buf,
                            (unsigned int)pkt->data.frame.sz, NULL, 0);
           if (stream->decoder.err) {
             warn_or_exit_on_error(&stream->decoder,
@@ -1686,20 +1686,20 @@
         }
 #endif
         break;
-      case VPX_CODEC_STATS_PKT:
+      case AOM_CODEC_STATS_PKT:
         stream->frames_out++;
         stats_write(&stream->stats, pkt->data.twopass_stats.buf,
                     pkt->data.twopass_stats.sz);
         stream->nbytes += pkt->data.raw.sz;
         break;
 #if CONFIG_FP_MB_STATS
-      case VPX_CODEC_FPMB_STATS_PKT:
+      case AOM_CODEC_FPMB_STATS_PKT:
         stats_write(&stream->fpmb_stats, pkt->data.firstpass_mb_stats.buf,
                     pkt->data.firstpass_mb_stats.sz);
         stream->nbytes += pkt->data.raw.sz;
         break;
 #endif
-      case VPX_CODEC_PSNR_PKT:
+      case AOM_CODEC_PSNR_PKT:
 
         if (global->show_psnr) {
           int i;
@@ -1743,46 +1743,46 @@
 
 static void test_decode(struct stream_state *stream,
                         enum TestDecodeFatality fatal,
-                        const VpxInterface *codec) {
-  vpx_image_t enc_img, dec_img;
+                        const AvxInterface *codec) {
+  aom_image_t enc_img, dec_img;
 
   if (stream->mismatch_seen) return;
 
   /* Get the internal reference frame */
   if (strcmp(codec->name, "vp8") == 0) {
-    struct vpx_ref_frame ref_enc, ref_dec;
+    struct aom_ref_frame ref_enc, ref_dec;
     int width, height;
 
     width = (stream->config.cfg.g_w + 15) & ~15;
     height = (stream->config.cfg.g_h + 15) & ~15;
-    vpx_img_alloc(&ref_enc.img, VPX_IMG_FMT_I420, width, height, 1);
+    aom_img_alloc(&ref_enc.img, AOM_IMG_FMT_I420, width, height, 1);
     enc_img = ref_enc.img;
-    vpx_img_alloc(&ref_dec.img, VPX_IMG_FMT_I420, width, height, 1);
+    aom_img_alloc(&ref_dec.img, AOM_IMG_FMT_I420, width, height, 1);
     dec_img = ref_dec.img;
 
-    ref_enc.frame_type = VP8_LAST_FRAME;
-    ref_dec.frame_type = VP8_LAST_FRAME;
-    vpx_codec_control(&stream->encoder, VP8_COPY_REFERENCE, &ref_enc);
-    vpx_codec_control(&stream->decoder, VP8_COPY_REFERENCE, &ref_dec);
+    ref_enc.frame_type = AOM_LAST_FRAME;
+    ref_dec.frame_type = AOM_LAST_FRAME;
+    aom_codec_control(&stream->encoder, AOM_COPY_REFERENCE, &ref_enc);
+    aom_codec_control(&stream->decoder, AOM_COPY_REFERENCE, &ref_dec);
   } else {
-    vpx_codec_control(&stream->encoder, VP10_GET_NEW_FRAME_IMAGE, &enc_img);
-    vpx_codec_control(&stream->decoder, VP10_GET_NEW_FRAME_IMAGE, &dec_img);
+    aom_codec_control(&stream->encoder, AV1_GET_NEW_FRAME_IMAGE, &enc_img);
+    aom_codec_control(&stream->decoder, AV1_GET_NEW_FRAME_IMAGE, &dec_img);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-    if ((enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) !=
-        (dec_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH)) {
-      if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
-        vpx_image_t enc_hbd_img;
-        vpx_img_alloc(&enc_hbd_img, enc_img.fmt - VPX_IMG_FMT_HIGHBITDEPTH,
+#if CONFIG_AOM_HIGHBITDEPTH
+    if ((enc_img.fmt & AOM_IMG_FMT_HIGHBITDEPTH) !=
+        (dec_img.fmt & AOM_IMG_FMT_HIGHBITDEPTH)) {
+      if (enc_img.fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
+        aom_image_t enc_hbd_img;
+        aom_img_alloc(&enc_hbd_img, enc_img.fmt - AOM_IMG_FMT_HIGHBITDEPTH,
                       enc_img.d_w, enc_img.d_h, 16);
-        vpx_img_truncate_16_to_8(&enc_hbd_img, &enc_img);
+        aom_img_truncate_16_to_8(&enc_hbd_img, &enc_img);
         enc_img = enc_hbd_img;
       }
-      if (dec_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
-        vpx_image_t dec_hbd_img;
-        vpx_img_alloc(&dec_hbd_img, dec_img.fmt - VPX_IMG_FMT_HIGHBITDEPTH,
+      if (dec_img.fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
+        aom_image_t dec_hbd_img;
+        aom_img_alloc(&dec_hbd_img, dec_img.fmt - AOM_IMG_FMT_HIGHBITDEPTH,
                       dec_img.d_w, dec_img.d_h, 16);
-        vpx_img_truncate_16_to_8(&dec_hbd_img, &dec_img);
+        aom_img_truncate_16_to_8(&dec_hbd_img, &dec_img);
         dec_img = dec_hbd_img;
       }
     }
@@ -1793,8 +1793,8 @@
 
   if (!compare_img(&enc_img, &dec_img)) {
     int y[4], u[4], v[4];
-#if CONFIG_VP9_HIGHBITDEPTH
-    if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
+#if CONFIG_AOM_HIGHBITDEPTH
+    if (enc_img.fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
       find_mismatch_high(&enc_img, &dec_img, y, u, v);
     } else {
       find_mismatch(&enc_img, &dec_img, y, u, v);
@@ -1813,8 +1813,8 @@
     stream->mismatch_seen = stream->frames_out;
   }
 
-  vpx_img_free(&enc_img);
-  vpx_img_free(&dec_img);
+  aom_img_free(&enc_img);
+  aom_img_free(&dec_img);
 }
 
 static void print_time(const char *label, int64_t etl) {
@@ -1838,17 +1838,17 @@
 
 int main(int argc, const char **argv_) {
   int pass;
-  vpx_image_t raw;
-#if CONFIG_VP9_HIGHBITDEPTH
-  vpx_image_t raw_shift;
+  aom_image_t raw;
+#if CONFIG_AOM_HIGHBITDEPTH
+  aom_image_t raw_shift;
   int allocated_raw_shift = 0;
   int use_16bit_internal = 0;
   int input_shift = 0;
 #endif
   int frame_avail, got_data;
 
-  struct VpxInputContext input;
-  struct VpxEncoderConfig global;
+  struct AvxInputContext input;
+  struct AvxEncoderConfig global;
   struct stream_state *streams = NULL;
   char **argv, **argi;
   uint64_t cx_time = 0;
@@ -1874,11 +1874,11 @@
   parse_global_config(&global, argv);
 
   switch (global.color_type) {
-    case I420: input.fmt = VPX_IMG_FMT_I420; break;
-    case I422: input.fmt = VPX_IMG_FMT_I422; break;
-    case I444: input.fmt = VPX_IMG_FMT_I444; break;
-    case I440: input.fmt = VPX_IMG_FMT_I440; break;
-    case YV12: input.fmt = VPX_IMG_FMT_YV12; break;
+    case I420: input.fmt = AOM_IMG_FMT_I420; break;
+    case I422: input.fmt = AOM_IMG_FMT_I422; break;
+    case I444: input.fmt = AOM_IMG_FMT_I444; break;
+    case I440: input.fmt = AOM_IMG_FMT_I440; break;
+    case YV12: input.fmt = AOM_IMG_FMT_YV12; break;
   }
 
   {
@@ -1909,8 +1909,7 @@
   if (!input.filename) usage_exit();
 
   /* Decide if other chroma subsamplings than 4:2:0 are supported */
-  if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == VP10_FOURCC)
-    input.only_i420 = 0;
+  if (global.codec->fourcc == AV1_FOURCC) input.only_i420 = 0;
 
   for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++) {
     int frames_in = 0, seen_frames = 0;
@@ -1952,7 +1951,7 @@
           input.bit_depth = stream->config.cfg.g_input_bit_depth =
               (int)stream->config.cfg.g_bit_depth;
       });
-      if (input.bit_depth > 8) input.fmt |= VPX_IMG_FMT_HIGHBITDEPTH;
+      if (input.bit_depth > 8) input.fmt |= AOM_IMG_FMT_HIGHBITDEPTH;
     } else {
       FOREACH_STREAM(
           { stream->config.cfg.g_input_bit_depth = input.bit_depth; });
@@ -1978,7 +1977,7 @@
       if (stream->config.write_webm) {
         stream->config.write_webm = 0;
         warn(
-            "vpxenc was compiled without WebM container support."
+            "aomenc was compiled without WebM container support."
             "Producing IVF output");
       }
     });
@@ -2005,7 +2004,7 @@
            frames.*/
         memset(&raw, 0, sizeof(raw));
       else
-        vpx_img_alloc(&raw, input.fmt, input.width, input.height, 32);
+        aom_img_alloc(&raw, input.fmt, input.width, input.height, 32);
 
       FOREACH_STREAM(stream->rate_hist = init_rate_histogram(
                          &stream->config.cfg, &global.framerate));
@@ -2016,9 +2015,9 @@
         open_output_file(stream, &global, &input.pixel_aspect_ratio));
     FOREACH_STREAM(initialize_encoder(stream, &global));
 
-#if CONFIG_VP9_HIGHBITDEPTH
-    if (strcmp(global.codec->name, "vp9") == 0 ||
-        strcmp(global.codec->name, "vp10") == 0) {
+#if CONFIG_AOM_HIGHBITDEPTH
+    // vp9/vp10 were both renamed to "av1", so a single check suffices.
+    if (strcmp(global.codec->name, "av1") == 0) {
       // Check to see if at least one stream uses 16 bit internal.
       // Currently assume that the bit_depths for all streams using
       // highbitdepth are the same.
@@ -2040,7 +2039,7 @@
     got_data = 0;
 
     while (frame_avail || got_data) {
-      struct vpx_usec_timer timer;
+      struct aom_usec_timer timer;
 
       if (!global.limit || frames_in < global.limit) {
         frame_avail = read_frame(&input, &raw);
@@ -2070,25 +2069,25 @@
         frame_avail = 0;
 
       if (frames_in > global.skip_frames) {
-#if CONFIG_VP9_HIGHBITDEPTH
-        vpx_image_t *frame_to_encode;
+#if CONFIG_AOM_HIGHBITDEPTH
+        aom_image_t *frame_to_encode;
         if (input_shift || (use_16bit_internal && input.bit_depth == 8)) {
           assert(use_16bit_internal);
           // Input bit depth and stream bit depth do not match, so up
           // shift frame to stream bit depth
           if (!allocated_raw_shift) {
-            vpx_img_alloc(&raw_shift, raw.fmt | VPX_IMG_FMT_HIGHBITDEPTH,
+            aom_img_alloc(&raw_shift, raw.fmt | AOM_IMG_FMT_HIGHBITDEPTH,
                           input.width, input.height, 32);
             allocated_raw_shift = 1;
           }
-          vpx_img_upshift(&raw_shift, &raw, input_shift);
+          aom_img_upshift(&raw_shift, &raw, input_shift);
           frame_to_encode = &raw_shift;
         } else {
           frame_to_encode = &raw;
         }
-        vpx_usec_timer_start(&timer);
+        aom_usec_timer_start(&timer);
         if (use_16bit_internal) {
-          assert(frame_to_encode->fmt & VPX_IMG_FMT_HIGHBITDEPTH);
+          assert(frame_to_encode->fmt & AOM_IMG_FMT_HIGHBITDEPTH);
           FOREACH_STREAM({
             if (stream->config.use_16bit_internal)
               encode_frame(stream, &global,
@@ -2097,18 +2096,18 @@
               assert(0);
           });
         } else {
-          assert((frame_to_encode->fmt & VPX_IMG_FMT_HIGHBITDEPTH) == 0);
+          assert((frame_to_encode->fmt & AOM_IMG_FMT_HIGHBITDEPTH) == 0);
           FOREACH_STREAM(encode_frame(stream, &global,
                                       frame_avail ? frame_to_encode : NULL,
                                       frames_in));
         }
 #else
-        vpx_usec_timer_start(&timer);
+        aom_usec_timer_start(&timer);
         FOREACH_STREAM(encode_frame(stream, &global, frame_avail ? &raw : NULL,
                                     frames_in));
 #endif
-        vpx_usec_timer_mark(&timer);
-        cx_time += vpx_usec_timer_elapsed(&timer);
+        aom_usec_timer_mark(&timer);
+        cx_time += aom_usec_timer_elapsed(&timer);
 
         FOREACH_STREAM(update_quantizer_histogram(stream));
 
@@ -2170,8 +2169,7 @@
     }
 
     if (global.show_psnr) {
-      if (global.codec->fourcc == VP9_FOURCC ||
-          global.codec->fourcc == VP10_FOURCC) {
+      if (global.codec->fourcc == AV1_FOURCC) {
         FOREACH_STREAM(
             show_psnr(stream, (1 << stream->config.cfg.g_input_bit_depth) - 1));
       } else {
@@ -2179,10 +2177,10 @@
       }
     }
 
-    FOREACH_STREAM(vpx_codec_destroy(&stream->encoder));
+    FOREACH_STREAM(aom_codec_destroy(&stream->encoder));
 
     if (global.test_decode != TEST_DECODE_OFF) {
-      FOREACH_STREAM(vpx_codec_destroy(&stream->decoder));
+      FOREACH_STREAM(aom_codec_destroy(&stream->decoder));
     }
 
     close_input_file(&input);
@@ -2227,10 +2225,10 @@
     });
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (allocated_raw_shift) vpx_img_free(&raw_shift);
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (allocated_raw_shift) aom_img_free(&raw_shift);
 #endif
-  vpx_img_free(&raw);
+  aom_img_free(&raw);
   free(argv);
   free(streams);
   return res ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/vpxenc.h b/aomenc.h
similarity index 85%
rename from vpxenc.h
rename to aomenc.h
index eee00d0..3825488 100644
--- a/vpxenc.h
+++ b/aomenc.h
@@ -7,10 +7,10 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VPXENC_H_
-#define VPXENC_H_
+#ifndef AOMENC_H_
+#define AOMENC_H_
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -30,11 +30,11 @@
   YV12,  // 4:2:0 with uv flipped, only 8-bit depth
 } ColorInputType;
 
-struct VpxInterface;
+struct AvxInterface;
 
 /* Configuration elements common to all streams. */
-struct VpxEncoderConfig {
-  const struct VpxInterface *codec;
+struct AvxEncoderConfig {
+  const struct AvxInterface *codec;
   int passes;
   int pass;
   int usage;
@@ -47,7 +47,7 @@
   int show_psnr;
   enum TestDecodeFatality test_decode;
   int have_framerate;
-  struct vpx_rational framerate;
+  struct aom_rational framerate;
   int out_part;
   int debug;
   int show_q_hist_buckets;
@@ -61,4 +61,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VPXENC_H_
+#endif  // AOMENC_H_
diff --git a/vpxstats.c b/aomstats.c
similarity index 96%
rename from vpxstats.c
rename to aomstats.c
index 142e367..16bd0af 100644
--- a/vpxstats.c
+++ b/aomstats.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpxstats.h"
+#include "./aomstats.h"
 
 #include <math.h>
 #include <stdlib.h>
@@ -102,4 +102,4 @@
   }
 }
 
-vpx_fixed_buf_t stats_get(stats_io_t *stats) { return stats->buf; }
+aom_fixed_buf_t stats_get(stats_io_t *stats) { return stats->buf; }
diff --git a/vpxstats.h b/aomstats.h
similarity index 85%
rename from vpxstats.h
rename to aomstats.h
index 0ea7ce4..0e88937 100644
--- a/vpxstats.h
+++ b/aomstats.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPXSTATS_H_
-#define VPXSTATS_H_
+#ifndef AOMSTATS_H_
+#define AOMSTATS_H_
 
 #include <stdio.h>
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -23,7 +23,7 @@
  * first pass statistics
  */
 typedef struct {
-  vpx_fixed_buf_t buf;
+  aom_fixed_buf_t buf;
   int pass;
   FILE *file;
   char *buf_ptr;
@@ -34,10 +34,10 @@
 int stats_open_mem(stats_io_t *stats, int pass);
 void stats_close(stats_io_t *stats, int last_pass);
 void stats_write(stats_io_t *stats, const void *pkt, size_t len);
-vpx_fixed_buf_t stats_get(stats_io_t *stats);
+aom_fixed_buf_t stats_get(stats_io_t *stats);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VPXSTATS_H_
+#endif  // AOMSTATS_H_
diff --git a/args.c b/args.c
index fb23c88..91b29ba 100644
--- a/args.c
+++ b/args.c
@@ -151,14 +151,14 @@
   return 0;
 }
 
-struct vpx_rational {
+struct aom_rational {
   int num; /**< fraction numerator */
   int den; /**< fraction denominator */
 };
-struct vpx_rational arg_parse_rational(const struct arg *arg) {
+struct aom_rational arg_parse_rational(const struct arg *arg) {
   long int rawval;
   char *endptr;
-  struct vpx_rational rat;
+  struct aom_rational rat;
 
   /* parse numerator */
   rawval = strtol(arg->val, &endptr, 10);
diff --git a/args.h b/args.h
index 54abe04..8e97b0c 100644
--- a/args.h
+++ b/args.h
@@ -53,7 +53,7 @@
 
 unsigned int arg_parse_uint(const struct arg *arg);
 int arg_parse_int(const struct arg *arg);
-struct vpx_rational arg_parse_rational(const struct arg *arg);
+struct aom_rational arg_parse_rational(const struct arg *arg);
 int arg_parse_enum(const struct arg *arg);
 int arg_parse_enum_or_int(const struct arg *arg);
 #ifdef __cplusplus
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
new file mode 100644
index 0000000..be89263
--- /dev/null
+++ b/av1/av1_common.mk
@@ -0,0 +1,139 @@
+##
+##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_COMMON_SRCS-yes += av1_common.mk
+AV1_COMMON_SRCS-yes += av1_iface_common.h
+AV1_COMMON_SRCS-yes += common/ans.h
+AV1_COMMON_SRCS-yes += common/alloccommon.c
+AV1_COMMON_SRCS-yes += common/blockd.c
+AV1_COMMON_SRCS-yes += common/debugmodes.c
+AV1_COMMON_SRCS-yes += common/divide.h
+AV1_COMMON_SRCS-yes += common/entropy.c
+AV1_COMMON_SRCS-yes += common/entropymode.c
+AV1_COMMON_SRCS-yes += common/entropymv.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.h
+AV1_COMMON_SRCS-yes += common/alloccommon.h
+AV1_COMMON_SRCS-yes += common/blockd.h
+AV1_COMMON_SRCS-yes += common/common.h
+AV1_COMMON_SRCS-yes += common/entropy.h
+AV1_COMMON_SRCS-yes += common/entropymode.h
+AV1_COMMON_SRCS-yes += common/entropymv.h
+AV1_COMMON_SRCS-yes += common/enums.h
+AV1_COMMON_SRCS-yes += common/filter.h
+AV1_COMMON_SRCS-yes += common/filter.c
+AV1_COMMON_SRCS-yes += common/idct.h
+AV1_COMMON_SRCS-yes += common/idct.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.c
+AV1_COMMON_SRCS-yes += common/loopfilter.h
+AV1_COMMON_SRCS-yes += common/thread_common.h
+AV1_COMMON_SRCS-yes += common/mv.h
+AV1_COMMON_SRCS-yes += common/onyxc_int.h
+AV1_COMMON_SRCS-yes += common/pred_common.h
+AV1_COMMON_SRCS-yes += common/pred_common.c
+AV1_COMMON_SRCS-yes += common/quant_common.h
+AV1_COMMON_SRCS-yes += common/reconinter.h
+AV1_COMMON_SRCS-yes += common/reconintra.h
+AV1_COMMON_SRCS-yes += common/av1_rtcd.c
+AV1_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
+AV1_COMMON_SRCS-yes += common/scale.h
+AV1_COMMON_SRCS-yes += common/scale.c
+AV1_COMMON_SRCS-yes += common/seg_common.h
+AV1_COMMON_SRCS-yes += common/seg_common.c
+AV1_COMMON_SRCS-yes += common/tile_common.h
+AV1_COMMON_SRCS-yes += common/tile_common.c
+AV1_COMMON_SRCS-yes += common/loopfilter.c
+AV1_COMMON_SRCS-yes += common/thread_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.h
+AV1_COMMON_SRCS-yes += common/quant_common.c
+AV1_COMMON_SRCS-yes += common/reconinter.c
+AV1_COMMON_SRCS-yes += common/reconintra.c
+AV1_COMMON_SRCS-yes += common/restoration.h
+AV1_COMMON_SRCS-yes += common/common_data.h
+AV1_COMMON_SRCS-yes += common/scan.c
+AV1_COMMON_SRCS-yes += common/scan.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.c
+AV1_COMMON_SRCS-yes += common/av1_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm1d.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm1d.c
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm2d.c
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm2d_cfg.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm2d.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm2d_cfg.h
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/av1_convolve_ssse3.c
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/av1_convolve_filters_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_highbd_convolve_sse4.c
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_highbd_convolve_filters_sse4.c
+endif
+AV1_COMMON_SRCS-yes += common/av1_convolve.c
+AV1_COMMON_SRCS-yes += common/av1_convolve.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/ans.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/divide.h
+AV1_COMMON_SRCS-$(CONFIG_ANS) += common/divide.c
+AV1_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.h
+AV1_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.c
+ifeq (yes,$(filter yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)))
+AV1_COMMON_SRCS-yes += common/warped_motion.h
+AV1_COMMON_SRCS-yes += common/warped_motion.c
+endif
+AV1_COMMON_SRCS-yes += common/clpf.c
+AV1_COMMON_SRCS-yes += common/clpf.h
+ifeq ($(CONFIG_DERING),yes)
+AV1_COMMON_SRCS-yes += common/od_dering.c
+AV1_COMMON_SRCS-yes += common/od_dering.h
+AV1_COMMON_SRCS-yes += common/dering.c
+AV1_COMMON_SRCS-yes += common/dering.h
+endif
+AV1_COMMON_SRCS-yes += common/odintrin.c
+AV1_COMMON_SRCS-yes += common/odintrin.h
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans4_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans8_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans16_dspr2.c
+endif
+
+# common (msa)
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
+
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_txfm1d_sse4.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm1d_sse4.c
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm2d_sse4.c
+
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_txfm_utility_sse4.h
+endif
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
+endif
+
+ifeq ($(CONFIG_EXT_INTRA),yes)
+AV1_COMMON_SRCS-yes += common/intra_filters.h
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/reconintra_sse4.c
+endif
+
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
+
+$(eval $(call rtcd_h_template,av1_rtcd,av1/common/av1_rtcd_defs.pl))
diff --git a/av1/vp10_cx_iface.c b/av1/av1_cx_iface.c
similarity index 61%
rename from av1/vp10_cx_iface.c
rename to av1/av1_cx_iface.c
index 34dd428..b5223e7 100644
--- a/av1/vp10_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -11,18 +11,18 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_encoder.h"
-#include "aom_ports/vpx_once.h"
+#include "./aom_config.h"
+#include "aom/aom_encoder.h"
+#include "aom_ports/aom_once.h"
 #include "aom_ports/system_state.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "./vpx_version.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "./aom_version.h"
 #include "av1/encoder/encoder.h"
-#include "aom/vp8cx.h"
+#include "aom/aomcx.h"
 #include "av1/encoder/firstpass.h"
-#include "av1/vp10_iface_common.h"
+#include "av1/av1_iface_common.h"
 
-struct vp10_extracfg {
+struct av1_extracfg {
   int cpu_used;  // available cpu percentage in 1/16
   unsigned int enable_auto_alt_ref;
 #if CONFIG_EXT_REFS
@@ -37,7 +37,7 @@
   unsigned int arnr_strength;
   unsigned int min_gf_interval;
   unsigned int max_gf_interval;
-  vpx_tune_metric tuning;
+  aom_tune_metric tuning;
   unsigned int cq_level;  // constrained quality level
   unsigned int rc_max_intra_bitrate_pct;
   unsigned int rc_max_inter_bitrate_pct;
@@ -51,16 +51,16 @@
   unsigned int frame_parallel_decoding_mode;
   AQ_MODE aq_mode;
   unsigned int frame_periodic_boost;
-  vpx_bit_depth_t bit_depth;
-  vpx_tune_content content;
-  vpx_color_space_t color_space;
+  aom_bit_depth_t bit_depth;
+  aom_tune_content content;
+  aom_color_space_t color_space;
   int color_range;
   int render_width;
   int render_height;
-  vpx_superblock_size_t superblock_size;
+  aom_superblock_size_t superblock_size;
 };
 
-static struct vp10_extracfg default_extra_cfg = {
+static struct av1_extracfg default_extra_cfg = {
   0,  // cpu_used
   1,  // enable_auto_alt_ref
 #if CONFIG_EXT_REFS
@@ -80,7 +80,7 @@
   5,              // arnr_strength
   0,              // min_gf_interval; 0 -> default decision
   0,              // max_gf_interval; 0 -> default decision
-  VPX_TUNE_PSNR,  // tuning
+  AOM_TUNE_PSNR,  // tuning
   10,             // cq_level
   0,              // rc_max_intra_bitrate_pct
   0,              // rc_max_inter_bitrate_pct
@@ -94,41 +94,41 @@
   1,                           // frame_parallel_decoding_mode
   NO_AQ,                       // aq_mode
   0,                           // frame_periodic_delta_q
-  VPX_BITS_8,                  // Bit depth
-  VPX_CONTENT_DEFAULT,         // content
-  VPX_CS_UNKNOWN,              // color space
+  AOM_BITS_8,                  // Bit depth
+  AOM_CONTENT_DEFAULT,         // content
+  AOM_CS_UNKNOWN,              // color space
   0,                           // color range
   0,                           // render width
   0,                           // render height
-  VPX_SUPERBLOCK_SIZE_DYNAMIC  // superblock_size
+  AOM_SUPERBLOCK_SIZE_DYNAMIC  // superblock_size
 };
 
-struct vpx_codec_alg_priv {
-  vpx_codec_priv_t base;
-  vpx_codec_enc_cfg_t cfg;
-  struct vp10_extracfg extra_cfg;
-  VP10EncoderConfig oxcf;
-  VP10_COMP *cpi;
+struct aom_codec_alg_priv {
+  aom_codec_priv_t base;
+  aom_codec_enc_cfg_t cfg;
+  struct av1_extracfg extra_cfg;
+  AV1EncoderConfig oxcf;
+  AV1_COMP *cpi;
   unsigned char *cx_data;
   size_t cx_data_sz;
   unsigned char *pending_cx_data;
   size_t pending_cx_data_sz;
   int pending_frame_count;
   size_t pending_frame_sizes[8];
-  vpx_image_t preview_img;
-  vpx_enc_frame_flags_t next_frame_flags;
-  vp8_postproc_cfg_t preview_ppcfg;
-  vpx_codec_pkt_list_decl(256) pkt_list;
+  aom_image_t preview_img;
+  aom_enc_frame_flags_t next_frame_flags;
+  aom_postproc_cfg_t preview_ppcfg;
+  aom_codec_pkt_list_decl(256) pkt_list;
   unsigned int fixed_kf_cntr;
   // BufferPool that holds all reference frames.
   BufferPool *buffer_pool;
 };
 
-static vpx_codec_err_t update_error_state(
-    vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
-  const vpx_codec_err_t res = error->error_code;
+static aom_codec_err_t update_error_state(
+    aom_codec_alg_priv_t *ctx, const struct aom_internal_error_info *error) {
+  const aom_codec_err_t res = error->error_code;
 
-  if (res != VPX_CODEC_OK)
+  if (res != AOM_CODEC_OK)
     ctx->base.err_detail = error->has_detail ? error->detail : NULL;
 
   return res;
@@ -138,7 +138,7 @@
 #define ERROR(str)                  \
   do {                              \
     ctx->base.err_detail = str;     \
-    return VPX_CODEC_INVALID_PARAM; \
+    return AOM_CODEC_INVALID_PARAM; \
   } while (0)
 
 #define RANGE_CHECK(p, memb, lo, hi)                                 \
@@ -162,9 +162,9 @@
     if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
   } while (0)
 
-static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
-                                       const vpx_codec_enc_cfg_t *cfg,
-                                       const struct vp10_extracfg *extra_cfg) {
+static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
+                                       const aom_codec_enc_cfg_t *cfg,
+                                       const struct av1_extracfg *extra_cfg) {
   RANGE_CHECK(cfg, g_w, 1, 65535);  // 16 bits available
   RANGE_CHECK(cfg, g_h, 1, 65535);  // 16 bits available
   RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
@@ -178,16 +178,16 @@
   RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1);
   RANGE_CHECK_HI(cfg, g_threads, 64);
   RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
-  RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+  RANGE_CHECK(cfg, rc_end_usage, AOM_VBR, AOM_Q);
   RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
   RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
   RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
-  RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+  RANGE_CHECK(cfg, kf_mode, AOM_KF_DISABLED, AOM_KF_AUTO);
   RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
   RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
   RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
   RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
-  RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+  RANGE_CHECK(cfg, g_pass, AOM_RC_ONE_PASS, AOM_RC_LAST_PASS);
   RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
   RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
   if (extra_cfg->max_gf_interval > 0) {
@@ -203,9 +203,9 @@
     RANGE_CHECK(cfg, rc_scaled_height, 0, cfg->g_h);
   }
 
-  // VP9 does not support a lower bound on the keyframe interval in
+  // AV1 does not support a lower bound on the keyframe interval in
   // automatic keyframe placement mode.
-  if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
+  if (cfg->kf_mode != AOM_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
       cfg->kf_min_dist > 0)
     ERROR(
         "kf_min_dist not supported in auto mode, use 0 "
@@ -217,14 +217,14 @@
 #endif  // CONFIG_EXT_REFS
   RANGE_CHECK(extra_cfg, cpu_used, -8, 8);
   RANGE_CHECK_HI(extra_cfg, noise_sensitivity, 6);
-  RANGE_CHECK(extra_cfg, superblock_size, VPX_SUPERBLOCK_SIZE_64X64,
-              VPX_SUPERBLOCK_SIZE_DYNAMIC);
+  RANGE_CHECK(extra_cfg, superblock_size, AOM_SUPERBLOCK_SIZE_64X64,
+              AOM_SUPERBLOCK_SIZE_DYNAMIC);
 #if CONFIG_EXT_TILE
 // TODO(any): Waring. If CONFIG_EXT_TILE is true, tile_columns really
 // means tile_width, and tile_rows really means tile_hight. The interface
 // should be sanitized.
 #if CONFIG_EXT_PARTITION
-  if (extra_cfg->superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
+  if (extra_cfg->superblock_size != AOM_SUPERBLOCK_SIZE_64X64) {
     if (extra_cfg->tile_columns != UINT_MAX)
       RANGE_CHECK(extra_cfg, tile_columns, 1, 32);
     if (extra_cfg->tile_rows != UINT_MAX)
@@ -245,15 +245,15 @@
   RANGE_CHECK(extra_cfg, arnr_max_frames, 0, 15);
   RANGE_CHECK_HI(extra_cfg, arnr_strength, 6);
   RANGE_CHECK(extra_cfg, cq_level, 0, 63);
-  RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
+  RANGE_CHECK(cfg, g_bit_depth, AOM_BITS_8, AOM_BITS_12);
   RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
-  RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
+  RANGE_CHECK(extra_cfg, content, AOM_CONTENT_DEFAULT, AOM_CONTENT_INVALID - 1);
 
-  // TODO(yaowu): remove this when ssim tuning is implemented for vp10
-  if (extra_cfg->tuning == VPX_TUNE_SSIM)
-    ERROR("Option --tune=ssim is not currently supported in VP10.");
+  // TODO(yaowu): remove this when ssim tuning is implemented for av1
+  if (extra_cfg->tuning == AOM_TUNE_SSIM)
+    ERROR("Option --tune=ssim is not currently supported in AV1.");
 
-  if (cfg->g_pass == VPX_RC_LAST_PASS) {
+  if (cfg->g_pass == AOM_RC_LAST_PASS) {
     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
     const int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
     const FIRSTPASS_STATS *stats;
@@ -274,45 +274,45 @@
       ERROR("rc_twopass_stats_in missing EOS stats packet");
   }
 
-#if !CONFIG_VP9_HIGHBITDEPTH
+#if !CONFIG_AOM_HIGHBITDEPTH
   if (cfg->g_profile > (unsigned int)PROFILE_1) {
     ERROR("Profile > 1 not supported in this build configuration");
   }
 #endif
   if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
-      cfg->g_bit_depth > VPX_BITS_8) {
+      cfg->g_bit_depth > AOM_BITS_8) {
     ERROR("Codec high bit-depth not supported in profile < 2");
   }
   if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) {
     ERROR("Source high bit-depth not supported in profile < 2");
   }
   if (cfg->g_profile > (unsigned int)PROFILE_1 &&
-      cfg->g_bit_depth == VPX_BITS_8) {
+      cfg->g_bit_depth == AOM_BITS_8) {
     ERROR("Codec bit-depth 8 not supported in profile > 1");
   }
-  RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB);
+  RANGE_CHECK(extra_cfg, color_space, AOM_CS_UNKNOWN, AOM_CS_SRGB);
   RANGE_CHECK(extra_cfg, color_range, 0, 1);
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
-                                    const vpx_image_t *img) {
+static aom_codec_err_t validate_img(aom_codec_alg_priv_t *ctx,
+                                    const aom_image_t *img) {
   switch (img->fmt) {
-    case VPX_IMG_FMT_YV12:
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I42016: break;
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440:
+    case AOM_IMG_FMT_YV12:
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_I42016: break;
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I444:
+    case AOM_IMG_FMT_I440:
       if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) {
         ERROR(
             "Invalid image format. I422, I444, I440 images are "
             "not supported in profile.");
       }
       break;
-    case VPX_IMG_FMT_I42216:
-    case VPX_IMG_FMT_I44416:
-    case VPX_IMG_FMT_I44016:
+    case AOM_IMG_FMT_I42216:
+    case AOM_IMG_FMT_I44416:
+    case AOM_IMG_FMT_I44016:
       if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 &&
           ctx->cfg.g_profile != (unsigned int)PROFILE_3) {
         ERROR(
@@ -330,29 +330,29 @@
   if (img->d_w != ctx->cfg.g_w || img->d_h != ctx->cfg.g_h)
     ERROR("Image size must match encoder init configuration size");
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static int get_image_bps(const vpx_image_t *img) {
+static int get_image_bps(const aom_image_t *img) {
   switch (img->fmt) {
-    case VPX_IMG_FMT_YV12:
-    case VPX_IMG_FMT_I420: return 12;
-    case VPX_IMG_FMT_I422: return 16;
-    case VPX_IMG_FMT_I444: return 24;
-    case VPX_IMG_FMT_I440: return 16;
-    case VPX_IMG_FMT_I42016: return 24;
-    case VPX_IMG_FMT_I42216: return 32;
-    case VPX_IMG_FMT_I44416: return 48;
-    case VPX_IMG_FMT_I44016: return 32;
+    case AOM_IMG_FMT_YV12:
+    case AOM_IMG_FMT_I420: return 12;
+    case AOM_IMG_FMT_I422: return 16;
+    case AOM_IMG_FMT_I444: return 24;
+    case AOM_IMG_FMT_I440: return 16;
+    case AOM_IMG_FMT_I42016: return 24;
+    case AOM_IMG_FMT_I42216: return 32;
+    case AOM_IMG_FMT_I44416: return 48;
+    case AOM_IMG_FMT_I44016: return 32;
     default: assert(0 && "Invalid image format"); break;
   }
   return 0;
 }
 
-static vpx_codec_err_t set_encoder_config(
-    VP10EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg,
-    const struct vp10_extracfg *extra_cfg) {
-  const int is_vbr = cfg->rc_end_usage == VPX_VBR;
+static aom_codec_err_t set_encoder_config(
+    AV1EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
+    const struct av1_extracfg *extra_cfg) {
+  const int is_vbr = cfg->rc_end_usage == AOM_VBR;
   oxcf->profile = cfg->g_profile;
   oxcf->max_threads = (int)cfg->g_threads;
   oxcf->width = cfg->g_w;
@@ -366,13 +366,13 @@
   oxcf->mode = GOOD;
 
   switch (cfg->g_pass) {
-    case VPX_RC_ONE_PASS: oxcf->pass = 0; break;
-    case VPX_RC_FIRST_PASS: oxcf->pass = 1; break;
-    case VPX_RC_LAST_PASS: oxcf->pass = 2; break;
+    case AOM_RC_ONE_PASS: oxcf->pass = 0; break;
+    case AOM_RC_FIRST_PASS: oxcf->pass = 1; break;
+    case AOM_RC_LAST_PASS: oxcf->pass = 2; break;
   }
 
   oxcf->lag_in_frames =
-      cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
+      cfg->g_pass == AOM_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
   oxcf->rc_mode = cfg->rc_end_usage;
 
   // Convert target bandwidth from Kbit/s to Bit/s
@@ -382,10 +382,10 @@
   oxcf->gf_cbr_boost_pct = extra_cfg->gf_cbr_boost_pct;
 
   oxcf->best_allowed_q =
-      extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
+      extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_min_quantizer);
   oxcf->worst_allowed_q =
-      extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
-  oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+      extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_max_quantizer);
+  oxcf->cq_level = av1_quantizer_to_qindex(extra_cfg->cq_level);
   oxcf->fixed_q = -1;
 
 #if CONFIG_AOM_QM
@@ -419,7 +419,7 @@
   oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
 
   oxcf->auto_key =
-      cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
+      cfg->kf_mode == AOM_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
 
   oxcf->key_freq = cfg->kf_max_dist;
 
@@ -458,12 +458,12 @@
   {
 #if CONFIG_EXT_PARTITION
     const unsigned int max =
-        extra_cfg->superblock_size == VPX_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
+        extra_cfg->superblock_size == AOM_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
 #else
     const unsigned int max = 64;
 #endif  // CONFIG_EXT_PARTITION
-    oxcf->tile_columns = VPXMIN(extra_cfg->tile_columns, max);
-    oxcf->tile_rows = VPXMIN(extra_cfg->tile_rows, max);
+    oxcf->tile_columns = AOMMIN(extra_cfg->tile_columns, max);
+    oxcf->tile_rows = AOMMIN(extra_cfg->tile_rows, max);
   }
 #else
   oxcf->tile_columns = extra_cfg->tile_columns;
@@ -478,7 +478,7 @@
   oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
 
   /*
-  printf("Current VP9 Settings: \n");
+  printf("Current AV1 Settings: \n");
   printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
   printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
   printf("sharpness: %d\n",    oxcf->sharpness);
@@ -509,16 +509,16 @@
   printf("frame parallel detokenization: %d\n",
          oxcf->frame_parallel_decoding_mode);
   */
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
-                                          const vpx_codec_enc_cfg_t *cfg) {
-  vpx_codec_err_t res;
+static aom_codec_err_t encoder_set_config(aom_codec_alg_priv_t *ctx,
+                                          const aom_codec_enc_cfg_t *cfg) {
+  aom_codec_err_t res;
   int force_key = 0;
 
   if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) {
-    if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS)
+    if (cfg->g_lag_in_frames > 1 || cfg->g_pass != AOM_RC_ONE_PASS)
       ERROR("Cannot change width or height after initialization");
     if (!valid_ref_frame_size(ctx->cfg.g_w, ctx->cfg.g_h, cfg->g_w, cfg->g_h) ||
         (ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) ||
@@ -535,246 +535,246 @@
 
   res = validate_config(ctx, cfg, &ctx->extra_cfg);
 
-  if (res == VPX_CODEC_OK) {
+  if (res == AOM_CODEC_OK) {
     ctx->cfg = *cfg;
     set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
     // On profile change, request a key frame
     force_key |= ctx->cpi->common.profile != ctx->oxcf.profile;
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
 
-  if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+  if (force_key) ctx->next_frame_flags |= AOM_EFLAG_FORCE_KF;
 
   return res;
 }
 
-static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_quantizer(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   int *const arg = va_arg(args, int *);
-  if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
-  *arg = vp10_get_quantizer(ctx->cpi);
-  return VPX_CODEC_OK;
+  if (arg == NULL) return AOM_CODEC_INVALID_PARAM;
+  *arg = av1_get_quantizer(ctx->cpi);
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_quantizer64(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
   int *const arg = va_arg(args, int *);
-  if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
-  *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
-  return VPX_CODEC_OK;
+  if (arg == NULL) return AOM_CODEC_INVALID_PARAM;
+  *arg = av1_qindex_to_quantizer(av1_get_quantizer(ctx->cpi));
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t update_extra_cfg(vpx_codec_alg_priv_t *ctx,
-                                        const struct vp10_extracfg *extra_cfg) {
-  const vpx_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
-  if (res == VPX_CODEC_OK) {
+static aom_codec_err_t update_extra_cfg(aom_codec_alg_priv_t *ctx,
+                                        const struct av1_extracfg *extra_cfg) {
+  const aom_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
+  if (res == AOM_CODEC_OK) {
     ctx->extra_cfg = *extra_cfg;
     set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
   return res;
 }
 
-static vpx_codec_err_t ctrl_set_cpuused(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_cpuused(aom_codec_alg_priv_t *ctx,
                                         va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.cpu_used = CAST(AOME_SET_CPUUSED, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_enable_auto_alt_ref(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_auto_alt_ref(aom_codec_alg_priv_t *ctx,
                                                     va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.enable_auto_alt_ref = CAST(AOME_SET_ENABLEAUTOALTREF, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 #if CONFIG_EXT_REFS
-static vpx_codec_err_t ctrl_set_enable_auto_bwd_ref(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_auto_bwd_ref(aom_codec_alg_priv_t *ctx,
                                                     va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.enable_auto_bwd_ref = CAST(VP8E_SET_ENABLEAUTOBWDREF, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.enable_auto_bwd_ref = CAST(AOME_SET_ENABLEAUTOBWDREF, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 #endif  // CONFIG_EXT_REFS
 
-static vpx_codec_err_t ctrl_set_noise_sensitivity(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_noise_sensitivity(aom_codec_alg_priv_t *ctx,
                                                   va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.noise_sensitivity = CAST(VP9E_SET_NOISE_SENSITIVITY, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.noise_sensitivity = CAST(AV1E_SET_NOISE_SENSITIVITY, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_sharpness(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_sharpness(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.sharpness = CAST(VP8E_SET_SHARPNESS, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.sharpness = CAST(AOME_SET_SHARPNESS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_static_thresh(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_static_thresh(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.static_thresh = CAST(AOME_SET_STATIC_THRESHOLD, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_tile_columns(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tile_columns(aom_codec_alg_priv_t *ctx,
                                              va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.tile_columns = CAST(VP9E_SET_TILE_COLUMNS, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.tile_columns = CAST(AV1E_SET_TILE_COLUMNS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_tile_rows(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tile_rows(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.tile_rows = CAST(VP9E_SET_TILE_ROWS, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.tile_rows = CAST(AV1E_SET_TILE_ROWS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_arnr_max_frames(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_max_frames(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.arnr_max_frames = CAST(AOME_SET_ARNR_MAXFRAMES, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_arnr_strength(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_strength(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.arnr_strength = CAST(AOME_SET_ARNR_STRENGTH, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_arnr_type(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_arnr_type(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   (void)ctx;
   (void)args;
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_tuning(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tuning(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.tuning = CAST(VP8E_SET_TUNING, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.tuning = CAST(AOME_SET_TUNING, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_cq_level(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_cq_level(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.cq_level = CAST(AOME_SET_CQ_LEVEL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
-    vpx_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
+    aom_codec_alg_priv_t *ctx, va_list args) {
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.rc_max_intra_bitrate_pct =
-      CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args);
+      CAST(AOME_SET_MAX_INTRA_BITRATE_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
-    vpx_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
+    aom_codec_alg_priv_t *ctx, va_list args) {
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.rc_max_inter_bitrate_pct =
-      CAST(VP8E_SET_MAX_INTER_BITRATE_PCT, args);
+      CAST(AOME_SET_MAX_INTER_BITRATE_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(aom_codec_alg_priv_t *ctx,
                                                     va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.gf_cbr_boost_pct = CAST(AV1E_SET_GF_CBR_BOOST_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_lossless(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_lossless(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.lossless = CAST(VP9E_SET_LOSSLESS, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.lossless = CAST(AV1E_SET_LOSSLESS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 #if CONFIG_AOM_QM
-static vpx_codec_err_t ctrl_set_enable_qm(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.enable_qm = CAST(VP9E_SET_ENABLE_QM, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.enable_qm = CAST(AV1E_SET_ENABLE_QM, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_qm_min(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_qm_min(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.qm_min = CAST(VP9E_SET_QM_MIN, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.qm_min = CAST(AV1E_SET_QM_MIN, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_qm_max(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_qm_max(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.qm_max = CAST(VP9E_SET_QM_MAX, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.qm_max = CAST(AV1E_SET_QM_MAX, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 #endif
 
-static vpx_codec_err_t ctrl_set_frame_parallel_decoding_mode(
-    vpx_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+static aom_codec_err_t ctrl_set_frame_parallel_decoding_mode(
+    aom_codec_alg_priv_t *ctx, va_list args) {
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.frame_parallel_decoding_mode =
-      CAST(VP9E_SET_FRAME_PARALLEL_DECODING, args);
+      CAST(AV1E_SET_FRAME_PARALLEL_DECODING, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_aq_mode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_aq_mode(aom_codec_alg_priv_t *ctx,
                                         va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.aq_mode = CAST(VP9E_SET_AQ_MODE, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.aq_mode = CAST(AV1E_SET_AQ_MODE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_min_gf_interval(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_min_gf_interval(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.min_gf_interval = CAST(VP9E_SET_MIN_GF_INTERVAL, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.min_gf_interval = CAST(AV1E_SET_MIN_GF_INTERVAL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_max_gf_interval(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_max_gf_interval(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.max_gf_interval = CAST(VP9E_SET_MAX_GF_INTERVAL, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.max_gf_interval = CAST(AV1E_SET_MAX_GF_INTERVAL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_frame_periodic_boost(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_frame_periodic_boost(aom_codec_alg_priv_t *ctx,
                                                      va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.frame_periodic_boost = CAST(VP9E_SET_FRAME_PERIODIC_BOOST, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.frame_periodic_boost = CAST(AV1E_SET_FRAME_PERIODIC_BOOST, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
-                                    vpx_codec_priv_enc_mr_cfg_t *data) {
-  vpx_codec_err_t res = VPX_CODEC_OK;
+static aom_codec_err_t encoder_init(aom_codec_ctx_t *ctx,
+                                    aom_codec_priv_enc_mr_cfg_t *data) {
+  aom_codec_err_t res = AOM_CODEC_OK;
   (void)data;
 
   if (ctx->priv == NULL) {
-    vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
-    if (priv == NULL) return VPX_CODEC_MEM_ERROR;
+    aom_codec_alg_priv_t *const priv = aom_calloc(1, sizeof(*priv));
+    if (priv == NULL) return AOM_CODEC_MEM_ERROR;
 
-    ctx->priv = (vpx_codec_priv_t *)priv;
+    ctx->priv = (aom_codec_priv_t *)priv;
     ctx->priv->init_flags = ctx->init_flags;
     ctx->priv->enc.total_encoders = 1;
-    priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
-    if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
+    priv->buffer_pool = (BufferPool *)aom_calloc(1, sizeof(BufferPool));
+    if (priv->buffer_pool == NULL) return AOM_CODEC_MEM_ERROR;
 
 #if CONFIG_MULTITHREAD
     if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) {
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
 #endif
 
@@ -785,19 +785,19 @@
     }
 
     priv->extra_cfg = default_extra_cfg;
-    once(vp10_initialize_enc);
+    once(av1_initialize_enc);
 
     res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
 
-    if (res == VPX_CODEC_OK) {
+    if (res == AOM_CODEC_OK) {
       set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       priv->oxcf.use_highbitdepth =
-          (ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
+          (ctx->init_flags & AOM_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
 #endif
-      priv->cpi = vp10_create_compressor(&priv->oxcf, priv->buffer_pool);
+      priv->cpi = av1_create_compressor(&priv->oxcf, priv->buffer_pool);
       if (priv->cpi == NULL)
-        res = VPX_CODEC_MEM_ERROR;
+        res = AOM_CODEC_MEM_ERROR;
       else
         priv->cpi->output_pkt_list = &priv->pkt_list.head;
     }
@@ -806,26 +806,26 @@
   return res;
 }
 
-static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t encoder_destroy(aom_codec_alg_priv_t *ctx) {
   free(ctx->cx_data);
-  vp10_remove_compressor(ctx->cpi);
+  av1_remove_compressor(ctx->cpi);
 #if CONFIG_MULTITHREAD
   pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
 #endif
-  vpx_free(ctx->buffer_pool);
-  vpx_free(ctx);
-  return VPX_CODEC_OK;
+  aom_free(ctx->buffer_pool);
+  aom_free(ctx);
+  return AOM_CODEC_OK;
 }
 
-static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
+static void pick_quickcompress_mode(aom_codec_alg_priv_t *ctx,
                                     unsigned long duration,
                                     unsigned long deadline) {
   MODE new_mode = BEST;
 
   switch (ctx->cfg.g_pass) {
-    case VPX_RC_ONE_PASS:
+    case AOM_RC_ONE_PASS:
       if (deadline > 0) {
-        const vpx_codec_enc_cfg_t *const cfg = &ctx->cfg;
+        const aom_codec_enc_cfg_t *const cfg = &ctx->cfg;
 
         // Convert duration parameter from stream timebase to microseconds.
         const uint64_t duration_us = (uint64_t)duration * 1000000 *
@@ -839,19 +839,19 @@
         new_mode = BEST;
       }
       break;
-    case VPX_RC_FIRST_PASS: break;
-    case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
+    case AOM_RC_FIRST_PASS: break;
+    case AOM_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
   }
 
   if (ctx->oxcf.mode != new_mode) {
     ctx->oxcf.mode = new_mode;
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
 }
 
 // Turn on to test if supplemental superframe data breaks decoding
 // #define TEST_SUPPLEMENTAL_SUPERFRAME_DATA
-static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
+static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
   uint8_t marker = 0xc0;
   unsigned int mask;
   int mag, index_sz;
@@ -916,50 +916,50 @@
   return index_sz;
 }
 
-// vp9 uses 10,000,000 ticks/second as time stamp
+// av1 uses 10,000,000 ticks/second as time stamp
 #define TICKS_PER_SEC 10000000LL
 
-static int64_t timebase_units_to_ticks(const vpx_rational_t *timebase,
+static int64_t timebase_units_to_ticks(const aom_rational_t *timebase,
                                        int64_t n) {
   return n * TICKS_PER_SEC * timebase->num / timebase->den;
 }
 
-static int64_t ticks_to_timebase_units(const vpx_rational_t *timebase,
+static int64_t ticks_to_timebase_units(const aom_rational_t *timebase,
                                        int64_t n) {
   const int64_t round = TICKS_PER_SEC * timebase->num / 2 - 1;
   return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC;
 }
 
-static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi,
+static aom_codec_frame_flags_t get_frame_pkt_flags(const AV1_COMP *cpi,
                                                    unsigned int lib_flags) {
-  vpx_codec_frame_flags_t flags = lib_flags << 16;
+  aom_codec_frame_flags_t flags = lib_flags << 16;
 
-  if (lib_flags & FRAMEFLAGS_KEY) flags |= VPX_FRAME_IS_KEY;
+  if (lib_flags & FRAMEFLAGS_KEY) flags |= AOM_FRAME_IS_KEY;
 
-  if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE;
+  if (cpi->droppable) flags |= AOM_FRAME_IS_DROPPABLE;
 
   return flags;
 }
 
-static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
-                                      const vpx_image_t *img,
-                                      vpx_codec_pts_t pts,
+static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
+                                      const aom_image_t *img,
+                                      aom_codec_pts_t pts,
                                       unsigned long duration,
-                                      vpx_enc_frame_flags_t enc_flags,
+                                      aom_enc_frame_flags_t enc_flags,
                                       unsigned long deadline) {
-  volatile vpx_codec_err_t res = VPX_CODEC_OK;
-  volatile vpx_enc_frame_flags_t flags = enc_flags;
-  VP10_COMP *const cpi = ctx->cpi;
-  const vpx_rational_t *const timebase = &ctx->cfg.g_timebase;
+  volatile aom_codec_err_t res = AOM_CODEC_OK;
+  volatile aom_enc_frame_flags_t flags = enc_flags;
+  AV1_COMP *const cpi = ctx->cpi;
+  const aom_rational_t *const timebase = &ctx->cfg.g_timebase;
   size_t data_sz;
 
-  if (cpi == NULL) return VPX_CODEC_INVALID_PARAM;
+  if (cpi == NULL) return AOM_CODEC_INVALID_PARAM;
 
   if (img != NULL) {
     res = validate_img(ctx, img);
     // TODO(jzern) the checks related to cpi's validity should be treated as a
     // failure condition, encoder setup is done fully in init() currently.
-    if (res == VPX_CODEC_OK) {
+    if (res == AOM_CODEC_OK) {
 #if CONFIG_EXT_REFS
       data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img);
 #else
@@ -974,42 +974,42 @@
         free(ctx->cx_data);
         ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
         if (ctx->cx_data == NULL) {
-          return VPX_CODEC_MEM_ERROR;
+          return AOM_CODEC_MEM_ERROR;
         }
       }
     }
   }
 
   pick_quickcompress_mode(ctx, duration, deadline);
-  vpx_codec_pkt_list_init(&ctx->pkt_list);
+  aom_codec_pkt_list_init(&ctx->pkt_list);
 
   // Handle Flags
-  if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
-      ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+  if (((flags & AOM_EFLAG_NO_UPD_GF) && (flags & AOM_EFLAG_FORCE_GF)) ||
+      ((flags & AOM_EFLAG_NO_UPD_ARF) && (flags & AOM_EFLAG_FORCE_ARF))) {
     ctx->base.err_detail = "Conflicting flags.";
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 
   if (setjmp(cpi->common.error.jmp)) {
     cpi->common.error.setjmp = 0;
     res = update_error_state(ctx, &cpi->common.error);
-    vpx_clear_system_state();
+    aom_clear_system_state();
     return res;
   }
   cpi->common.error.setjmp = 1;
 
-  vp10_apply_encoding_flags(cpi, flags);
+  av1_apply_encoding_flags(cpi, flags);
 
   // Handle fixed keyframe intervals
-  if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
+  if (ctx->cfg.kf_mode == AOM_KF_AUTO &&
       ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
     if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
-      flags |= VPX_EFLAG_FORCE_KF;
+      flags |= AOM_EFLAG_FORCE_KF;
       ctx->fixed_kf_cntr = 1;
     }
   }
 
-  if (res == VPX_CODEC_OK) {
+  if (res == AOM_CODEC_OK) {
     unsigned int lib_flags = 0;
     YV12_BUFFER_CONFIG sd;
     int64_t dst_time_stamp = timebase_units_to_ticks(timebase, pts);
@@ -1019,15 +1019,15 @@
     unsigned char *cx_data;
 
     // Set up internal flags
-    if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
+    if (ctx->base.init_flags & AOM_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
 
     if (img != NULL) {
       res = image2yuvconfig(img, &sd);
 
       // Store the original flags in to the frame buffer. Will extract the
       // key frame flag when we actually encode this frame.
-      if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
-                                 dst_time_stamp, dst_end_time_stamp)) {
+      if (av1_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+                                dst_time_stamp, dst_end_time_stamp)) {
         res = update_error_state(ctx, &cpi->common.error);
       }
       ctx->next_frame_flags = 0;
@@ -1047,18 +1047,18 @@
        * the buffer size anyway.
        */
       if (cx_data_sz < ctx->cx_data_sz / 2) {
-        vpx_internal_error(&cpi->common.error, VPX_CODEC_ERROR,
+        aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR,
                            "Compressed data buffer too small");
-        return VPX_CODEC_ERROR;
+        return AOM_CODEC_ERROR;
       }
     }
 
     while (cx_data_sz >= ctx->cx_data_sz / 2 &&
-           -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
-                                          &dst_time_stamp, &dst_end_time_stamp,
-                                          !img)) {
+           -1 != av1_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+                                         &dst_time_stamp, &dst_end_time_stamp,
+                                         !img)) {
       if (size) {
-        vpx_codec_cx_pkt_t pkt;
+        aom_codec_cx_pkt_t pkt;
 
         // Pack invisible frames with the next visible frame
         if (!cpi->common.show_frame) {
@@ -1072,7 +1072,7 @@
         }
 
         // Add the frame packet to the list of returned packets.
-        pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+        pkt.kind = AOM_CODEC_CX_FRAME_PKT;
         pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp);
         pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
             timebase, dst_end_time_stamp - dst_time_stamp);
@@ -1093,7 +1093,7 @@
         }
         pkt.data.frame.partition_id = -1;
 
-        vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+        aom_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
 
         cx_data += size;
         cx_data_sz -= size;
@@ -1105,87 +1105,87 @@
   return res;
 }
 
-static const vpx_codec_cx_pkt_t *encoder_get_cxdata(vpx_codec_alg_priv_t *ctx,
-                                                    vpx_codec_iter_t *iter) {
-  return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
+static const aom_codec_cx_pkt_t *encoder_get_cxdata(aom_codec_alg_priv_t *ctx,
+                                                    aom_codec_iter_t *iter) {
+  return aom_codec_pkt_list_get(&ctx->pkt_list.head, iter);
 }
 
-static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+  aom_ref_frame_t *const frame = va_arg(args, aom_ref_frame_t *);
 
   if (frame != NULL) {
     YV12_BUFFER_CONFIG sd;
 
     image2yuvconfig(&frame->img, &sd);
-    vp10_set_reference_enc(ctx->cpi,
-                           ref_frame_to_vp10_reframe(frame->frame_type), &sd);
-    return VPX_CODEC_OK;
+    av1_set_reference_enc(ctx->cpi, ref_frame_to_av1_reframe(frame->frame_type),
+                          &sd);
+    return AOM_CODEC_OK;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
-  vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+  aom_ref_frame_t *const frame = va_arg(args, aom_ref_frame_t *);
 
   if (frame != NULL) {
     YV12_BUFFER_CONFIG sd;
 
     image2yuvconfig(&frame->img, &sd);
-    vp10_copy_reference_enc(ctx->cpi,
-                            ref_frame_to_vp10_reframe(frame->frame_type), &sd);
-    return VPX_CODEC_OK;
+    av1_copy_reference_enc(ctx->cpi,
+                           ref_frame_to_av1_reframe(frame->frame_type), &sd);
+    return AOM_CODEC_OK;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  vp9_ref_frame_t *const frame = va_arg(args, vp9_ref_frame_t *);
+  av1_ref_frame_t *const frame = va_arg(args, av1_ref_frame_t *);
 
   if (frame != NULL) {
     YV12_BUFFER_CONFIG *fb = get_ref_frame(&ctx->cpi->common, frame->idx);
-    if (fb == NULL) return VPX_CODEC_ERROR;
+    if (fb == NULL) return AOM_CODEC_ERROR;
 
     yuvconfig2image(&frame->img, fb, NULL);
-    return VPX_CODEC_OK;
+    return AOM_CODEC_OK;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_get_new_frame_image(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_new_frame_image(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  vpx_image_t *const new_img = va_arg(args, vpx_image_t *);
+  aom_image_t *const new_img = va_arg(args, aom_image_t *);
 
   if (new_img != NULL) {
     YV12_BUFFER_CONFIG new_frame;
 
-    if (vp10_get_last_show_frame(ctx->cpi, &new_frame) == 0) {
+    if (av1_get_last_show_frame(ctx->cpi, &new_frame) == 0) {
       yuvconfig2image(new_img, &new_frame, NULL);
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_previewpp(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   (void)ctx;
   (void)args;
-  return VPX_CODEC_INCAPABLE;
+  return AOM_CODEC_INCAPABLE;
 }
 
-static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
+static aom_image_t *encoder_get_preview(aom_codec_alg_priv_t *ctx) {
   YV12_BUFFER_CONFIG sd;
 
-  if (vp10_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
+  if (av1_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
     yuvconfig2image(&ctx->preview_img, &sd, NULL);
     return &ctx->preview_img;
   } else {
@@ -1193,160 +1193,160 @@
   }
 }
 
-static vpx_codec_err_t ctrl_use_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_use_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   const int reference_flag = va_arg(args, int);
 
-  vp10_use_as_reference(ctx->cpi, reference_flag);
-  return VPX_CODEC_OK;
+  av1_use_as_reference(ctx->cpi, reference_flag);
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_roi_map(aom_codec_alg_priv_t *ctx,
                                         va_list args) {
   (void)ctx;
   (void)args;
 
-  // TODO(yaowu): Need to re-implement and test for VP9.
-  return VPX_CODEC_INVALID_PARAM;
+  // TODO(yaowu): Need to re-implement and test for AV1.
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_active_map(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
-  vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
+  aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
 
   if (map) {
-    if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
-                             (int)map->cols))
-      return VPX_CODEC_OK;
+    if (!av1_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+                            (int)map->cols))
+      return AOM_CODEC_OK;
     else
-      return VPX_CODEC_INVALID_PARAM;
+      return AOM_CODEC_INVALID_PARAM;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_active_map(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
-  vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
+  aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
 
   if (map) {
-    if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
-                             (int)map->cols))
-      return VPX_CODEC_OK;
+    if (!av1_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+                            (int)map->cols))
+      return AOM_CODEC_OK;
     else
-      return VPX_CODEC_INVALID_PARAM;
+      return AOM_CODEC_INVALID_PARAM;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_scale_mode(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
-  vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
+  aom_scaling_mode_t *const mode = va_arg(args, aom_scaling_mode_t *);
 
   if (mode) {
     const int res =
-        vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
-                               (VPX_SCALING)mode->v_scaling_mode);
-    return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
+        av1_set_internal_size(ctx->cpi, (AOM_SCALING)mode->h_scaling_mode,
+                              (AOM_SCALING)mode->v_scaling_mode);
+    return (res == 0) ? AOM_CODEC_OK : AOM_CODEC_INVALID_PARAM;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_set_tune_content(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_tune_content(aom_codec_alg_priv_t *ctx,
                                              va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.content = CAST(VP9E_SET_TUNE_CONTENT, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.content = CAST(AV1E_SET_TUNE_CONTENT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_color_space(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_color_space(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.color_space = CAST(AV1E_SET_COLOR_SPACE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_color_range(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_color_range(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.color_range = CAST(VP9E_SET_COLOR_RANGE, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.color_range = CAST(AV1E_SET_COLOR_RANGE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_render_size(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   int *const render_size = va_arg(args, int *);
   extra_cfg.render_width = render_size[0];
   extra_cfg.render_height = render_size[1];
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_superblock_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_superblock_size(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.superblock_size = CAST(VP10E_SET_SUPERBLOCK_SIZE, args);
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
+  extra_cfg.superblock_size = CAST(AV1E_SET_SUPERBLOCK_SIZE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
-  { VP8_COPY_REFERENCE, ctrl_copy_reference },
-  { VP8E_USE_REFERENCE, ctrl_use_reference },
+static aom_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
+  { AOM_COPY_REFERENCE, ctrl_copy_reference },
+  { AOME_USE_REFERENCE, ctrl_use_reference },
 
   // Setters
-  { VP8_SET_REFERENCE, ctrl_set_reference },
-  { VP8_SET_POSTPROC, ctrl_set_previewpp },
-  { VP8E_SET_ROI_MAP, ctrl_set_roi_map },
-  { VP8E_SET_ACTIVEMAP, ctrl_set_active_map },
-  { VP8E_SET_SCALEMODE, ctrl_set_scale_mode },
-  { VP8E_SET_CPUUSED, ctrl_set_cpuused },
-  { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
+  { AOM_SET_REFERENCE, ctrl_set_reference },
+  { AOM_SET_POSTPROC, ctrl_set_previewpp },
+  { AOME_SET_ROI_MAP, ctrl_set_roi_map },
+  { AOME_SET_ACTIVEMAP, ctrl_set_active_map },
+  { AOME_SET_SCALEMODE, ctrl_set_scale_mode },
+  { AOME_SET_CPUUSED, ctrl_set_cpuused },
+  { AOME_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
 #if CONFIG_EXT_REFS
-  { VP8E_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref },
+  { AOME_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref },
 #endif  // CONFIG_EXT_REFS
-  { VP8E_SET_SHARPNESS, ctrl_set_sharpness },
-  { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
-  { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
-  { VP9E_SET_TILE_ROWS, ctrl_set_tile_rows },
-  { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
-  { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
-  { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type },
-  { VP8E_SET_TUNING, ctrl_set_tuning },
-  { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level },
-  { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
-  { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
-  { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
-  { VP9E_SET_LOSSLESS, ctrl_set_lossless },
+  { AOME_SET_SHARPNESS, ctrl_set_sharpness },
+  { AOME_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
+  { AV1E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
+  { AV1E_SET_TILE_ROWS, ctrl_set_tile_rows },
+  { AOME_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
+  { AOME_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
+  { AOME_SET_ARNR_TYPE, ctrl_set_arnr_type },
+  { AOME_SET_TUNING, ctrl_set_tuning },
+  { AOME_SET_CQ_LEVEL, ctrl_set_cq_level },
+  { AOME_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
+  { AV1E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
+  { AV1E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+  { AV1E_SET_LOSSLESS, ctrl_set_lossless },
 #if CONFIG_AOM_QM
-  { VP9E_SET_ENABLE_QM, ctrl_set_enable_qm },
-  { VP9E_SET_QM_MIN, ctrl_set_qm_min },
-  { VP9E_SET_QM_MAX, ctrl_set_qm_max },
+  { AV1E_SET_ENABLE_QM, ctrl_set_enable_qm },
+  { AV1E_SET_QM_MIN, ctrl_set_qm_min },
+  { AV1E_SET_QM_MAX, ctrl_set_qm_max },
 #endif
-  { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
-  { VP9E_SET_AQ_MODE, ctrl_set_aq_mode },
-  { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
-  { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content },
-  { VP9E_SET_COLOR_SPACE, ctrl_set_color_space },
-  { VP9E_SET_COLOR_RANGE, ctrl_set_color_range },
-  { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
-  { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
-  { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
-  { VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
-  { VP10E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size },
+  { AV1E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
+  { AV1E_SET_AQ_MODE, ctrl_set_aq_mode },
+  { AV1E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
+  { AV1E_SET_TUNE_CONTENT, ctrl_set_tune_content },
+  { AV1E_SET_COLOR_SPACE, ctrl_set_color_space },
+  { AV1E_SET_COLOR_RANGE, ctrl_set_color_range },
+  { AV1E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
+  { AV1E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
+  { AV1E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
+  { AV1E_SET_RENDER_SIZE, ctrl_set_render_size },
+  { AV1E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size },
 
   // Getters
-  { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer },
-  { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
-  { VP9_GET_REFERENCE, ctrl_get_reference },
-  { VP9E_GET_ACTIVEMAP, ctrl_get_active_map },
-  { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
+  { AOME_GET_LAST_QUANTIZER, ctrl_get_quantizer },
+  { AOME_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
+  { AV1_GET_REFERENCE, ctrl_get_reference },
+  { AV1E_GET_ACTIVEMAP, ctrl_get_active_map },
+  { AV1_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
 
   { -1, NULL },
 };
 
-static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
+static aom_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
   { 0,
     {
         // NOLINT
@@ -1356,14 +1356,14 @@
 
         320,         // g_width
         240,         // g_height
-        VPX_BITS_8,  // g_bit_depth
+        AOM_BITS_8,  // g_bit_depth
         8,           // g_input_bit_depth
 
         { 1, 30 },  // g_timebase
 
         0,  // g_error_resilient
 
-        VPX_RC_ONE_PASS,  // g_pass
+        AOM_RC_ONE_PASS,  // g_pass
 
         25,  // g_lag_in_frames
 
@@ -1374,7 +1374,7 @@
         60,  // rc_resize_down_thresold
         30,  // rc_resize_up_thresold
 
-        VPX_VBR,      // rc_end_usage
+        AOM_VBR,      // rc_end_usage
         { NULL, 0 },  // rc_twopass_stats_in
         { NULL, 0 },  // rc_firstpass_mb_stats_in
         256,          // rc_target_bandwidth
@@ -1392,7 +1392,7 @@
         2000,  // rc_two_pass_vbrmax_section
 
         // keyframing settings (kf)
-        VPX_KF_AUTO,  // g_kfmode
+        AOM_KF_AUTO,  // g_kfmode
         0,            // kf_min_dist
         9999,         // kf_max_dist
     } },
@@ -1401,33 +1401,33 @@
 #ifndef VERSION_STRING
 #define VERSION_STRING
 #endif
-CODEC_INTERFACE(vpx_codec_vp10_cx) = {
-  "WebM Project VP10 Encoder" VERSION_STRING,
-  VPX_CODEC_INTERNAL_ABI_VERSION,
-#if CONFIG_VP9_HIGHBITDEPTH
-  VPX_CODEC_CAP_HIGHBITDEPTH |
+CODEC_INTERFACE(aom_codec_av1_cx) = {
+  "AOMedia Project AV1 Encoder" VERSION_STRING,
+  AOM_CODEC_INTERNAL_ABI_VERSION,
+#if CONFIG_AOM_HIGHBITDEPTH
+  AOM_CODEC_CAP_HIGHBITDEPTH |
 #endif
-      VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,  // vpx_codec_caps_t
-  encoder_init,                                    // vpx_codec_init_fn_t
-  encoder_destroy,                                 // vpx_codec_destroy_fn_t
-  encoder_ctrl_maps,                               // vpx_codec_ctrl_fn_map_t
+      AOM_CODEC_CAP_ENCODER | AOM_CODEC_CAP_PSNR,  // aom_codec_caps_t
+  encoder_init,                                    // aom_codec_init_fn_t
+  encoder_destroy,                                 // aom_codec_destroy_fn_t
+  encoder_ctrl_maps,                               // aom_codec_ctrl_fn_map_t
   {
       // NOLINT
-      NULL,  // vpx_codec_peek_si_fn_t
-      NULL,  // vpx_codec_get_si_fn_t
-      NULL,  // vpx_codec_decode_fn_t
-      NULL,  // vpx_codec_frame_get_fn_t
-      NULL   // vpx_codec_set_fb_fn_t
+      NULL,  // aom_codec_peek_si_fn_t
+      NULL,  // aom_codec_get_si_fn_t
+      NULL,  // aom_codec_decode_fn_t
+      NULL,  // aom_codec_frame_get_fn_t
+      NULL   // aom_codec_set_fb_fn_t
   },
   {
       // NOLINT
       1,                      // 1 cfg map
-      encoder_usage_cfg_map,  // vpx_codec_enc_cfg_map_t
-      encoder_encode,         // vpx_codec_encode_fn_t
-      encoder_get_cxdata,     // vpx_codec_get_cx_data_fn_t
-      encoder_set_config,     // vpx_codec_enc_config_set_fn_t
-      NULL,                   // vpx_codec_get_global_headers_fn_t
-      encoder_get_preview,    // vpx_codec_get_preview_frame_fn_t
-      NULL                    // vpx_codec_enc_mr_get_mem_loc_fn_t
+      encoder_usage_cfg_map,  // aom_codec_enc_cfg_map_t
+      encoder_encode,         // aom_codec_encode_fn_t
+      encoder_get_cxdata,     // aom_codec_get_cx_data_fn_t
+      encoder_set_config,     // aom_codec_enc_config_set_fn_t
+      NULL,                   // aom_codec_get_global_headers_fn_t
+      encoder_get_preview,    // aom_codec_get_preview_frame_fn_t
+      NULL                    // aom_codec_enc_mr_get_mem_loc_fn_t
   }
 };
diff --git a/av1/vp10_dx_iface.c b/av1/av1_dx_iface.c
similarity index 65%
rename from av1/vp10_dx_iface.c
rename to av1/av1_dx_iface.c
index 9e17c5a..53f7d46 100644
--- a/av1/vp10_dx_iface.c
+++ b/av1/av1_dx_iface.c
@@ -11,15 +11,15 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "./vpx_config.h"
-#include "./vpx_version.h"
+#include "./aom_config.h"
+#include "./aom_version.h"
 
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom/vp8dx.h"
-#include "aom/vpx_decoder.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom/aomdx.h"
+#include "aom/aom_decoder.h"
 #include "aom_dsp/bitreader_buffer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_util/aom_thread.h"
 
 #include "av1/common/alloccommon.h"
 #include "av1/common/frame_buffers.h"
@@ -28,9 +28,9 @@
 #include "av1/decoder/decoder.h"
 #include "av1/decoder/decodeframe.h"
 
-#include "av1/vp10_iface_common.h"
+#include "av1/av1_iface_common.h"
 
-typedef vpx_codec_stream_info_t vp10_stream_info_t;
+typedef aom_codec_stream_info_t av1_stream_info_t;
 
 // This limit is due to framebuffer numbers.
 // TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
@@ -38,18 +38,18 @@
 
 typedef struct cache_frame {
   int fb_idx;
-  vpx_image_t img;
+  aom_image_t img;
 } cache_frame;
 
-struct vpx_codec_alg_priv {
-  vpx_codec_priv_t base;
-  vpx_codec_dec_cfg_t cfg;
-  vp10_stream_info_t si;
+struct aom_codec_alg_priv {
+  aom_codec_priv_t base;
+  aom_codec_dec_cfg_t cfg;
+  av1_stream_info_t si;
   int postproc_cfg_set;
-  vp8_postproc_cfg_t postproc_cfg;
-  vpx_decrypt_cb decrypt_cb;
+  aom_postproc_cfg_t postproc_cfg;
+  aom_decrypt_cb decrypt_cb;
   void *decrypt_state;
-  vpx_image_t img;
+  aom_image_t img;
   int img_avail;
   int flushed;
   int invert_tile_order;
@@ -61,7 +61,7 @@
 
   // Frame parallel related.
   int frame_parallel_decode;  // frame-based threading.
-  VPxWorker *frame_workers;
+  AVxWorker *frame_workers;
   int num_frame_workers;
   int next_submit_worker_id;
   int last_submit_worker_id;
@@ -75,32 +75,32 @@
   // BufferPool that holds all reference frames. Shared by all the FrameWorkers.
   BufferPool *buffer_pool;
 
-  // External frame buffer info to save for VP10 common.
+  // External frame buffer info to save for AV1 common.
   void *ext_priv;  // Private data associated with the external frame buffers.
-  vpx_get_frame_buffer_cb_fn_t get_ext_fb_cb;
-  vpx_release_frame_buffer_cb_fn_t release_ext_fb_cb;
+  aom_get_frame_buffer_cb_fn_t get_ext_fb_cb;
+  aom_release_frame_buffer_cb_fn_t release_ext_fb_cb;
 };
 
-static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
-                                    vpx_codec_priv_enc_mr_cfg_t *data) {
-  // This function only allocates space for the vpx_codec_alg_priv_t
+static aom_codec_err_t decoder_init(aom_codec_ctx_t *ctx,
+                                    aom_codec_priv_enc_mr_cfg_t *data) {
+  // This function only allocates space for the aom_codec_alg_priv_t
   // structure. More memory may be required at the time the stream
   // information becomes known.
   (void)data;
 
   if (!ctx->priv) {
-    vpx_codec_alg_priv_t *const priv =
-        (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
-    if (priv == NULL) return VPX_CODEC_MEM_ERROR;
+    aom_codec_alg_priv_t *const priv =
+        (aom_codec_alg_priv_t *)aom_calloc(1, sizeof(*priv));
+    if (priv == NULL) return AOM_CODEC_MEM_ERROR;
 
-    ctx->priv = (vpx_codec_priv_t *)priv;
+    ctx->priv = (aom_codec_priv_t *)priv;
     ctx->priv->init_flags = ctx->init_flags;
     priv->si.sz = sizeof(priv->si);
     priv->flushed = 0;
     // Only do frame parallel decode when threads > 1.
     priv->frame_parallel_decode =
         (ctx->config.dec && (ctx->config.dec->threads > 1) &&
-         (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
+         (ctx->init_flags & AOM_CODEC_USE_FRAME_THREADING))
             ? 1
             : 0;
     if (ctx->config.dec) {
@@ -109,28 +109,28 @@
     }
   }
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t decoder_destroy(aom_codec_alg_priv_t *ctx) {
   if (ctx->frame_workers != NULL) {
     int i;
     for (i = 0; i < ctx->num_frame_workers; ++i) {
-      VPxWorker *const worker = &ctx->frame_workers[i];
+      AVxWorker *const worker = &ctx->frame_workers[i];
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      vpx_get_worker_interface()->end(worker);
-      vp10_remove_common(&frame_worker_data->pbi->common);
+      aom_get_worker_interface()->end(worker);
+      av1_remove_common(&frame_worker_data->pbi->common);
 #if CONFIG_LOOP_RESTORATION
-      vp10_free_restoration_buffers(&frame_worker_data->pbi->common);
+      av1_free_restoration_buffers(&frame_worker_data->pbi->common);
 #endif  // CONFIG_LOOP_RESTORATION
-      vp10_decoder_remove(frame_worker_data->pbi);
-      vpx_free(frame_worker_data->scratch_buffer);
+      av1_decoder_remove(frame_worker_data->pbi);
+      aom_free(frame_worker_data->scratch_buffer);
 #if CONFIG_MULTITHREAD
       pthread_mutex_destroy(&frame_worker_data->stats_mutex);
       pthread_cond_destroy(&frame_worker_data->stats_cond);
 #endif
-      vpx_free(frame_worker_data);
+      aom_free(frame_worker_data);
     }
 #if CONFIG_MULTITHREAD
     pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
@@ -138,22 +138,22 @@
   }
 
   if (ctx->buffer_pool) {
-    vp10_free_ref_frame_buffers(ctx->buffer_pool);
-    vp10_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+    av1_free_ref_frame_buffers(ctx->buffer_pool);
+    av1_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
   }
 
-  vpx_free(ctx->frame_workers);
-  vpx_free(ctx->buffer_pool);
-  vpx_free(ctx);
-  return VPX_CODEC_OK;
+  aom_free(ctx->frame_workers);
+  aom_free(ctx->buffer_pool);
+  aom_free(ctx);
+  return AOM_CODEC_OK;
 }
 
 static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
-                                              struct vpx_read_bit_buffer *rb) {
-  vpx_color_space_t color_space;
+                                              struct aom_read_bit_buffer *rb) {
+  aom_color_space_t color_space;
   if (profile >= PROFILE_2) rb->bit_offset += 1;  // Bit-depth 10 or 12.
-  color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
-  if (color_space != VPX_CS_SRGB) {
+  color_space = (aom_color_space_t)aom_rb_read_literal(rb, 3);
+  if (color_space != AOM_CS_SRGB) {
     rb->bit_offset += 1;  // [16,235] (including xvycc) vs [0,255] range.
     if (profile == PROFILE_1 || profile == PROFILE_3) {
       rb->bit_offset += 2;  // subsampling x/y.
@@ -170,19 +170,19 @@
   return 1;
 }
 
-static vpx_codec_err_t decoder_peek_si_internal(
-    const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
-    int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
+static aom_codec_err_t decoder_peek_si_internal(
+    const uint8_t *data, unsigned int data_sz, aom_codec_stream_info_t *si,
+    int *is_intra_only, aom_decrypt_cb decrypt_cb, void *decrypt_state) {
   int intra_only_flag = 0;
   uint8_t clear_buffer[9];
 
-  if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
+  if (data + data_sz <= data) return AOM_CODEC_INVALID_PARAM;
 
   si->is_kf = 0;
   si->w = si->h = 0;
 
   if (decrypt_cb) {
-    data_sz = VPXMIN(sizeof(clear_buffer), data_sz);
+    data_sz = AOMMIN(sizeof(clear_buffer), data_sz);
     decrypt_cb(decrypt_state, data, clear_buffer, data_sz);
     data = clear_buffer;
   }
@@ -190,91 +190,91 @@
   {
     int show_frame;
     int error_resilient;
-    struct vpx_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
-    const int frame_marker = vpx_rb_read_literal(&rb, 2);
-    const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
+    struct aom_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
+    const int frame_marker = aom_rb_read_literal(&rb, 2);
+    const BITSTREAM_PROFILE profile = av1_read_profile(&rb);
 
-    if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
+    if (frame_marker != AOM_FRAME_MARKER) return AOM_CODEC_UNSUP_BITSTREAM;
 
-    if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
+    if (profile >= MAX_PROFILES) return AOM_CODEC_UNSUP_BITSTREAM;
 
     if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
-      return VPX_CODEC_UNSUP_BITSTREAM;
+      return AOM_CODEC_UNSUP_BITSTREAM;
 
-    if (vpx_rb_read_bit(&rb)) {     // show an existing frame
-      vpx_rb_read_literal(&rb, 3);  // Frame buffer to show.
-      return VPX_CODEC_OK;
+    if (aom_rb_read_bit(&rb)) {     // show an existing frame
+      aom_rb_read_literal(&rb, 3);  // Frame buffer to show.
+      return AOM_CODEC_OK;
     }
 
-    if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM;
+    if (data_sz <= 8) return AOM_CODEC_UNSUP_BITSTREAM;
 
-    si->is_kf = !vpx_rb_read_bit(&rb);
-    show_frame = vpx_rb_read_bit(&rb);
-    error_resilient = vpx_rb_read_bit(&rb);
+    si->is_kf = !aom_rb_read_bit(&rb);
+    show_frame = aom_rb_read_bit(&rb);
+    error_resilient = aom_rb_read_bit(&rb);
 
     if (si->is_kf) {
-      if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+      if (!av1_read_sync_code(&rb)) return AOM_CODEC_UNSUP_BITSTREAM;
 
       if (!parse_bitdepth_colorspace_sampling(profile, &rb))
-        return VPX_CODEC_UNSUP_BITSTREAM;
-      vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+        return AOM_CODEC_UNSUP_BITSTREAM;
+      av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
     } else {
-      intra_only_flag = show_frame ? 0 : vpx_rb_read_bit(&rb);
+      intra_only_flag = show_frame ? 0 : aom_rb_read_bit(&rb);
 
       rb.bit_offset += error_resilient ? 0 : 2;  // reset_frame_context
 
       if (intra_only_flag) {
-        if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+        if (!av1_read_sync_code(&rb)) return AOM_CODEC_UNSUP_BITSTREAM;
         if (profile > PROFILE_0) {
           if (!parse_bitdepth_colorspace_sampling(profile, &rb))
-            return VPX_CODEC_UNSUP_BITSTREAM;
+            return AOM_CODEC_UNSUP_BITSTREAM;
         }
         rb.bit_offset += REF_FRAMES;  // refresh_frame_flags
-        vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+        av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
       }
     }
   }
   if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t decoder_peek_si(const uint8_t *data,
+static aom_codec_err_t decoder_peek_si(const uint8_t *data,
                                        unsigned int data_sz,
-                                       vpx_codec_stream_info_t *si) {
+                                       aom_codec_stream_info_t *si) {
   return decoder_peek_si_internal(data, data_sz, si, NULL, NULL, NULL);
 }
 
-static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
-                                      vpx_codec_stream_info_t *si) {
-  const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
-                        ? sizeof(vp10_stream_info_t)
-                        : sizeof(vpx_codec_stream_info_t);
+static aom_codec_err_t decoder_get_si(aom_codec_alg_priv_t *ctx,
+                                      aom_codec_stream_info_t *si) {
+  const size_t sz = (si->sz >= sizeof(av1_stream_info_t))
+                        ? sizeof(av1_stream_info_t)
+                        : sizeof(aom_codec_stream_info_t);
   memcpy(si, &ctx->si, sz);
   si->sz = (unsigned int)sz;
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static void set_error_detail(vpx_codec_alg_priv_t *ctx,
+static void set_error_detail(aom_codec_alg_priv_t *ctx,
                              const char *const error) {
   ctx->base.err_detail = error;
 }
 
-static vpx_codec_err_t update_error_state(
-    vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
+static aom_codec_err_t update_error_state(
+    aom_codec_alg_priv_t *ctx, const struct aom_internal_error_info *error) {
   if (error->error_code)
     set_error_detail(ctx, error->has_detail ? error->detail : NULL);
 
   return error->error_code;
 }
 
-static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
+static void init_buffer_callbacks(aom_codec_alg_priv_t *ctx) {
   int i;
 
   for (i = 0; i < ctx->num_frame_workers; ++i) {
-    VPxWorker *const worker = &ctx->frame_workers[i];
+    AVxWorker *const worker = &ctx->frame_workers[i];
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
-    VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+    AV1_COMMON *const cm = &frame_worker_data->pbi->common;
     BufferPool *const pool = cm->buffer_pool;
 
     cm->new_fb_idx = INVALID_IDX;
@@ -286,11 +286,11 @@
       pool->release_fb_cb = ctx->release_ext_fb_cb;
       pool->cb_priv = ctx->ext_priv;
     } else {
-      pool->get_fb_cb = vp10_get_frame_buffer;
-      pool->release_fb_cb = vp10_release_frame_buffer;
+      pool->get_fb_cb = av1_get_frame_buffer;
+      pool->release_fb_cb = av1_release_frame_buffer;
 
-      if (vp10_alloc_internal_frame_buffers(&pool->int_frame_buffers))
-        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+      if (av1_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                            "Failed to initialize internal frame buffers");
 
       pool->cb_priv = &pool->int_frame_buffers;
@@ -298,8 +298,8 @@
   }
 }
 
-static void set_default_ppflags(vp8_postproc_cfg_t *cfg) {
-  cfg->post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK;
+static void set_default_ppflags(aom_postproc_cfg_t *cfg) {
+  cfg->post_proc_flag = AOM_DEBLOCK | AOM_DEMACROBLOCK;
   cfg->deblocking_level = 4;
   cfg->noise_level = 0;
 }
@@ -309,7 +309,7 @@
   const uint8_t *data = frame_worker_data->data;
   (void)arg2;
 
-  frame_worker_data->result = vp10_receive_compressed_data(
+  frame_worker_data->result = av1_receive_compressed_data(
       frame_worker_data->pbi, frame_worker_data->data_size, &data);
   frame_worker_data->data_end = data;
 
@@ -318,17 +318,17 @@
     // the compressed data.
     if (frame_worker_data->result != 0 ||
         frame_worker_data->data + frame_worker_data->data_size - 1 > data) {
-      VPxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
+      AVxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
       BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
       // Signal all the other threads that are waiting for this frame.
-      vp10_frameworker_lock_stats(worker);
+      av1_frameworker_lock_stats(worker);
       frame_worker_data->frame_context_ready = 1;
       lock_buffer_pool(pool);
       frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
       unlock_buffer_pool(pool);
       frame_worker_data->pbi->need_resync = 1;
-      vp10_frameworker_signal_stats(worker);
-      vp10_frameworker_unlock_stats(worker);
+      av1_frameworker_signal_stats(worker);
+      av1_frameworker_unlock_stats(worker);
       return 0;
     }
   } else if (frame_worker_data->result != 0) {
@@ -339,9 +339,9 @@
   return !frame_worker_data->result;
 }
 
-static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
+static aom_codec_err_t init_decoder(aom_codec_alg_priv_t *ctx) {
   int i;
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
 
   ctx->last_show_frame = -1;
   ctx->next_submit_worker_id = 0;
@@ -358,37 +358,37 @@
   ctx->available_threads = ctx->num_frame_workers;
   ctx->flushed = 0;
 
-  ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
-  if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
+  ctx->buffer_pool = (BufferPool *)aom_calloc(1, sizeof(BufferPool));
+  if (ctx->buffer_pool == NULL) return AOM_CODEC_MEM_ERROR;
 
 #if CONFIG_MULTITHREAD
   if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
     set_error_detail(ctx, "Failed to allocate buffer pool mutex");
-    return VPX_CODEC_MEM_ERROR;
+    return AOM_CODEC_MEM_ERROR;
   }
 #endif
 
-  ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
+  ctx->frame_workers = (AVxWorker *)aom_malloc(ctx->num_frame_workers *
                                                sizeof(*ctx->frame_workers));
   if (ctx->frame_workers == NULL) {
     set_error_detail(ctx, "Failed to allocate frame_workers");
-    return VPX_CODEC_MEM_ERROR;
+    return AOM_CODEC_MEM_ERROR;
   }
 
   for (i = 0; i < ctx->num_frame_workers; ++i) {
-    VPxWorker *const worker = &ctx->frame_workers[i];
+    AVxWorker *const worker = &ctx->frame_workers[i];
     FrameWorkerData *frame_worker_data = NULL;
     winterface->init(worker);
-    worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
+    worker->data1 = aom_memalign(32, sizeof(FrameWorkerData));
     if (worker->data1 == NULL) {
       set_error_detail(ctx, "Failed to allocate frame_worker_data");
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
     frame_worker_data = (FrameWorkerData *)worker->data1;
-    frame_worker_data->pbi = vp10_decoder_create(ctx->buffer_pool);
+    frame_worker_data->pbi = av1_decoder_create(ctx->buffer_pool);
     if (frame_worker_data->pbi == NULL) {
       set_error_detail(ctx, "Failed to allocate frame_worker_data");
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
     frame_worker_data->pbi->frame_worker_owner = worker;
     frame_worker_data->worker_id = i;
@@ -399,12 +399,12 @@
 #if CONFIG_MULTITHREAD
     if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) {
       set_error_detail(ctx, "Failed to allocate frame_worker_data mutex");
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
 
     if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) {
       set_error_detail(ctx, "Failed to allocate frame_worker_data cond");
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
 #endif
     // If decoding in serial mode, FrameWorker thread could create tile worker
@@ -415,35 +415,35 @@
     frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
     frame_worker_data->pbi->common.frame_parallel_decode =
         ctx->frame_parallel_decode;
-    worker->hook = (VPxWorkerHook)frame_worker_hook;
+    worker->hook = (AVxWorkerHook)frame_worker_hook;
     if (!winterface->reset(worker)) {
       set_error_detail(ctx, "Frame Worker thread creation failed");
-      return VPX_CODEC_MEM_ERROR;
+      return AOM_CODEC_MEM_ERROR;
     }
   }
 
   // If postprocessing was enabled by the application and a
   // configuration has not been provided, default it.
-  if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
+  if (!ctx->postproc_cfg_set && (ctx->base.init_flags & AOM_CODEC_USE_POSTPROC))
     set_default_ppflags(&ctx->postproc_cfg);
 
   init_buffer_callbacks(ctx);
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static INLINE void check_resync(vpx_codec_alg_priv_t *const ctx,
-                                const VP10Decoder *const pbi) {
+static INLINE void check_resync(aom_codec_alg_priv_t *const ctx,
+                                const AV1Decoder *const pbi) {
   // Clear resync flag if worker got a key frame or intra only frame.
   if (ctx->need_resync == 1 && pbi->need_resync == 0 &&
       (pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME))
     ctx->need_resync = 0;
 }
 
-static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t decode_one(aom_codec_alg_priv_t *ctx,
                                   const uint8_t **data, unsigned int data_sz,
                                   void *user_priv, int64_t deadline) {
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   (void)deadline;
 
   // Determine the stream parameters. Note that we rely on peek_si to
@@ -451,16 +451,16 @@
   // of the heap.
   if (!ctx->si.h) {
     int is_intra_only = 0;
-    const vpx_codec_err_t res =
+    const aom_codec_err_t res =
         decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
                                  ctx->decrypt_cb, ctx->decrypt_state);
-    if (res != VPX_CODEC_OK) return res;
+    if (res != AOM_CODEC_OK) return res;
 
-    if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
+    if (!ctx->si.is_kf && !is_intra_only) return AOM_CODEC_ERROR;
   }
 
   if (!ctx->frame_parallel_decode) {
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     frame_worker_data->data = *data;
     frame_worker_data->data_size = data_sz;
@@ -488,11 +488,11 @@
 
     check_resync(ctx, frame_worker_data->pbi);
   } else {
-    VPxWorker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
+    AVxWorker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     // Copy context from last worker thread to next worker thread.
     if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
-      vp10_frameworker_copy_context(
+      av1_frameworker_copy_context(
           &ctx->frame_workers[ctx->next_submit_worker_id],
           &ctx->frame_workers[ctx->last_submit_worker_id]);
 
@@ -503,10 +503,10 @@
     // avoid too many deallocate and allocate.
     if (frame_worker_data->scratch_buffer_size < data_sz) {
       frame_worker_data->scratch_buffer =
-          (uint8_t *)vpx_realloc(frame_worker_data->scratch_buffer, data_sz);
+          (uint8_t *)aom_realloc(frame_worker_data->scratch_buffer, data_sz);
       if (frame_worker_data->scratch_buffer == NULL) {
         set_error_detail(ctx, "Failed to reallocate scratch buffer");
-        return VPX_CODEC_MEM_ERROR;
+        return AOM_CODEC_MEM_ERROR;
       }
       frame_worker_data->scratch_buffer_size = data_sz;
     }
@@ -530,13 +530,13 @@
     winterface->launch(worker);
   }
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
+static void wait_worker_and_cache_frame(aom_codec_alg_priv_t *ctx) {
   YV12_BUFFER_CONFIG sd;
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
-  VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+  AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
   FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
   ctx->next_output_worker_id =
       (ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
@@ -547,8 +547,8 @@
 
   check_resync(ctx, frame_worker_data->pbi);
 
-  if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
-    VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+  if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+    AV1_COMMON *const cm = &frame_worker_data->pbi->common;
     RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
     ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
     yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
@@ -560,18 +560,18 @@
   }
 }
 
-static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
                                       const uint8_t *data, unsigned int data_sz,
                                       void *user_priv, long deadline) {
   const uint8_t *data_start = data;
   const uint8_t *const data_end = data + data_sz;
-  vpx_codec_err_t res;
+  aom_codec_err_t res;
   uint32_t frame_sizes[8];
   int frame_count;
 
   if (data == NULL && data_sz == 0) {
     ctx->flushed = 1;
-    return VPX_CODEC_OK;
+    return AOM_CODEC_OK;
   }
 
   // Reset flushed when receiving a valid frame.
@@ -579,13 +579,13 @@
 
   // Initialize the decoder workers on the first frame.
   if (ctx->frame_workers == NULL) {
-    const vpx_codec_err_t res = init_decoder(ctx);
-    if (res != VPX_CODEC_OK) return res;
+    const aom_codec_err_t res = init_decoder(ctx);
+    if (res != AOM_CODEC_OK) return res;
   }
 
-  res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
-                                    ctx->decrypt_cb, ctx->decrypt_state);
-  if (res != VPX_CODEC_OK) return res;
+  res = av1_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
+                                   ctx->decrypt_cb, ctx->decrypt_state);
+  if (res != AOM_CODEC_OK) return res;
 
   if (ctx->frame_parallel_decode) {
     // Decode in frame parallel mode. When decoding in this mode, the frame
@@ -601,7 +601,7 @@
         if (data_start < data ||
             frame_size > (uint32_t)(data_end - data_start)) {
           set_error_detail(ctx, "Invalid frame size in index");
-          return VPX_CODEC_CORRUPT_FRAME;
+          return AOM_CODEC_CORRUPT_FRAME;
         }
 
         if (ctx->available_threads == 0) {
@@ -612,13 +612,13 @@
           } else {
             // TODO(hkuang): Add unit test to test this path.
             set_error_detail(ctx, "Frame output cache is full.");
-            return VPX_CODEC_ERROR;
+            return AOM_CODEC_ERROR;
           }
         }
 
         res =
             decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
-        if (res != VPX_CODEC_OK) return res;
+        if (res != AOM_CODEC_OK) return res;
         data_start += frame_size;
       }
     } else {
@@ -630,12 +630,12 @@
         } else {
           // TODO(hkuang): Add unit test to test this path.
           set_error_detail(ctx, "Frame output cache is full.");
-          return VPX_CODEC_ERROR;
+          return AOM_CODEC_ERROR;
         }
       }
 
       res = decode_one(ctx, &data, data_sz, user_priv, deadline);
-      if (res != VPX_CODEC_OK) return res;
+      if (res != AOM_CODEC_OK) return res;
     }
   } else {
     // Decode in serial mode.
@@ -645,25 +645,25 @@
       for (i = 0; i < frame_count; ++i) {
         const uint8_t *data_start_copy = data_start;
         const uint32_t frame_size = frame_sizes[i];
-        vpx_codec_err_t res;
+        aom_codec_err_t res;
         if (data_start < data ||
             frame_size > (uint32_t)(data_end - data_start)) {
           set_error_detail(ctx, "Invalid frame size in index");
-          return VPX_CODEC_CORRUPT_FRAME;
+          return AOM_CODEC_CORRUPT_FRAME;
         }
 
         res =
             decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
-        if (res != VPX_CODEC_OK) return res;
+        if (res != AOM_CODEC_OK) return res;
 
         data_start += frame_size;
       }
     } else {
       while (data_start < data_end) {
         const uint32_t frame_size = (uint32_t)(data_end - data_start);
-        const vpx_codec_err_t res =
+        const aom_codec_err_t res =
             decode_one(ctx, &data_start, frame_size, user_priv, deadline);
-        if (res != VPX_CODEC_OK) return res;
+        if (res != AOM_CODEC_OK) return res;
 
         // Account for suboptimal termination by the encoder.
         while (data_start < data_end) {
@@ -679,7 +679,7 @@
   return res;
 }
 
-static void release_last_output_frame(vpx_codec_alg_priv_t *ctx) {
+static void release_last_output_frame(aom_codec_alg_priv_t *ctx) {
   RefCntBuffer *const frame_bufs = ctx->buffer_pool->frame_bufs;
   // Decrease reference count of last output frame in frame parallel mode.
   if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) {
@@ -690,9 +690,9 @@
   }
 }
 
-static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
-                                      vpx_codec_iter_t *iter) {
-  vpx_image_t *img = NULL;
+static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx,
+                                      aom_codec_iter_t *iter) {
+  aom_image_t *img = NULL;
 
   // Only return frame when all the cpu are busy or
   // application fluhsed the decoder in frame parallel decode.
@@ -717,8 +717,8 @@
   if (*iter == NULL && ctx->frame_workers != NULL) {
     do {
       YV12_BUFFER_CONFIG sd;
-      const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
-      VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+      const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+      AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
       ctx->next_output_worker_id =
@@ -731,8 +731,8 @@
           frame_worker_data->received_frame = 0;
           check_resync(ctx, frame_worker_data->pbi);
         }
-        if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
-          VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+        if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+          AV1_COMMON *const cm = &frame_worker_data->pbi->common;
           RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
           release_last_output_frame(ctx);
           ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
@@ -742,7 +742,7 @@
 #if CONFIG_EXT_TILE
           if (frame_worker_data->pbi->dec_tile_row >= 0) {
             const int tile_row =
-                VPXMIN(frame_worker_data->pbi->dec_tile_row, cm->tile_rows - 1);
+                AOMMIN(frame_worker_data->pbi->dec_tile_row, cm->tile_rows - 1);
             const int mi_row = tile_row * cm->tile_height;
             const int ssy = ctx->img.y_chroma_shift;
             int plane;
@@ -752,12 +752,12 @@
                   mi_row * (MI_SIZE >> ssy) * ctx->img.stride[plane];
             }
             ctx->img.d_h =
-                VPXMIN(cm->tile_height, cm->mi_rows - mi_row) * MI_SIZE;
+                AOMMIN(cm->tile_height, cm->mi_rows - mi_row) * MI_SIZE;
           }
 
           if (frame_worker_data->pbi->dec_tile_col >= 0) {
             const int tile_col =
-                VPXMIN(frame_worker_data->pbi->dec_tile_col, cm->tile_cols - 1);
+                AOMMIN(frame_worker_data->pbi->dec_tile_col, cm->tile_cols - 1);
             const int mi_col = tile_col * cm->tile_width;
             const int ssx = ctx->img.x_chroma_shift;
             int plane;
@@ -766,7 +766,7 @@
               ctx->img.planes[plane] += mi_col * (MI_SIZE >> ssx);
             }
             ctx->img.d_w =
-                VPXMIN(cm->tile_width, cm->mi_cols - mi_col) * MI_SIZE;
+                AOMMIN(cm->tile_width, cm->mi_cols - mi_col) * MI_SIZE;
           }
 #endif  // CONFIG_EXT_TILE
 
@@ -786,271 +786,271 @@
   return NULL;
 }
 
-static vpx_codec_err_t decoder_set_fb_fn(
-    vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
-    vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
+static aom_codec_err_t decoder_set_fb_fn(
+    aom_codec_alg_priv_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
+    aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
   if (cb_get == NULL || cb_release == NULL) {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   } else if (ctx->frame_workers == NULL) {
     // If the decoder has already been initialized, do not accept changes to
     // the frame buffer functions.
     ctx->get_ext_fb_cb = cb_get;
     ctx->release_ext_fb_cb = cb_release;
     ctx->ext_priv = cb_priv;
-    return VPX_CODEC_OK;
+    return AOM_CODEC_OK;
   }
 
-  return VPX_CODEC_ERROR;
+  return AOM_CODEC_ERROR;
 }
 
-static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *);
+  aom_ref_frame_t *const data = va_arg(args, aom_ref_frame_t *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (data) {
-    vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data;
+    aom_ref_frame_t *const frame = (aom_ref_frame_t *)data;
     YV12_BUFFER_CONFIG sd;
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
-    return vp10_set_reference_dec(&frame_worker_data->pbi->common,
-                                  ref_frame_to_vp10_reframe(frame->frame_type),
-                                  &sd);
+    return av1_set_reference_dec(&frame_worker_data->pbi->common,
+                                 ref_frame_to_av1_reframe(frame->frame_type),
+                                 &sd);
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
-  vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+  aom_ref_frame_t *data = va_arg(args, aom_ref_frame_t *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (data) {
-    vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+    aom_ref_frame_t *frame = (aom_ref_frame_t *)data;
     YV12_BUFFER_CONFIG sd;
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
-    return vp10_copy_reference_dec(frame_worker_data->pbi,
-                                   (VPX_REFFRAME)frame->frame_type, &sd);
+    return av1_copy_reference_dec(frame_worker_data->pbi,
+                                  (AOM_REFFRAME)frame->frame_type, &sd);
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+  av1_ref_frame_t *data = va_arg(args, av1_ref_frame_t *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (data) {
     YV12_BUFFER_CONFIG *fb;
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
-    if (fb == NULL) return VPX_CODEC_ERROR;
+    if (fb == NULL) return AOM_CODEC_ERROR;
     yuvconfig2image(&data->img, fb, NULL);
-    return VPX_CODEC_OK;
+    return AOM_CODEC_OK;
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_get_new_frame_image(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_new_frame_image(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  vpx_image_t *new_img = va_arg(args, vpx_image_t *);
+  aom_image_t *new_img = va_arg(args, aom_image_t *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (new_img) {
     YV12_BUFFER_CONFIG new_frame;
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
 
-    if (vp10_get_frame_to_show(frame_worker_data->pbi, &new_frame) == 0) {
+    if (av1_get_frame_to_show(frame_worker_data->pbi, &new_frame) == 0) {
       yuvconfig2image(new_img, &new_frame, NULL);
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   } else {
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
   }
 }
 
-static vpx_codec_err_t ctrl_set_postproc(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_postproc(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
   (void)ctx;
   (void)args;
-  return VPX_CODEC_INCAPABLE;
+  return AOM_CODEC_INCAPABLE;
 }
 
-static vpx_codec_err_t ctrl_set_dbg_options(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_dbg_options(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
   (void)ctx;
   (void)args;
-  return VPX_CODEC_INCAPABLE;
+  return AOM_CODEC_INCAPABLE;
 }
 
-static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_last_ref_updates(aom_codec_alg_priv_t *ctx,
                                                  va_list args) {
   int *const update_info = va_arg(args, int *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (update_info) {
     if (ctx->frame_workers) {
-      VPxWorker *const worker = ctx->frame_workers;
+      AVxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
       *update_info = frame_worker_data->pbi->refresh_frame_flags;
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   }
 
-  return VPX_CODEC_INVALID_PARAM;
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_frame_corrupted(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
   int *corrupted = va_arg(args, int *);
 
   if (corrupted) {
     if (ctx->frame_workers) {
-      VPxWorker *const worker = ctx->frame_workers;
+      AVxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
       RefCntBuffer *const frame_bufs =
           frame_worker_data->pbi->common.buffer_pool->frame_bufs;
       if (frame_worker_data->pbi->common.frame_to_show == NULL)
-        return VPX_CODEC_ERROR;
+        return AOM_CODEC_ERROR;
       if (ctx->last_show_frame >= 0)
         *corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted;
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   }
 
-  return VPX_CODEC_INVALID_PARAM;
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_get_frame_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_frame_size(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
   int *const frame_size = va_arg(args, int *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (frame_size) {
     if (ctx->frame_workers) {
-      VPxWorker *const worker = ctx->frame_workers;
+      AVxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       frame_size[0] = cm->width;
       frame_size[1] = cm->height;
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   }
 
-  return VPX_CODEC_INVALID_PARAM;
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_get_render_size(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_render_size(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
   int *const render_size = va_arg(args, int *);
 
   // Only support this function in serial decode.
   if (ctx->frame_parallel_decode) {
     set_error_detail(ctx, "Not supported in frame parallel decode");
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
   }
 
   if (render_size) {
     if (ctx->frame_workers) {
-      VPxWorker *const worker = ctx->frame_workers;
+      AVxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       render_size[0] = cm->render_width;
       render_size[1] = cm->render_height;
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   }
 
-  return VPX_CODEC_INVALID_PARAM;
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_get_bit_depth(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   unsigned int *const bit_depth = va_arg(args, unsigned int *);
-  VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+  AVxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
 
   if (bit_depth) {
     if (worker) {
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       *bit_depth = cm->bit_depth;
-      return VPX_CODEC_OK;
+      return AOM_CODEC_OK;
     } else {
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
   }
 
-  return VPX_CODEC_INVALID_PARAM;
+  return AOM_CODEC_INVALID_PARAM;
 }
 
-static vpx_codec_err_t ctrl_set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_invert_tile_order(aom_codec_alg_priv_t *ctx,
                                                   va_list args) {
   ctx->invert_tile_order = va_arg(args, int);
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_decryptor(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decryptor(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *);
+  aom_decrypt_init *init = va_arg(args, aom_decrypt_init *);
   ctx->decrypt_cb = init ? init->decrypt_cb : NULL;
   ctx->decrypt_state = init ? init->decrypt_state : NULL;
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_byte_alignment(aom_codec_alg_priv_t *ctx,
                                                va_list args) {
   const int legacy_byte_alignment = 0;
   const int min_byte_alignment = 32;
@@ -1061,67 +1061,67 @@
       (byte_alignment < min_byte_alignment ||
        byte_alignment > max_byte_alignment ||
        (byte_alignment & (byte_alignment - 1)) != 0))
-    return VPX_CODEC_INVALID_PARAM;
+    return AOM_CODEC_INVALID_PARAM;
 
   ctx->byte_alignment = byte_alignment;
   if (ctx->frame_workers) {
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     frame_worker_data->pbi->common.byte_alignment = byte_alignment;
   }
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_skip_loop_filter(aom_codec_alg_priv_t *ctx,
                                                  va_list args) {
   ctx->skip_loop_filter = va_arg(args, int);
 
   if (ctx->frame_workers) {
-    VPxWorker *const worker = ctx->frame_workers;
+    AVxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     frame_worker_data->pbi->common.skip_loop_filter = ctx->skip_loop_filter;
   }
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_decode_tile_row(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decode_tile_row(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
   ctx->decode_tile_row = va_arg(args, int);
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_err_t ctrl_set_decode_tile_col(vpx_codec_alg_priv_t *ctx,
+static aom_codec_err_t ctrl_set_decode_tile_col(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
   ctx->decode_tile_col = va_arg(args, int);
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
-  { VP8_COPY_REFERENCE, ctrl_copy_reference },
+static aom_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
+  { AOM_COPY_REFERENCE, ctrl_copy_reference },
 
   // Setters
-  { VP8_SET_REFERENCE, ctrl_set_reference },
-  { VP8_SET_POSTPROC, ctrl_set_postproc },
-  { VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
-  { VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
-  { VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
-  { VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
-  { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
-  { VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
-  { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
-  { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
-  { VP10_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row },
-  { VP10_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col },
+  { AOM_SET_REFERENCE, ctrl_set_reference },
+  { AOM_SET_POSTPROC, ctrl_set_postproc },
+  { AOM_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
+  { AOM_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
+  { AOM_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
+  { AOM_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
+  { AV1_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
+  { AOMD_SET_DECRYPTOR, ctrl_set_decryptor },
+  { AV1_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
+  { AV1_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
+  { AV1_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row },
+  { AV1_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col },
 
   // Getters
-  { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
-  { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
-  { VP9_GET_REFERENCE, ctrl_get_reference },
-  { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
-  { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
-  { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
-  { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
+  { AOMD_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
+  { AOMD_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
+  { AV1_GET_REFERENCE, ctrl_get_reference },
+  { AV1D_GET_DISPLAY_SIZE, ctrl_get_render_size },
+  { AV1D_GET_BIT_DEPTH, ctrl_get_bit_depth },
+  { AV1D_GET_FRAME_SIZE, ctrl_get_frame_size },
+  { AV1_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
 
   { -1, NULL },
 };
@@ -1129,31 +1129,31 @@
 #ifndef VERSION_STRING
 #define VERSION_STRING
 #endif
-CODEC_INTERFACE(vpx_codec_vp10_dx) = {
-  "WebM Project VP10 Decoder" VERSION_STRING,
-  VPX_CODEC_INTERNAL_ABI_VERSION,
-  VPX_CODEC_CAP_DECODER |
-      VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER,  // vpx_codec_caps_t
-  decoder_init,                             // vpx_codec_init_fn_t
-  decoder_destroy,                          // vpx_codec_destroy_fn_t
-  decoder_ctrl_maps,                        // vpx_codec_ctrl_fn_map_t
+CODEC_INTERFACE(aom_codec_av1_dx) = {
+  "AOMedia Project AV1 Decoder" VERSION_STRING,
+  AOM_CODEC_INTERNAL_ABI_VERSION,
+  AOM_CODEC_CAP_DECODER |
+      AOM_CODEC_CAP_EXTERNAL_FRAME_BUFFER,  // aom_codec_caps_t
+  decoder_init,                             // aom_codec_init_fn_t
+  decoder_destroy,                          // aom_codec_destroy_fn_t
+  decoder_ctrl_maps,                        // aom_codec_ctrl_fn_map_t
   {
       // NOLINT
-      decoder_peek_si,    // vpx_codec_peek_si_fn_t
-      decoder_get_si,     // vpx_codec_get_si_fn_t
-      decoder_decode,     // vpx_codec_decode_fn_t
-      decoder_get_frame,  // vpx_codec_frame_get_fn_t
-      decoder_set_fb_fn,  // vpx_codec_set_fb_fn_t
+      decoder_peek_si,    // aom_codec_peek_si_fn_t
+      decoder_get_si,     // aom_codec_get_si_fn_t
+      decoder_decode,     // aom_codec_decode_fn_t
+      decoder_get_frame,  // aom_codec_frame_get_fn_t
+      decoder_set_fb_fn,  // aom_codec_set_fb_fn_t
   },
   {
       // NOLINT
       0,
-      NULL,  // vpx_codec_enc_cfg_map_t
-      NULL,  // vpx_codec_encode_fn_t
-      NULL,  // vpx_codec_get_cx_data_fn_t
-      NULL,  // vpx_codec_enc_config_set_fn_t
-      NULL,  // vpx_codec_get_global_headers_fn_t
-      NULL,  // vpx_codec_get_preview_frame_fn_t
-      NULL   // vpx_codec_enc_mr_get_mem_loc_fn_t
+      NULL,  // aom_codec_enc_cfg_map_t
+      NULL,  // aom_codec_encode_fn_t
+      NULL,  // aom_codec_get_cx_data_fn_t
+      NULL,  // aom_codec_enc_config_set_fn_t
+      NULL,  // aom_codec_get_global_headers_fn_t
+      NULL,  // aom_codec_get_preview_frame_fn_t
+      NULL   // aom_codec_enc_mr_get_mem_loc_fn_t
   }
 };
diff --git a/av1/av1_iface_common.h b/av1/av1_iface_common.h
new file mode 100644
index 0000000..3ba029e
--- /dev/null
+++ b/av1/av1_iface_common.h
@@ -0,0 +1,145 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef AV1_AV1_IFACE_COMMON_H_
+#define AV1_AV1_IFACE_COMMON_H_
+
+#include "aom_ports/mem.h"
+
+static void yuvconfig2image(aom_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+                            void *user_priv) {
+  /** aom_img_wrap() doesn't allow specifying independent strides for
+    * the Y, U, and V planes, nor other alignment adjustments that
+    * might be representable by a YV12_BUFFER_CONFIG, so we just
+    * initialize all the fields.*/
+  int bps;
+  if (!yv12->subsampling_y) {
+    if (!yv12->subsampling_x) {
+      img->fmt = AOM_IMG_FMT_I444;
+      bps = 24;
+    } else {
+      img->fmt = AOM_IMG_FMT_I422;
+      bps = 16;
+    }
+  } else {
+    if (!yv12->subsampling_x) {
+      img->fmt = AOM_IMG_FMT_I440;
+      bps = 16;
+    } else {
+      img->fmt = AOM_IMG_FMT_I420;
+      bps = 12;
+    }
+  }
+  img->cs = yv12->color_space;
+  img->range = yv12->color_range;
+  img->bit_depth = 8;
+  img->w = yv12->y_stride;
+  img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * AOM_ENC_BORDER_IN_PIXELS, 3);
+  img->d_w = yv12->y_crop_width;
+  img->d_h = yv12->y_crop_height;
+  img->r_w = yv12->render_width;
+  img->r_h = yv12->render_height;
+  img->x_chroma_shift = yv12->subsampling_x;
+  img->y_chroma_shift = yv12->subsampling_y;
+  img->planes[AOM_PLANE_Y] = yv12->y_buffer;
+  img->planes[AOM_PLANE_U] = yv12->u_buffer;
+  img->planes[AOM_PLANE_V] = yv12->v_buffer;
+  img->planes[AOM_PLANE_ALPHA] = NULL;
+  img->stride[AOM_PLANE_Y] = yv12->y_stride;
+  img->stride[AOM_PLANE_U] = yv12->uv_stride;
+  img->stride[AOM_PLANE_V] = yv12->uv_stride;
+  img->stride[AOM_PLANE_ALPHA] = yv12->y_stride;
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
+    // aom_image_t uses byte strides and a pointer to the first byte
+    // of the image.
+    img->fmt = (aom_img_fmt_t)(img->fmt | AOM_IMG_FMT_HIGHBITDEPTH);
+    img->bit_depth = yv12->bit_depth;
+    img->planes[AOM_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
+    img->planes[AOM_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
+    img->planes[AOM_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
+    img->planes[AOM_PLANE_ALPHA] = NULL;
+    img->stride[AOM_PLANE_Y] = 2 * yv12->y_stride;
+    img->stride[AOM_PLANE_U] = 2 * yv12->uv_stride;
+    img->stride[AOM_PLANE_V] = 2 * yv12->uv_stride;
+    img->stride[AOM_PLANE_ALPHA] = 2 * yv12->y_stride;
+  }
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  img->bps = bps;
+  img->user_priv = user_priv;
+  img->img_data = yv12->buffer_alloc;
+  img->img_data_owner = 0;
+  img->self_allocd = 0;
+}
+
+static aom_codec_err_t image2yuvconfig(const aom_image_t *img,
+                                       YV12_BUFFER_CONFIG *yv12) {
+  yv12->y_buffer = img->planes[AOM_PLANE_Y];
+  yv12->u_buffer = img->planes[AOM_PLANE_U];
+  yv12->v_buffer = img->planes[AOM_PLANE_V];
+
+  yv12->y_crop_width = img->d_w;
+  yv12->y_crop_height = img->d_h;
+  yv12->render_width = img->r_w;
+  yv12->render_height = img->r_h;
+  yv12->y_width = img->d_w;
+  yv12->y_height = img->d_h;
+
+  yv12->uv_width =
+      img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
+  yv12->uv_height =
+      img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
+  yv12->uv_crop_width = yv12->uv_width;
+  yv12->uv_crop_height = yv12->uv_height;
+
+  yv12->y_stride = img->stride[AOM_PLANE_Y];
+  yv12->uv_stride = img->stride[AOM_PLANE_U];
+  yv12->color_space = img->cs;
+  yv12->color_range = img->range;
+
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
+    // In aom_image_t
+    //     planes point to uint8 address of start of data
+    //     stride counts uint8s to reach next row
+    // In YV12_BUFFER_CONFIG
+    //     y_buffer, u_buffer, v_buffer point to uint16 address of data
+    //     stride and border counts in uint16s
+    // This means that all the address calculations in the main body of code
+    // should work correctly.
+    // However, before we do any pixel operations we need to cast the address
+    // to a uint16 pointer and double its value.
+    yv12->y_buffer = CONVERT_TO_BYTEPTR(yv12->y_buffer);
+    yv12->u_buffer = CONVERT_TO_BYTEPTR(yv12->u_buffer);
+    yv12->v_buffer = CONVERT_TO_BYTEPTR(yv12->v_buffer);
+    yv12->y_stride >>= 1;
+    yv12->uv_stride >>= 1;
+    yv12->flags = YV12_FLAG_HIGHBITDEPTH;
+  } else {
+    yv12->flags = 0;
+  }
+  yv12->border = (yv12->y_stride - img->w) / 2;
+#else
+  yv12->border = (img->stride[AOM_PLANE_Y] - img->w) / 2;
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  yv12->subsampling_x = img->x_chroma_shift;
+  yv12->subsampling_y = img->y_chroma_shift;
+  return AOM_CODEC_OK;
+}
+
+static AOM_REFFRAME ref_frame_to_av1_reframe(aom_ref_frame_type_t frame) {
+  switch (frame) {
+    case AOM_LAST_FRAME: return AOM_LAST_FLAG;
+    case AOM_GOLD_FRAME: return AOM_GOLD_FLAG;
+    case AOM_ALTR_FRAME: return AOM_ALT_FLAG;
+  }
+  assert(0 && "Invalid Reference Frame");
+  return AOM_LAST_FLAG;
+}
+#endif  // AV1_AV1_IFACE_COMMON_H_
diff --git a/av1/av1cx.mk b/av1/av1cx.mk
new file mode 100644
index 0000000..463c5f7
--- /dev/null
+++ b/av1/av1cx.mk
@@ -0,0 +1,147 @@
+##
+##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_CX_EXPORTS += exports_enc
+
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no  += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no  += $(AV1_COMMON_SRCS_REMOVE-no)
+
+AV1_CX_SRCS-yes += av1_cx_iface.c
+
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/bitwriter.h
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/variance_tree.c
+AV1_CX_SRCS-yes += encoder/variance_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/palette.h
+AV1_CX_SRCS-yes += encoder/palette.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
+
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
+ifeq ($(CONFIG_DERING),yes)
+AV1_CX_SRCS-yes += encoder/pickdering.c
+endif
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+endif
+
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+
+ifeq ($(ARCH_X86_64),yes)
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+endif
+
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/av1_highbd_quantize_sse4.c
+endif
+
+ifeq ($(CONFIG_EXT_INTER),yes)
+AV1_CX_SRCS-yes += encoder/wedge_utils.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
+endif
+
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+endif
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/av1dx.mk b/av1/av1dx.mk
new file mode 100644
index 0000000..0b74abe
--- /dev/null
+++ b/av1/av1dx.mk
@@ -0,0 +1,34 @@
+##
+##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+AV1_DX_EXPORTS += exports_dec
+
+AV1_DX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_DX_SRCS-no  += $(AV1_COMMON_SRCS-no)
+AV1_DX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_DX_SRCS_REMOVE-no  += $(AV1_COMMON_SRCS_REMOVE-no)
+
+AV1_DX_SRCS-yes += av1_dx_iface.c
+
+AV1_DX_SRCS-yes += decoder/decodemv.c
+AV1_DX_SRCS-yes += decoder/decodeframe.c
+AV1_DX_SRCS-yes += decoder/decodeframe.h
+AV1_DX_SRCS-yes += decoder/detokenize.c
+AV1_DX_SRCS-yes += decoder/decodemv.h
+AV1_DX_SRCS-yes += decoder/detokenize.h
+AV1_DX_SRCS-yes += decoder/dthread.c
+AV1_DX_SRCS-yes += decoder/dthread.h
+AV1_DX_SRCS-yes += decoder/decoder.c
+AV1_DX_SRCS-yes += decoder/decoder.h
+AV1_DX_SRCS-yes += decoder/dsubexp.c
+AV1_DX_SRCS-yes += decoder/dsubexp.h
+AV1_DX_SRCS-yes += decoder/bitreader.h
+
+AV1_DX_SRCS-yes := $(filter-out $(AV1_DX_SRCS_REMOVE-yes),$(AV1_DX_SRCS-yes))
diff --git a/av1/common/alloccommon.c b/av1/common/alloccommon.c
index b6ff12a..eb4f8e6 100644
--- a/av1/common/alloccommon.c
+++ b/av1/common/alloccommon.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/alloccommon.h"
 #include "av1/common/blockd.h"
@@ -17,7 +17,7 @@
 #include "av1/common/entropymv.h"
 #include "av1/common/onyxc_int.h"
 
-void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
+void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
   const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
   const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
 
@@ -30,11 +30,11 @@
   cm->MBs = cm->mb_rows * cm->mb_cols;
 }
 
-static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
+static int alloc_seg_map(AV1_COMMON *cm, int seg_map_size) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
-    cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+    cm->seg_map_array[i] = (uint8_t *)aom_calloc(seg_map_size, 1);
     if (cm->seg_map_array[i] == NULL) return 1;
   }
   cm->seg_map_alloc_size = seg_map_size;
@@ -50,11 +50,11 @@
   return 0;
 }
 
-static void free_seg_map(VP10_COMMON *cm) {
+static void free_seg_map(AV1_COMMON *cm) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
-    vpx_free(cm->seg_map_array[i]);
+    aom_free(cm->seg_map_array[i]);
     cm->seg_map_array[i] = NULL;
   }
 
@@ -65,7 +65,7 @@
   }
 }
 
-void vp10_free_ref_frame_buffers(BufferPool *pool) {
+void av1_free_ref_frame_buffers(BufferPool *pool) {
   int i;
 
   for (i = 0; i < FRAME_BUFFERS; ++i) {
@@ -74,45 +74,45 @@
       pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
       pool->frame_bufs[i].ref_count = 0;
     }
-    vpx_free(pool->frame_bufs[i].mvs);
+    aom_free(pool->frame_bufs[i].mvs);
     pool->frame_bufs[i].mvs = NULL;
-    vpx_free_frame_buffer(&pool->frame_bufs[i].buf);
+    aom_free_frame_buffer(&pool->frame_bufs[i].buf);
   }
 }
 
 #if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(VP10_COMMON *cm) {
-  vpx_free(cm->rst_info.bilateral_level);
+void av1_free_restoration_buffers(AV1_COMMON *cm) {
+  aom_free(cm->rst_info.bilateral_level);
   cm->rst_info.bilateral_level = NULL;
-  vpx_free(cm->rst_info.vfilter);
+  aom_free(cm->rst_info.vfilter);
   cm->rst_info.vfilter = NULL;
-  vpx_free(cm->rst_info.hfilter);
+  aom_free(cm->rst_info.hfilter);
   cm->rst_info.hfilter = NULL;
-  vpx_free(cm->rst_info.wiener_level);
+  aom_free(cm->rst_info.wiener_level);
   cm->rst_info.wiener_level = NULL;
 }
 #endif  // CONFIG_LOOP_RESTORATION
 
-void vp10_free_context_buffers(VP10_COMMON *cm) {
+void av1_free_context_buffers(AV1_COMMON *cm) {
   int i;
   cm->free_mi(cm);
   free_seg_map(cm);
   for (i = 0; i < MAX_MB_PLANE; i++) {
-    vpx_free(cm->above_context[i]);
+    aom_free(cm->above_context[i]);
     cm->above_context[i] = NULL;
   }
-  vpx_free(cm->above_seg_context);
+  aom_free(cm->above_seg_context);
   cm->above_seg_context = NULL;
 #if CONFIG_VAR_TX
-  vpx_free(cm->above_txfm_context);
+  aom_free(cm->above_txfm_context);
   cm->above_txfm_context = NULL;
 #endif
 }
 
-int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
+int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
   int new_mi_size;
 
-  vp10_set_mb_mi(cm, width, height);
+  av1_set_mb_mi(cm, width, height);
   new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
   if (cm->mi_alloc_size < new_mi_size) {
     cm->free_mi(cm);
@@ -134,20 +134,20 @@
     int i;
 
     for (i = 0; i < MAX_MB_PLANE; i++) {
-      vpx_free(cm->above_context[i]);
-      cm->above_context[i] = (ENTROPY_CONTEXT *)vpx_calloc(
+      aom_free(cm->above_context[i]);
+      cm->above_context[i] = (ENTROPY_CONTEXT *)aom_calloc(
           2 * aligned_mi_cols, sizeof(*cm->above_context[0]));
       if (!cm->above_context[i]) goto fail;
     }
 
-    vpx_free(cm->above_seg_context);
-    cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc(
+    aom_free(cm->above_seg_context);
+    cm->above_seg_context = (PARTITION_CONTEXT *)aom_calloc(
         aligned_mi_cols, sizeof(*cm->above_seg_context));
     if (!cm->above_seg_context) goto fail;
 
 #if CONFIG_VAR_TX
-    vpx_free(cm->above_txfm_context);
-    cm->above_txfm_context = (TXFM_CONTEXT *)vpx_calloc(
+    aom_free(cm->above_txfm_context);
+    cm->above_txfm_context = (TXFM_CONTEXT *)aom_calloc(
         aligned_mi_cols, sizeof(*cm->above_txfm_context));
     if (!cm->above_txfm_context) goto fail;
 #endif
@@ -159,27 +159,27 @@
 
 fail:
   // clear the mi_* values to force a realloc on resync
-  vp10_set_mb_mi(cm, 0, 0);
-  vp10_free_context_buffers(cm);
+  av1_set_mb_mi(cm, 0, 0);
+  av1_free_context_buffers(cm);
   return 1;
 }
 
-void vp10_remove_common(VP10_COMMON *cm) {
-  vp10_free_context_buffers(cm);
+void av1_remove_common(AV1_COMMON *cm) {
+  av1_free_context_buffers(cm);
 
-  vpx_free(cm->fc);
+  aom_free(cm->fc);
   cm->fc = NULL;
-  vpx_free(cm->frame_contexts);
+  aom_free(cm->frame_contexts);
   cm->frame_contexts = NULL;
 }
 
-void vp10_init_context_buffers(VP10_COMMON *cm) {
+void av1_init_context_buffers(AV1_COMMON *cm) {
   cm->setup_mi(cm);
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
     memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
 }
 
-void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) {
+void av1_swap_current_and_last_seg_map(AV1_COMMON *cm) {
   // Swap indices.
   const int tmp = cm->seg_map_idx;
   cm->seg_map_idx = cm->prev_seg_map_idx;
diff --git a/av1/common/alloccommon.h b/av1/common/alloccommon.h
index d2d2643..ad0b454 100644
--- a/av1/common/alloccommon.h
+++ b/av1/common/alloccommon.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ALLOCCOMMON_H_
-#define VP10_COMMON_ALLOCCOMMON_H_
+#ifndef AV1_COMMON_ALLOCCOMMON_H_
+#define AV1_COMMON_ALLOCCOMMON_H_
 
 #define INVALID_IDX -1  // Invalid buffer index.
 
@@ -17,29 +17,29 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct BufferPool;
 
-void vp10_remove_common(struct VP10Common *cm);
+void av1_remove_common(struct AV1Common *cm);
 
-int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height);
-void vp10_init_context_buffers(struct VP10Common *cm);
-void vp10_free_context_buffers(struct VP10Common *cm);
+int av1_alloc_context_buffers(struct AV1Common *cm, int width, int height);
+void av1_init_context_buffers(struct AV1Common *cm);
+void av1_free_context_buffers(struct AV1Common *cm);
 
-void vp10_free_ref_frame_buffers(struct BufferPool *pool);
+void av1_free_ref_frame_buffers(struct BufferPool *pool);
 #if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(struct VP10Common *cm);
+void av1_free_restoration_buffers(struct AV1Common *cm);
 #endif  // CONFIG_LOOP_RESTORATION
 
-int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height);
-void vp10_free_state_buffers(struct VP10Common *cm);
+int av1_alloc_state_buffers(struct AV1Common *cm, int width, int height);
+void av1_free_state_buffers(struct AV1Common *cm);
 
-void vp10_set_mb_mi(struct VP10Common *cm, int width, int height);
+void av1_set_mb_mi(struct AV1Common *cm, int width, int height);
 
-void vp10_swap_current_and_last_seg_map(struct VP10Common *cm);
+void av1_swap_current_and_last_seg_map(struct AV1Common *cm);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ALLOCCOMMON_H_
+#endif  // AV1_COMMON_ALLOCCOMMON_H_
diff --git a/av1/common/ans.h b/av1/common/ans.h
index c974ada..1a632ee 100644
--- a/av1/common/ans.h
+++ b/av1/common/ans.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ANS_H_
-#define VP10_COMMON_ANS_H_
+#ifndef AV1_COMMON_ANS_H_
+#define AV1_COMMON_ANS_H_
 // An implementation of Asymmetric Numeral Systems
 // http://arxiv.org/abs/1311.2540v2
 
 #include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
 #include "aom_ports/mem_ops.h"
 
@@ -250,9 +250,9 @@
 
 // TODO(aconverse): Replace trees with tokensets.
 static INLINE int uabs_read_tree(struct AnsDecoder *ans,
-                                 const vpx_tree_index *tree,
+                                 const aom_tree_index *tree,
                                  const AnsP8 *probs) {
-  vpx_tree_index i = 0;
+  aom_tree_index i = 0;
 
   while ((i = tree[i + uabs_read(ans, probs[i >> 1])]) > 0) continue;
 
@@ -313,8 +313,8 @@
   adjustment -= out_pdf[0];
   for (i = 0; i < in_syms; ++i) {
     int p = (p1 * src_pdf[i] + round_fact) >> ans_p8_shift;
-    p = VPXMIN(p, (int)rans_precision - in_syms);
-    p = VPXMAX(p, 1);
+    p = AOMMIN(p, (int)rans_precision - in_syms);
+    p = AOMMAX(p, 1);
     out_pdf[i + 1] = p;
     adjustment -= p;
   }
@@ -411,4 +411,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
-#endif  // VP10_COMMON_ANS_H_
+#endif  // AV1_COMMON_ANS_H_
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index 600e66b..fc72c98 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -11,8 +11,8 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
 #include "av1/common/common.h"
 
 static int16_t sinpi_1_9 = 0x14a3;
@@ -139,8 +139,8 @@
   return;
 }
 
-void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
+                            int dest_stride, int tx_type) {
   uint8x8_t d26u8, d27u8;
   int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
   uint32x2_t d26u32, d27u32;
@@ -156,7 +156,7 @@
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+      av1_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index ff5578d..8421926 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -11,8 +11,8 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
 #include "av1/common/common.h"
 
 static int16_t cospi_2_64 = 16305;
@@ -471,8 +471,8 @@
   return;
 }
 
-void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
+                            int dest_stride, int tx_type) {
   int i;
   uint8_t *d1, *d2;
   uint8x8_t d0u8, d1u8, d2u8, d3u8;
@@ -494,7 +494,7 @@
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+      av1_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/vp10_convolve.c b/av1/common/av1_convolve.c
similarity index 66%
rename from av1/common/vp10_convolve.c
rename to av1/common/av1_convolve.c
index b62bae5..dec6759 100644
--- a/av1/common/vp10_convolve.c
+++ b/av1/common/av1_convolve.c
@@ -1,10 +1,10 @@
 #include <assert.h>
 #include <string.h>
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_convolve.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_convolve.h"
 #include "av1/common/filter.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #define MAX_BLOCK_WIDTH (MAX_SB_SIZE)
@@ -12,10 +12,10 @@
 #define MAX_STEP (32)
 #define MAX_FILTER_TAP (12)
 
-void vp10_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
-                           int dst_stride, int w, int h,
-                           const InterpFilterParams filter_params,
-                           const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
+                          int dst_stride, int w, int h,
+                          const InterpFilterParams filter_params,
+                          const int subpel_x_q4, int x_step_q4, int avg) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= filter_size / 2 - 1;
@@ -23,7 +23,7 @@
     int x_q4 = subpel_x_q4;
     for (x = 0; x < w; ++x) {
       const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, x_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -40,10 +40,10 @@
   }
 }
 
-void vp10_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
-                          int dst_stride, int w, int h,
-                          const InterpFilterParams filter_params,
-                          const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
+                         int dst_stride, int w, int h,
+                         const InterpFilterParams filter_params,
+                         const int subpel_y_q4, int y_step_q4, int avg) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= src_stride * (filter_size / 2 - 1);
@@ -52,7 +52,7 @@
     int y_q4 = subpel_y_q4;
     for (y = 0; y < h; ++y) {
       const uint8_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, y_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k)
@@ -93,15 +93,15 @@
   }
 }
 
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                   int dst_stride, int w, int h,
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                  int dst_stride, int w, int h,
 #if CONFIG_DUAL_FILTER
-                   const INTERP_FILTER *interp_filter,
+                  const INTERP_FILTER *interp_filter,
 #else
-                   const INTERP_FILTER interp_filter,
+                  const INTERP_FILTER interp_filter,
 #endif
-                   const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
-                   int y_step_q4, int ref_idx) {
+                  const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
+                  int y_step_q4, int ref_idx) {
   int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
   int ignore_vert = y_step_q4 == 16 && subpel_y_q4 == 0;
 
@@ -115,25 +115,25 @@
   } else if (ignore_vert) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
     assert(filter_params.taps <= MAX_FILTER_TAP);
-    vp10_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
-                        subpel_x_q4, x_step_q4, ref_idx);
+    av1_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
+                       subpel_x_q4, x_step_q4, ref_idx);
   } else if (ignore_horiz) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
     assert(filter_params.taps <= MAX_FILTER_TAP);
-    vp10_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
-                       subpel_y_q4, y_step_q4, ref_idx);
+    av1_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
+                      subpel_y_q4, y_step_q4, ref_idx);
   } else {
     // temp's size is set to (maximum possible intermediate_height) *
     // MAX_BLOCK_WIDTH
@@ -143,9 +143,9 @@
     int temp_stride = MAX_BLOCK_WIDTH;
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params_x =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
     InterpFilterParams filter_params_y =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
     InterpFilterParams filter_params = filter_params_x;
 
     // The filter size implies the required number of reference pixels for
@@ -154,7 +154,7 @@
     int filter_size = filter_params_y.taps;
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
     int filter_size = filter_params.taps;
 #endif
     int intermediate_height =
@@ -162,30 +162,30 @@
 
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
-                        temp, temp_stride, w, intermediate_height,
-                        filter_params, subpel_x_q4, x_step_q4, 0);
+    av1_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
+                       temp, temp_stride, w, intermediate_height, filter_params,
+                       subpel_x_q4, x_step_q4, 0);
 
 #if CONFIG_DUAL_FILTER
     filter_params = filter_params_y;
 #else
-    filter_params = vp10_get_interp_filter_params(interp_filter);
+    filter_params = av1_get_interp_filter_params(interp_filter);
 #endif
     filter_size = filter_params.taps;
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
-                       dst, dst_stride, w, h, filter_params, subpel_y_q4,
-                       y_step_q4, ref_idx);
+    av1_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
+                      dst, dst_stride, w, h, filter_params, subpel_y_q4,
+                      y_step_q4, ref_idx);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
-                                  uint16_t *dst, int dst_stride, int w, int h,
-                                  const InterpFilterParams filter_params,
-                                  const int subpel_x_q4, int x_step_q4, int avg,
-                                  int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
+                                 uint16_t *dst, int dst_stride, int w, int h,
+                                 const InterpFilterParams filter_params,
+                                 const int subpel_x_q4, int x_step_q4, int avg,
+                                 int bd) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= filter_size / 2 - 1;
@@ -193,7 +193,7 @@
     int x_q4 = subpel_x_q4;
     for (x = 0; x < w; ++x) {
       const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, x_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -211,11 +211,11 @@
   }
 }
 
-void vp10_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
-                                 uint16_t *dst, int dst_stride, int w, int h,
-                                 const InterpFilterParams filter_params,
-                                 const int subpel_y_q4, int y_step_q4, int avg,
-                                 int bd) {
+void av1_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
+                                uint16_t *dst, int dst_stride, int w, int h,
+                                const InterpFilterParams filter_params,
+                                const int subpel_y_q4, int y_step_q4, int avg,
+                                int bd) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= src_stride * (filter_size / 2 - 1);
@@ -224,7 +224,7 @@
     int y_q4 = subpel_y_q4;
     for (y = 0; y < h; ++y) {
       const uint16_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, y_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k)
@@ -267,16 +267,16 @@
   }
 }
 
-void vp10_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
-                          int dst_stride, int w, int h,
+void av1_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
+                         int dst_stride, int w, int h,
 #if CONFIG_DUAL_FILTER
-                          const INTERP_FILTER *interp_filter,
+                         const INTERP_FILTER *interp_filter,
 #else
-                          const INTERP_FILTER interp_filter,
+                         const INTERP_FILTER interp_filter,
 #endif
-                          const int subpel_x_q4, int x_step_q4,
-                          const int subpel_y_q4, int y_step_q4, int ref_idx,
-                          int bd) {
+                         const int subpel_x_q4, int x_step_q4,
+                         const int subpel_y_q4, int y_step_q4, int ref_idx,
+                         int bd) {
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
@@ -292,25 +292,25 @@
   } else if (ignore_vert) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
-    vp10_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
-                               filter_params, subpel_x_q4, x_step_q4, ref_idx,
-                               bd);
+    av1_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
+                              filter_params, subpel_x_q4, x_step_q4, ref_idx,
+                              bd);
   } else if (ignore_horiz) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
-    vp10_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
-                              filter_params, subpel_y_q4, y_step_q4, ref_idx,
-                              bd);
+    av1_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
+                             filter_params, subpel_y_q4, y_step_q4, ref_idx,
+                             bd);
   } else {
     // temp's size is set to (maximum possible intermediate_height) *
     // MAX_BLOCK_WIDTH
@@ -321,21 +321,21 @@
 
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params_x =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
     InterpFilterParams filter_params_y =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
     InterpFilterParams filter_params = filter_params_x;
     int filter_size = filter_params_y.taps;
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
     int filter_size = filter_params.taps;
 #endif
 
     int intermediate_height =
         (((h - 1) * y_step_q4 + subpel_y_q4) >> SUBPEL_BITS) + filter_size;
 
-    vp10_highbd_convolve_horiz(
+    av1_highbd_convolve_horiz(
         src - src_stride * (filter_size / 2 - 1), src_stride, temp, temp_stride,
         w, intermediate_height, filter_params, subpel_x_q4, x_step_q4, 0, bd);
 
@@ -345,9 +345,9 @@
     filter_size = filter_params.taps;
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
-                              temp_stride, dst, dst_stride, w, h, filter_params,
-                              subpel_y_q4, y_step_q4, ref_idx, bd);
+    av1_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
+                             temp_stride, dst, dst_stride, w, h, filter_params,
+                             subpel_y_q4, y_step_q4, ref_idx, bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/av1_convolve.h b/av1/common/av1_convolve.h
new file mode 100644
index 0000000..f082a8a
--- /dev/null
+++ b/av1/common/av1_convolve.h
@@ -0,0 +1,35 @@
+#ifndef AV1_COMMON_AV1_CONVOLVE_H_
+#define AV1_COMMON_AV1_CONVOLVE_H_
+#include "av1/common/filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                  int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                  const INTERP_FILTER *interp_filter,
+#else
+                  const INTERP_FILTER interp_filter,
+#endif
+                  const int subpel_x, int xstep, const int subpel_y, int ystep,
+                  int avg);
+
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                         int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                         const INTERP_FILTER *interp_filter,
+#else
+                         const INTERP_FILTER interp_filter,
+#endif
+                         const int subpel_x, int xstep, const int subpel_y,
+                         int ystep, int avg, int bd);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // AV1_COMMON_AV1_CONVOLVE_H_
diff --git a/av1/common/vp10_fwd_txfm.c b/av1/common/av1_fwd_txfm.c
similarity index 92%
rename from av1/common/vp10_fwd_txfm.c
rename to av1/common/av1_fwd_txfm.c
index eb1c018..221f4e1 100644
--- a/av1/common/vp10_fwd_txfm.c
+++ b/av1/common/av1_fwd_txfm.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
 
-void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
   }
 }
 
-void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
@@ -87,8 +87,7 @@
   output[1] = 0;
 }
 
-void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
-                    int stride) {
+void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
   int i, j;
   tran_low_t intermediate[64];
   int pass;
@@ -173,7 +172,7 @@
   }
 }
 
-void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
@@ -183,7 +182,7 @@
   output[1] = 0;
 }
 
-void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -363,7 +362,7 @@
   }
 }
 
-void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 16; ++r)
@@ -386,7 +385,7 @@
   return rv;
 }
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   tran_high_t step[32];
   // Stage 1
   step[0] = input[0] + input[(32 - 1)];
@@ -709,7 +708,7 @@
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
 }
 
-void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -717,7 +716,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
@@ -726,7 +725,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -736,7 +735,7 @@
 // Note that although we use dct_32_round in dct32 computation flow,
 // this 2d fdct32x32 for rate-distortion optimization loop is operating
 // within 16 bits precision.
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -744,11 +743,11 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
       //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
-      //           PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c
+      //           PS: also change code in av1_dsp/x86/av1_dct_sse2.c
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
 
@@ -756,12 +755,12 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
-void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 32; ++r)
@@ -771,44 +770,43 @@
   output[1] = 0;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
-                           int stride) {
-  vp10_fdct4x4_c(input, output, stride);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+                          int stride) {
+  av1_fdct4x4_c(input, output, stride);
 }
 
-void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
-                           int stride) {
-  vp10_fdct8x8_c(input, final_output, stride);
+void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+                          int stride) {
+  av1_fdct8x8_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
-                             int stride) {
-  vp10_fdct8x8_1_c(input, final_output, stride);
+void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+                            int stride) {
+  av1_fdct8x8_1_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
-                             int stride) {
-  vp10_fdct16x16_c(input, output, stride);
+void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+                            int stride) {
+  av1_fdct16x16_c(input, output, stride);
 }
 
-void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+                              int stride) {
+  av1_fdct16x16_1_c(input, output, stride);
+}
+
+void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+  av1_fdct32x32_c(input, out, stride);
+}
+
+void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
                                int stride) {
-  vp10_fdct16x16_1_c(input, output, stride);
+  av1_fdct32x32_rd_c(input, out, stride);
 }
 
-void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
-                             int stride) {
-  vp10_fdct32x32_c(input, out, stride);
+void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+                              int stride) {
+  av1_fdct32x32_1_c(input, out, stride);
 }
-
-void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
-                                int stride) {
-  vp10_fdct32x32_rd_c(input, out, stride);
-}
-
-void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
-                               int stride) {
-  vp10_fdct32x32_1_c(input, out, stride);
-}
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/vp10_fwd_txfm.h b/av1/common/av1_fwd_txfm.h
similarity index 71%
rename from av1/common/vp10_fwd_txfm.h
rename to av1/common/av1_fwd_txfm.h
index a0481d3..96d942e 100644
--- a/av1/common/vp10_fwd_txfm.h
+++ b/av1/common/av1_fwd_txfm.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_VP10_FWD_TXFM_H_
-#define VP10_COMMON_VP10_FWD_TXFM_H_
+#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
+#define AV1_COMMON_AV1_FWD_TXFM_H_
 
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/fwd_txfm.h"
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif  // VP10_COMMON_VP10_FWD_TXFM_H_
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif  // AV1_COMMON_AV1_FWD_TXFM_H_
diff --git a/av1/common/vp10_fwd_txfm1d.c b/av1/common/av1_fwd_txfm1d.c
similarity index 98%
rename from av1/common/vp10_fwd_txfm1d.c
rename to av1/common/av1_fwd_txfm1d.c
index 6dff077..3dc960c 100644
--- a/av1/common/vp10_fwd_txfm1d.c
+++ b/av1/common/av1_fwd_txfm1d.c
@@ -9,7 +9,7 @@
  */
 
 #include <stdlib.h>
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
 #define range_check(stage, input, buf, size, bit)                         \
   {                                                                       \
@@ -40,8 +40,8 @@
   }
 #endif
 
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -83,8 +83,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -168,8 +168,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -339,8 +339,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
@@ -700,8 +700,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -765,8 +765,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -880,8 +880,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -1094,8 +1094,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
diff --git a/av1/common/av1_fwd_txfm1d.h b/av1/common/av1_fwd_txfm1d.h
new file mode 100644
index 0000000..7aab70e
--- /dev/null
+++ b/av1/common/av1_fwd_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_FWD_TXFM1D_H_
+#define AV1_FWD_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct64_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // AV1_FWD_TXFM1D_H_
diff --git a/av1/common/vp10_fwd_txfm2d.c b/av1/common/av1_fwd_txfm2d.c
similarity index 76%
rename from av1/common/vp10_fwd_txfm2d.c
rename to av1/common/av1_fwd_txfm2d.c
index 85c6b68..dc984e1 100644
--- a/av1/common/vp10_fwd_txfm2d.c
+++ b/av1/common/av1_fwd_txfm2d.c
@@ -10,22 +10,22 @@
 
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
-#include "av1/common/vp10_fwd_txfm2d_cfg.h"
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
 
 static INLINE TxfmFunc fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_fdct4_new;
-    case TXFM_TYPE_DCT8: return vp10_fdct8_new;
-    case TXFM_TYPE_DCT16: return vp10_fdct16_new;
-    case TXFM_TYPE_DCT32: return vp10_fdct32_new;
-    case TXFM_TYPE_ADST4: return vp10_fadst4_new;
-    case TXFM_TYPE_ADST8: return vp10_fadst8_new;
-    case TXFM_TYPE_ADST16: return vp10_fadst16_new;
-    case TXFM_TYPE_ADST32: return vp10_fadst32_new;
+    case TXFM_TYPE_DCT4: return av1_fdct4_new;
+    case TXFM_TYPE_DCT8: return av1_fdct8_new;
+    case TXFM_TYPE_DCT16: return av1_fdct16_new;
+    case TXFM_TYPE_DCT32: return av1_fdct32_new;
+    case TXFM_TYPE_ADST4: return av1_fadst4_new;
+    case TXFM_TYPE_ADST8: return av1_fadst8_new;
+    case TXFM_TYPE_ADST16: return av1_fadst16_new;
+    case TXFM_TYPE_ADST32: return av1_fadst32_new;
     default: assert(0); return NULL;
   }
 }
@@ -76,42 +76,42 @@
   }
 }
 
-void vp10_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd) {
   int32_t txfm_buf[4 * 4];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_4X4);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_4X4);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd) {
   int32_t txfm_buf[8 * 8];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_8X8);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X8);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[16 * 16];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_16X16);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X16);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[32 * 32];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[64 * 64];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
@@ -150,14 +150,14 @@
 };
 #endif  // CONFIG_EXT_TX
 
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
   cfg.cfg = fwd_txfm_cfg_ls[tx_type][tx_size];
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
   TXFM_2D_FLIP_CFG cfg;
   switch (tx_type) {
     case DCT_DCT:
diff --git a/av1/common/vp10_fwd_txfm2d_cfg.h b/av1/common/av1_fwd_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_fwd_txfm2d_cfg.h
rename to av1/common/av1_fwd_txfm2d_cfg.h
index f780b87..49d324d 100644
--- a/av1/common/vp10_fwd_txfm2d_cfg.h
+++ b/av1/common/av1_fwd_txfm2d_cfg.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_FWD_TXFM2D_CFG_H_
-#define VP10_FWD_TXFM2D_CFG_H_
+#ifndef AV1_FWD_TXFM2D_CFG_H_
+#define AV1_FWD_TXFM2D_CFG_H_
 #include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
 //  ---------------- config fwd_dct_dct_4 ----------------
 static const int8_t fwd_shift_dct_dct_4[3] = { 2, 0, 0 };
 static const int8_t fwd_stage_range_col_dct_dct_4[4] = { 15, 16, 17, 17 };
@@ -440,4 +440,4 @@
   TXFM_TYPE_ADST32,                 // .txfm_type_col
   TXFM_TYPE_DCT32
 };      // .txfm_type_row
-#endif  // VP10_FWD_TXFM2D_CFG_H_
+#endif  // AV1_FWD_TXFM2D_CFG_H_
diff --git a/av1/common/vp10_inv_txfm.c b/av1/common/av1_inv_txfm.c
similarity index 95%
rename from av1/common/vp10_inv_txfm.c
rename to av1/common/av1_inv_txfm.c
index a74de09..76a49a2 100644
--- a/av1/common/vp10_inv_txfm.c
+++ b/av1/common/av1_inv_txfm.c
@@ -12,10 +12,10 @@
 #include <math.h>
 #include <string.h>
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_inv_txfm.h"
 
-void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -67,8 +67,7 @@
   }
 }
 
-void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
-                          int dest_stride) {
+void av1_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
@@ -94,7 +93,7 @@
   }
 }
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   // stage 1
@@ -114,7 +113,7 @@
   output[3] = WRAPLOW(step[0] - step[3]);
 }
 
-void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -122,7 +121,7 @@
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_idct4_c(input, outptr);
+    av1_idct4_c(input, outptr);
     input += 4;
     outptr += 4;
   }
@@ -130,7 +129,7 @@
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_idct4_c(temp_in, temp_out);
+    av1_idct4_c(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 4));
@@ -138,8 +137,8 @@
   }
 }
 
-void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
-                          int dest_stride) {
+void av1_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+                         int dest_stride) {
   int i;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -155,7 +154,7 @@
   }
 }
 
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -209,7 +208,7 @@
   output[7] = WRAPLOW(step1[0] - step1[7]);
 }
 
-void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -217,7 +216,7 @@
 
   // First transform rows
   for (i = 0; i < 8; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -225,7 +224,7 @@
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -233,7 +232,7 @@
   }
 }
 
-void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -245,7 +244,7 @@
   }
 }
 
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -282,7 +281,7 @@
   output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3));
 }
 
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output) {
   int s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_high_t x0 = input[7];
@@ -359,7 +358,7 @@
   output[7] = WRAPLOW(-x1);
 }
 
-void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -368,7 +367,7 @@
   // First transform rows
   // only first 4 row has non-zero coefs
   for (i = 0; i < 4; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -376,7 +375,7 @@
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -384,7 +383,7 @@
   }
 }
 
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
 
@@ -549,8 +548,8 @@
   output[15] = WRAPLOW(step2[0] - step2[15]);
 }
 
-void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
-                              int stride) {
+void av1_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+                             int stride) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -558,7 +557,7 @@
 
   // First transform rows
   for (i = 0; i < 16; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -566,7 +565,7 @@
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -574,7 +573,7 @@
   }
 }
 
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -745,8 +744,8 @@
   output[15] = WRAPLOW(-x1);
 }
 
-void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
-                             int stride) {
+void av1_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+                            int stride) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -755,7 +754,7 @@
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -763,7 +762,7 @@
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -771,8 +770,7 @@
   }
 }
 
-void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
+void av1_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -784,7 +782,7 @@
   }
 }
 
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
 
@@ -1151,8 +1149,8 @@
   output[31] = WRAPLOW(step1[0] - step1[31]);
 }
 
-void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+                              int stride) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -1170,7 +1168,7 @@
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
 
     if (zero_coeff[0] | zero_coeff[1])
-      vp10_idct32_c(input, outptr);
+      av1_idct32_c(input, outptr);
     else
       memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
@@ -1180,7 +1178,7 @@
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1188,8 +1186,8 @@
   }
 }
 
-void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
-                             int stride) {
+void av1_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+                            int stride) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1198,7 +1196,7 @@
   // Rows
   // only upper-left 8x8 has non-zero coeff
   for (i = 0; i < 8; ++i) {
-    vp10_idct32_c(input, outptr);
+    av1_idct32_c(input, outptr);
     input += 32;
     outptr += 32;
   }
@@ -1206,7 +1204,7 @@
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1214,8 +1212,7 @@
   }
 }
 
-void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
+void av1_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
 
@@ -1229,9 +1226,9 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -1287,8 +1284,8 @@
   }
 }
 
-void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
-                                 int dest_stride, int bd) {
+void av1_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+                                int dest_stride, int bd) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
@@ -1320,7 +1317,7 @@
   }
 }
 
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1341,8 +1338,8 @@
   output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
 }
 
-void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -1351,7 +1348,7 @@
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct4_c(input, outptr, bd);
+    av1_highbd_idct4_c(input, outptr, bd);
     input += 4;
     outptr += 4;
   }
@@ -1359,7 +1356,7 @@
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_highbd_idct4_c(temp_in, temp_out, bd);
+    av1_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1367,8 +1364,8 @@
   }
 }
 
-void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int dest_stride, int bd) {
+void av1_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int dest_stride, int bd) {
   int i;
   tran_high_t a1;
   tran_low_t out =
@@ -1387,7 +1384,7 @@
   }
 }
 
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -1405,7 +1402,7 @@
   step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd);
 
   // stage 2 & stage 3 - even half
-  vp10_highbd_idct4_c(step1, step1, bd);
+  av1_highbd_idct4_c(step1, step1, bd);
 
   // stage 2 - odd half
   step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1432,8 +1429,8 @@
   output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
 }
 
-void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -1442,7 +1439,7 @@
 
   // First transform rows.
   for (i = 0; i < 8; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
@@ -1450,7 +1447,7 @@
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1458,8 +1455,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+void av1_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
@@ -1473,7 +1470,7 @@
   }
 }
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -1511,7 +1508,7 @@
   output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd);
 }
 
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[7];
@@ -1588,8 +1585,8 @@
   output[7] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1599,14 +1596,14 @@
   // First transform rows.
   // Only first 4 row has non-zero coefs.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1614,7 +1611,7 @@
   }
 }
 
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1780,8 +1777,8 @@
   output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
 }
 
-void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -1790,7 +1787,7 @@
 
   // First transform rows.
   for (i = 0; i < 16; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1798,7 +1795,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1806,8 +1803,7 @@
   }
 }
 
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
-                           int bd) {
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -1977,8 +1973,8 @@
   output[15] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+void av1_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1988,7 +1984,7 @@
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1996,7 +1992,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2004,8 +2000,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
+void av1_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
@@ -2389,8 +2385,8 @@
   output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
 }
 
-void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
-                                      int stride, int bd) {
+void av1_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+                                     int stride, int bd) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -2427,8 +2423,8 @@
   }
 }
 
-void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+void av1_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int bd) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -2453,8 +2449,8 @@
   }
 }
 
-void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
+void av1_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int bd) {
   int i, j;
   int a1;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -2469,4 +2465,4 @@
     dest += stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/vp10_inv_txfm.h b/av1/common/av1_inv_txfm.h
similarity index 76%
rename from av1/common/vp10_inv_txfm.h
rename to av1/common/av1_inv_txfm.h
index b53db48..4295aa0 100644
--- a/av1/common/vp10_inv_txfm.h
+++ b/av1/common/av1_inv_txfm.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_INV_TXFM_H_
-#define VPX_DSP_INV_TXFM_H_
+#ifndef AOM_DSP_INV_TXFM_H_
+#define AOM_DSP_INV_TXFM_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
@@ -41,7 +41,7 @@
   return rv;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
@@ -63,7 +63,7 @@
   tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
   return rv;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -84,36 +84,36 @@
 // bd of x uses trans_low with 8+x bits, need to remove 24-x bits
 
 #define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) \
   ((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #else  // CONFIG_EMULATE_HARDWARE
 
 #define WRAPLOW(x) ((int32_t)check_range(x))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #endif  // CONFIG_EMULATE_HARDWARE
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
 static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
                                              int bd) {
@@ -129,4 +129,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VPX_DSP_INV_TXFM_H_
+#endif  // AOM_DSP_INV_TXFM_H_
diff --git a/av1/common/vp10_inv_txfm1d.c b/av1/common/av1_inv_txfm1d.c
similarity index 98%
rename from av1/common/vp10_inv_txfm1d.c
rename to av1/common/av1_inv_txfm1d.c
index 76fb623..dbb463f 100644
--- a/av1/common/vp10_inv_txfm1d.c
+++ b/av1/common/av1_inv_txfm1d.c
@@ -9,7 +9,7 @@
  */
 
 #include <stdlib.h>
-#include "av1/common/vp10_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d.h"
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
 #define range_check(stage, input, buf, size, bit)                         \
   {                                                                       \
@@ -40,8 +40,8 @@
   }
 #endif
 
-void vp10_idct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -83,8 +83,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -168,8 +168,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -339,8 +339,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
@@ -700,8 +700,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -765,8 +765,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -880,8 +880,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -1097,8 +1097,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
diff --git a/av1/common/av1_inv_txfm1d.h b/av1/common/av1_inv_txfm1d.h
new file mode 100644
index 0000000..5937617
--- /dev/null
+++ b/av1/common/av1_inv_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_INV_TXFM1D_H_
+#define AV1_INV_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_idct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // AV1_INV_TXFM1D_H_
diff --git a/av1/common/vp10_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
similarity index 79%
rename from av1/common/vp10_inv_txfm2d.c
rename to av1/common/av1_inv_txfm2d.c
index 60606c9..844a38a 100644
--- a/av1/common/vp10_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -8,22 +8,22 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/vp10_inv_txfm1d.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/av1_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 
 static INLINE TxfmFunc inv_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_idct4_new;
-    case TXFM_TYPE_DCT8: return vp10_idct8_new;
-    case TXFM_TYPE_DCT16: return vp10_idct16_new;
-    case TXFM_TYPE_DCT32: return vp10_idct32_new;
-    case TXFM_TYPE_ADST4: return vp10_iadst4_new;
-    case TXFM_TYPE_ADST8: return vp10_iadst8_new;
-    case TXFM_TYPE_ADST16: return vp10_iadst16_new;
-    case TXFM_TYPE_ADST32: return vp10_iadst32_new;
+    case TXFM_TYPE_DCT4: return av1_idct4_new;
+    case TXFM_TYPE_DCT8: return av1_idct8_new;
+    case TXFM_TYPE_DCT16: return av1_idct16_new;
+    case TXFM_TYPE_DCT32: return av1_idct32_new;
+    case TXFM_TYPE_ADST4: return av1_iadst4_new;
+    case TXFM_TYPE_ADST8: return av1_iadst8_new;
+    case TXFM_TYPE_ADST16: return av1_iadst16_new;
+    case TXFM_TYPE_ADST32: return av1_iadst32_new;
     default: assert(0); return NULL;
   }
 }
@@ -62,14 +62,14 @@
 };
 #endif
 
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
   cfg.cfg = inv_txfm_cfg_ls[tx_type][tx_size];
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(int tx_type) {
   TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL };
   switch (tx_type) {
     case DCT_DCT:
@@ -130,62 +130,62 @@
   }
 }
 
-void vp10_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
+                              int stride, int tx_type, int bd) {
   int txfm_buf[4 * 4 + 4 + 4];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_4X4);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_4X4);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 4, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
+                              int stride, int tx_type, int bd) {
   int txfm_buf[8 * 8 + 8 + 8];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_8X8);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_8X8);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 8, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[16 * 16 + 16 + 16];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_16X16);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_16X16);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 16, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[32 * 32 + 32 + 32];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_32X32);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[64 * 64 + 64 + 64];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_64x64_cfg(tx_type);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
 }
diff --git a/av1/common/vp10_inv_txfm2d_cfg.h b/av1/common/av1_inv_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_inv_txfm2d_cfg.h
rename to av1/common/av1_inv_txfm2d_cfg.h
index 9bfa420..ee018fb 100644
--- a/av1/common/vp10_inv_txfm2d_cfg.h
+++ b/av1/common/av1_inv_txfm2d_cfg.h
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_INV_TXFM2D_CFG_H_
-#define VP10_INV_TXFM2D_CFG_H_
-#include "av1/common/vp10_inv_txfm1d.h"
+#ifndef AV1_INV_TXFM2D_CFG_H_
+#define AV1_INV_TXFM2D_CFG_H_
+#include "av1/common/av1_inv_txfm1d.h"
 //  ---------------- config inv_dct_dct_4 ----------------
 static const int8_t inv_shift_dct_dct_4[2] = { 0, -4 };
 static const int8_t inv_stage_range_col_dct_dct_4[4] = { 18, 18, 17, 17 };
@@ -441,4 +441,4 @@
   TXFM_TYPE_DCT32
 };  // .txfm_type_row
 
-#endif  // VP10_INV_TXFM2D_CFG_H_
+#endif  // AV1_INV_TXFM2D_CFG_H_
diff --git a/av1/common/vp10_rtcd.c b/av1/common/av1_rtcd.c
similarity index 85%
rename from av1/common/vp10_rtcd.c
rename to av1/common/av1_rtcd.c
index 7fce6b9..fad509c 100644
--- a/av1/common/vp10_rtcd.c
+++ b/av1/common/av1_rtcd.c
@@ -7,12 +7,12 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #define RTCD_C
-#include "./vp10_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./av1_rtcd.h"
+#include "aom_ports/aom_once.h"
 
-void vp10_rtcd() {
+void av1_rtcd() {
   // TODO(JBB): Remove this once, by insuring that both the encoder and
   // decoder setup functions are protected by once();
   once(setup_rtcd_internal);
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
new file mode 100644
index 0000000..c1b0f9e
--- /dev/null
+++ b/av1/common/av1_rtcd_defs.pl
@@ -0,0 +1,912 @@
+sub av1_common_forward_decls() {
+print <<EOF
+/*
+ * AV1
+ */
+
+#include "aom/aom_integer.h"
+#include "av1/common/common.h"
+#include "av1/common/enums.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/filter.h"
+#include "av1/common/av1_txfm.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct aom_variance_vtable;
+struct search_site_config;
+struct mv;
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls qw/av1_common_forward_decls/;
+
+# functions that are 64 bit only.
+$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
+if ($opts{arch} eq "x86_64") {
+  $mmx_x86_64 = 'mmx';
+  $sse2_x86_64 = 'sse2';
+  $ssse3_x86_64 = 'ssse3';
+  $avx_x86_64 = 'avx';
+  $avx2_x86_64 = 'avx2';
+}
+
+#
+# 10/12-tap convolution filters
+#
+add_proto qw/void av1_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_horiz ssse3/;
+
+add_proto qw/void av1_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_vert ssse3/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void av1_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+  specialize qw/av1_highbd_convolve_horiz sse4_1/;
+  add_proto qw/void av1_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+  specialize qw/av1_highbd_convolve_vert sse4_1/;
+}
+
+#
+# dct
+#
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
+
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4/;
+
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8/;
+
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
+
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16/;
+
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
+
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32/;
+
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd/;
+
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
+  } else {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add sse2/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
+
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4 sse2/;
+
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8 sse2/;
+
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
+
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16 sse2/;
+
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
+
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32 sse2/;
+
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
+  }
+} else {
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
+  } else {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2 neon dspr2/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2 neon dspr2/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add sse2 dspr2/;
+
+    if (aom_config("CONFIG_EXT_TX") ne "yes") {
+      specialize qw/av1_iht4x4_16_add msa/;
+      specialize qw/av1_iht8x8_64_add msa/;
+      specialize qw/av1_iht16x16_256_add msa/;
+    }
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
+  }
+}
+
+if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+  add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_nuq/;
+
+  add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_fp_nuq/;
+
+  add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_32x32_nuq/;
+
+  add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_32x32_fp_nuq/;
+}
+
+# EXT_INTRA predictor functions
+if (aom_config("CONFIG_EXT_INTRA") eq "yes") {
+  add_proto qw/void av1_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_dc_filter_predictor sse4_1/;
+  add_proto qw/void av1_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_v_filter_predictor sse4_1/;
+  add_proto qw/void av1_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_h_filter_predictor sse4_1/;
+  add_proto qw/void av1_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d45_filter_predictor sse4_1/;
+  add_proto qw/void av1_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d135_filter_predictor sse4_1/;
+  add_proto qw/void av1_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d117_filter_predictor sse4_1/;
+  add_proto qw/void av1_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d153_filter_predictor sse4_1/;
+  add_proto qw/void av1_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d207_filter_predictor sse4_1/;
+  add_proto qw/void av1_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d63_filter_predictor sse4_1/;
+  add_proto qw/void av1_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_tm_filter_predictor sse4_1/;
+  # High bitdepth functions
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/void av1_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_dc_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_v_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_h_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d45_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d135_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d117_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d153_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d207_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d63_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_tm_filter_predictor sse4_1/;
+  }
+}
+
+# High bitdepth functions
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  #
+  # Sub Pixel Filters
+  #
+  add_proto qw/void av1_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_copy/;
+
+  add_proto qw/void av1_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_avg/;
+
+  add_proto qw/void av1_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+
+  #
+  # dct
+  #
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  add_proto qw/void av1_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht4x4_16_add/;
+
+  add_proto qw/void av1_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht4x8_32_add/;
+
+  add_proto qw/void av1_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x4_32_add/;
+
+  add_proto qw/void av1_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x16_128_add/;
+
+  add_proto qw/void av1_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x8_128_add/;
+
+  add_proto qw/void av1_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x32_512_add/;
+
+  add_proto qw/void av1_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht32x16_512_add/;
+
+  add_proto qw/void av1_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x8_64_add/;
+
+  add_proto qw/void av1_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x16_256_add/;
+}
+
+#
+# Encoder functions below this point.
+#
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+
+# ENCODEMB INVOKE
+
+if (aom_config("CONFIG_AOM_QM") eq "yes") {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # the transform coefficients are held in 32-bit
+    # values, so the assembler code for av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    specialize qw/av1_fdct8x8_quant/;
+  } else {
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
+
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+  }
+} else {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # the transform coefficients are held in 32-bit
+    # values, so the assembler code for av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp/;
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/;
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant/;
+  } else {
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error sse2 avx2 msa/;
+
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon sse2/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp neon sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/, "$ssse3_x86_64";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant sse2 ssse3 neon/;
+  }
+
+}
+
+# fdct functions
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2/;
+
+  add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x8/;
+
+  add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x4/;
+
+  add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x16/;
+
+  add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x8/;
+
+  add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x32/;
+
+  add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x16/;
+
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2/;
+
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2/;
+
+  add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x32/;
+
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4/;
+} else {
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2/;
+
+  add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x8/;
+
+  add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x4/;
+
+  add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x16/;
+
+  add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x8/;
+
+  add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x32/;
+
+  add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x16/;
+
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2/;
+
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2/;
+
+  if (aom_config("CONFIG_EXT_TX") ne "yes") {
+    specialize qw/av1_fht4x4 msa/;
+    specialize qw/av1_fht8x8 msa/;
+    specialize qw/av1_fht16x16 msa/;
+  }
+
+  add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x32/;
+
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4/;
+}
+
+add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
+  specialize qw/av1_fwd_idtx/;
+
+# Inverse transform
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_1_add/;
+
+  add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_16_add/;
+
+  add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_1_add/;
+
+  add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_64_add/;
+
+  add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_12_add/;
+
+  add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_1_add/;
+
+  add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_256_add/;
+
+  add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_10_add/;
+
+  add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1024_add/;
+
+  add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_34_add/;
+
+  add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1_add/;
+
+  add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_1_add/;
+
+  add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_16_add/;
+
+  add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct4x4_1_add/;
+
+  add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct8x8_1_add/;
+
+  add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct16x16_1_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1024_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_34_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1_add/;
+
+  add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_1_add/;
+
+  add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_16_add/;
+
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add/;
+
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add/;
+
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add/;
+
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add/;
+
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add/;
+  } else {
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add sse2/;
+
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add sse2/;
+
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add sse2/;
+
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add sse2/;
+
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add sse2/;
+  }  # CONFIG_EMULATE_HARDWARE
+} else {
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add/;
+
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add/;
+
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add/;
+
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add/;
+
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add/;
+
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add/;
+
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add/;
+
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add/;
+
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add/;
+
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add/;
+
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add/;
+
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
+
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
+  } else {
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add sse2/;
+
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add sse2/;
+
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add sse2/;
+
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add sse2/;
+
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add sse2/;
+
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add sse2/;
+
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add sse2/;
+
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add sse2/;
+
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add sse2/;
+
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add sse2/;
+
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add sse2/;
+
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
+
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
+  }  # CONFIG_EMULATE_HARDWARE
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  #fwd txfm
+  add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
+
+  #inv txfm
+  add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_32x32/;
+  add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_64x64/;
+}
+
+#
+# Motion search
+#
+add_proto qw/int av1_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
+specialize qw/av1_full_search_sad sse3 sse4_1/;
+$av1_full_search_sad_sse3=av1_full_search_sadx3;
+$av1_full_search_sad_sse4_1=av1_full_search_sadx8;
+
+add_proto qw/int av1_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_diamond_search_sad/;
+
+add_proto qw/int av1_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_full_range_search/;
+
+add_proto qw/void av1_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+specialize qw/av1_temporal_filter_apply sse2 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+
+  # ENCODEMB INVOKE
+  if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+    add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_nuq/;
+
+    add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_fp_nuq/;
+
+    add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_32x32_nuq/;
+
+    add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_32x32_fp_nuq/;
+  }
+
+  add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
+  specialize qw/av1_highbd_block_error sse2/;
+
+  if (aom_config("CONFIG_AOM_QM") eq "yes") {
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+    add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+  } else {
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+    specialize qw/av1_highbd_quantize_fp sse4_1/;
+
+    add_proto qw/void av1_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+    specialize qw/av1_highbd_quantize_b/;
+  }
+
+  # fdct functions
+  add_proto qw/void av1_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht4x4 sse4_1/;
+
+  add_proto qw/void av1_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht4x8/;
+
+  add_proto qw/void av1_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x4/;
+
+  add_proto qw/void av1_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x16/;
+
+  add_proto qw/void av1_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x8/;
+
+  add_proto qw/void av1_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x32/;
+
+  add_proto qw/void av1_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht32x16/;
+
+  add_proto qw/void av1_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x8/;
+
+  add_proto qw/void av1_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x16/;
+
+  add_proto qw/void av1_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht32x32/;
+
+  add_proto qw/void av1_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_highbd_fwht4x4/;
+
+  add_proto qw/void av1_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+  specialize qw/av1_highbd_temporal_filter_apply/;
+
+}
+# End av1_high encoder functions
+
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+  add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
+  specialize qw/av1_wedge_sse_from_residuals sse2/;
+  add_proto qw/int av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
+  specialize qw/av1_wedge_sign_from_residuals sse2/;
+  add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
+  specialize qw/av1_wedge_compute_delta_squares sse2/;
+}
+
+}
+# end encoder functions
+1;
diff --git a/av1/common/vp10_txfm.h b/av1/common/av1_txfm.h
similarity index 96%
rename from av1/common/vp10_txfm.h
rename to av1/common/av1_txfm.h
index bfeb3ea..289f953 100644
--- a/av1/common/vp10_txfm.h
+++ b/av1/common/av1_txfm.h
@@ -7,16 +7,16 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VP10_TXFM_H_
-#define VP10_TXFM_H_
+#ifndef AV1_TXFM_H_
+#define AV1_TXFM_H_
 
 #include <assert.h>
 #include <math.h>
 #include <stdio.h>
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 static const int cos_bit_min = 10;
 static const int cos_bit_max = 16;
@@ -198,10 +198,10 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size);
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
 #ifdef __cplusplus
 }
 #endif  // __cplusplus
 
-#endif  // VP10_TXFM_H_
+#endif  // AV1_TXFM_H_
diff --git a/av1/common/blockd.c b/av1/common/blockd.c
index ee95271..b13e562 100644
--- a/av1/common/blockd.c
+++ b/av1/common/blockd.c
@@ -14,8 +14,8 @@
 
 #include "av1/common/blockd.h"
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *left_mi, int b) {
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+                                    const MODE_INFO *left_mi, int b) {
   if (b == 0 || b == 2) {
     if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
 
@@ -26,8 +26,8 @@
   }
 }
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                      const MODE_INFO *above_mi, int b) {
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+                                     const MODE_INFO *above_mi, int b) {
   if (b == 0 || b == 1) {
     if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
 
@@ -38,7 +38,7 @@
   }
 }
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -81,18 +81,18 @@
   }
 }
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
-                                    BLOCK_SIZE bsize,
-                                    foreach_transformed_block_visitor visit,
-                                    void *arg) {
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                   BLOCK_SIZE bsize,
+                                   foreach_transformed_block_visitor visit,
+                                   void *arg) {
   int plane;
   for (plane = 0; plane < MAX_MB_PLANE; ++plane)
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
 }
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                       int aoff, int loff) {
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                      int aoff, int loff) {
   ENTROPY_CONTEXT *const a = pd->above_context + aoff;
   ENTROPY_CONTEXT *const l = pd->left_context + loff;
   const int tx_w_in_blocks = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -128,7 +128,7 @@
   }
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
   int i;
 
   for (i = 0; i < MAX_MB_PLANE; i++) {
@@ -151,7 +151,7 @@
 
 // Returns whether filter selection is needed for a given
 // intra prediction angle.
-int vp10_is_intra_filter_switchable(int angle) {
+int av1_is_intra_filter_switchable(int angle) {
   assert(angle > 0 && angle < 270);
   if (angle % 45 == 0) return 0;
   if (angle > 90 && angle < 180) {
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 4dcc1f0..327a8d0 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_BLOCKD_H_
-#define VP10_COMMON_BLOCKD_H_
+#ifndef AV1_COMMON_BLOCKD_H_
+#define AV1_COMMON_BLOCKD_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 #include "aom_scale/yv12config.h"
 
@@ -39,7 +39,7 @@
 } FRAME_TYPE;
 
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-#define IsInterpolatingFilter(filter) (vp10_is_interpolating_filter(filter))
+#define IsInterpolatingFilter(filter) (av1_is_interpolating_filter(filter))
 #else
 #define IsInterpolatingFilter(filter) (1)
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
@@ -158,11 +158,11 @@
   // Number of base colors for Y (0) and UV (1)
   uint8_t palette_size[2];
 // Value of base colors for Y, U, and V
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
 #else
   uint8_t palette_colors[3 * PALETTE_MAX_SIZE];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   // Only used by encoder to store the color index of the top left pixel.
   // TODO(huisu): move this to encoder
   uint8_t palette_first_color_idx[2];
@@ -260,11 +260,11 @@
   return mbmi->ref_frame[1] > INTRA_FRAME;
 }
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *left_mi, int b);
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+                                    const MODE_INFO *left_mi, int b);
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                      const MODE_INFO *above_mi, int b);
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+                                     const MODE_INFO *above_mi, int b);
 
 enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
 
@@ -314,7 +314,7 @@
 
 typedef struct RefBuffer {
   // TODO(dkovalev): idx is not really required and should be removed, now it
-  // is used in vp10_onyxd_if.c
+  // is used in av1_onyxd_if.c
   int idx;
   YV12_BUFFER_CONFIG *buf;
   struct scale_factors sf;
@@ -339,7 +339,7 @@
   int up_available;
   int left_available;
 
-  const vpx_prob (*partition_probs)[PARTITION_TYPES - 1];
+  const aom_prob (*partition_probs)[PARTITION_TYPES - 1];
 
   /* Distance of MB away from frame edges */
   int mb_to_left_edge;
@@ -381,7 +381,7 @@
   uint8_t is_sec_rect;
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   /* Bit depth: 8, 10, 12 */
   int bd;
 #endif
@@ -389,7 +389,7 @@
   int lossless[MAX_SEGMENTS];
   int corrupted;
 
-  struct vpx_internal_error_info *error_info;
+  struct aom_internal_error_info *error_info;
 #if CONFIG_GLOBAL_MOTION
   Global_Motion_Params *global_motion;
 #endif  // CONFIG_GLOBAL_MOTION
@@ -419,7 +419,7 @@
 #if CONFIG_SUPERTX
 static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
   return (int)txsize_sqr_map[mbmi->tx_size] >
-         VPXMIN(b_width_log2_lookup[mbmi->sb_type],
+         AOMMIN(b_width_log2_lookup[mbmi->sb_type],
                 b_height_log2_lookup[mbmi->sb_type]);
 }
 #endif  // CONFIG_SUPERTX
@@ -567,7 +567,7 @@
 
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
   if (!is_inter) {
-    return VPXMIN(max_tx_size, largest_tx_size);
+    return AOMMIN(max_tx_size, largest_tx_size);
   } else {
     const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
     if (txsize_sqr_up_map[max_rect_tx_size] <= largest_tx_size) {
@@ -578,7 +578,7 @@
   }
 #else
   (void)is_inter;
-  return VPXMIN(max_tx_size, largest_tx_size);
+  return AOMMIN(max_tx_size, largest_tx_size);
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
 }
 
@@ -606,7 +606,7 @@
   ADST_ADST,  // FILTER_TM
 };
 
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_EXT_TILE
@@ -718,7 +718,7 @@
 #endif  // CONFIG_EXT_TX
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
 
 static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
                                           int xss, int yss) {
@@ -726,7 +726,7 @@
     return TX_4X4;
   } else {
     const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss];
-    return VPXMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
+    return AOMMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
   }
 }
 
@@ -763,18 +763,18 @@
                                                   BLOCK_SIZE plane_bsize,
                                                   TX_SIZE tx_size, void *arg);
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg);
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
-                                    BLOCK_SIZE bsize,
-                                    foreach_transformed_block_visitor visit,
-                                    void *arg);
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                   BLOCK_SIZE bsize,
+                                   foreach_transformed_block_visitor visit,
+                                   void *arg);
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                       int aoff, int loff);
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                      int aoff, int loff);
 
 #if CONFIG_EXT_INTER
 static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
@@ -830,4 +830,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_BLOCKD_H_
+#endif  // AV1_COMMON_BLOCKD_H_
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index bba40cb..2309391 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -28,9 +28,9 @@
 #define BS (MI_SIZE * MAX_MIB_SIZE)
 
 // Iterate over blocks within a superblock
-static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
-                         const VP10_COMMON *cm, MACROBLOCKD *xd,
-                         MODE_INFO *const *mi_8x8, int xpos, int ypos) {
+static void av1_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
+                        const AV1_COMMON *cm, MACROBLOCKD *xd,
+                        MODE_INFO *const *mi_8x8, int xpos, int ypos) {
   // Temporary buffer (to allow SIMD parallelism)
   uint8_t buf_unaligned[BS * BS + 15];
   uint8_t *buf = (uint8_t *)(((intptr_t)buf_unaligned + 15) & ~15);
@@ -56,7 +56,7 @@
           has_bottom &= y != MAX_MIB_SIZE - 1;
           has_right &= x != MAX_MIB_SIZE - 1;
 #endif
-          vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+          av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
           clpf_block(
               xd->plane[p].dst.buf, CLPF_ALLOW_PIXEL_PARALLELISM
                                         ? buf + y * MI_SIZE * BS + x * MI_SIZE
@@ -74,7 +74,7 @@
       for (x = 0; x < MAX_MIB_SIZE && xpos + x < cm->mi_cols; x++) {
         const MB_MODE_INFO *mbmi =
             &mi_8x8[(ypos + y) * cm->mi_stride + xpos + x]->mbmi;
-        vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+        av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
         if (!mbmi->skip) {
           int i = 0;
           for (i = 0; i<MI_SIZE>> xd->plane[p].subsampling_y; i++)
@@ -89,11 +89,11 @@
 }
 
 // Iterate over the superblocks of an entire frame
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
-                     MACROBLOCKD *xd) {
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+                    MACROBLOCKD *xd) {
   int x, y;
 
   for (y = 0; y < cm->mi_rows; y += MAX_MIB_SIZE)
     for (x = 0; x < cm->mi_cols; x += MAX_MIB_SIZE)
-      vp10_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
+      av1_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
 }
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index 5b9d55b..85f29d9 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -3,8 +3,8 @@
 (Replace with proper AOM header)
 */
 
-#ifndef VP10_COMMON_CLPF_H_
-#define VP10_COMMON_CLPF_H_
+#ifndef AV1_COMMON_CLPF_H_
+#define AV1_COMMON_CLPF_H_
 
 #include "av1/common/reconinter.h"
 
@@ -16,7 +16,7 @@
 #define CLPF_FILTER_ALL_PLANES \
   0  // 1 = filter both luma and chroma, 0 = filter only luma
 
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
-                     MACROBLOCKD *xd);
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+                    MACROBLOCKD *xd);
 
 #endif
diff --git a/av1/common/common.h b/av1/common/common.h
index 4e30034..c333a17 100644
--- a/av1/common/common.h
+++ b/av1/common/common.h
@@ -8,17 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_COMMON_H_
-#define VP10_COMMON_COMMON_H_
+#ifndef AV1_COMMON_COMMON_H_
+#define AV1_COMMON_COMMON_H_
 
 /* Interface header for common constant data structures and lookup tables */
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/bitops.h"
 
 #ifdef __cplusplus
@@ -28,21 +28,21 @@
 #define PI 3.141592653589793238462643383279502884
 
 // Only need this for fixed-size arrays, for structs just assign.
-#define vp10_copy(dest, src)             \
+#define av1_copy(dest, src)              \
   {                                      \
     assert(sizeof(dest) == sizeof(src)); \
     memcpy(dest, src, sizeof(src));      \
   }
 
 // Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n)          \
+#define av1_copy_array(dest, src, n)           \
   {                                            \
     assert(sizeof(*(dest)) == sizeof(*(src))); \
     memcpy(dest, src, n * sizeof(*(src)));     \
   }
 
-#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
+#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
 
 static INLINE int get_unsigned_bits(unsigned int num_values) {
   return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -53,7 +53,7 @@
   do {                                                                      \
     lval = (expr);                                                          \
     if (!lval)                                                              \
-      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,                   \
+      aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,                   \
                          "Failed to allocate " #lval " at %s:%d", __FILE__, \
                          __LINE__);                                         \
   } while (0)
@@ -62,19 +62,19 @@
   do {                                                    \
     lval = (expr);                                        \
     if (!lval)                                            \
-      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+      aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, \
                          "Failed to allocate " #lval);    \
   } while (0)
 #endif
 // TODO(yaowu: validate the usage of these codes or develop new ones.)
-#define VP10_SYNC_CODE_0 0x49
-#define VP10_SYNC_CODE_1 0x83
-#define VP10_SYNC_CODE_2 0x43
+#define AV1_SYNC_CODE_0 0x49
+#define AV1_SYNC_CODE_1 0x83
+#define AV1_SYNC_CODE_2 0x43
 
-#define VPX_FRAME_MARKER 0x2
+#define AOM_FRAME_MARKER 0x2
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_H_
+#endif  // AV1_COMMON_COMMON_H_
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index 4348f08..1fdabfa 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_COMMON_DATA_H_
-#define VP10_COMMON_COMMON_DATA_H_
+#ifndef AV1_COMMON_COMMON_DATA_H_
+#define AV1_COMMON_COMMON_DATA_H_
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -98,7 +98,7 @@
 #endif  // CONFIG_EXT_TX
 };
 
-// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize)))
+// AOMMIN(3, AOMMIN(b_width_log2(bsize), b_height_log2(bsize)))
 static const uint8_t size_group_lookup[BLOCK_SIZES] = {
   0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, IF_EXT_PARTITION(3, 3, 3)
 };
@@ -583,4 +583,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_DATA_H_
+#endif  // AV1_COMMON_COMMON_DATA_H_
diff --git a/av1/common/debugmodes.c b/av1/common/debugmodes.c
index 6c958a8..d4169fd 100644
--- a/av1/common/debugmodes.c
+++ b/av1/common/debugmodes.c
@@ -13,7 +13,7 @@
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
 
-static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
+static void log_frame_info(AV1_COMMON *cm, const char *str, FILE *f) {
   fprintf(f, "%s", str);
   fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
           cm->show_frame, cm->base_qindex);
@@ -22,7 +22,7 @@
  * and uses the passed in member offset to print out the value of an integer
  * for each mbmi member value in the mi structure.
  */
-static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
+static void print_mi_data(AV1_COMMON *cm, FILE *file, const char *descriptor,
                           size_t member_offset) {
   int mi_row, mi_col;
   MODE_INFO **mi = cm->mi_grid_visible;
@@ -43,7 +43,7 @@
   fprintf(file, "\n");
 }
 
-void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
+void av1_print_modes_and_motion_vectors(AV1_COMMON *cm, const char *file) {
   int mi_row;
   int mi_col;
   FILE *mvs = fopen(file, "a");
diff --git a/av1/common/dering.c b/av1/common/dering.c
index 7c116a2..97b31af 100644
--- a/av1/common/dering.c
+++ b/av1/common/dering.c
@@ -11,8 +11,8 @@
 #include <string.h>
 #include <math.h>
 
-#include "./vpx_scale_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_scale_rtcd.h"
+#include "aom/aom_integer.h"
 #include "av1/common/dering.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/reconinter.h"
@@ -26,7 +26,7 @@
   return clamp(level, gi, MAX_DERING_LEVEL - 1);
 }
 
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
   int r, c;
   int maxc, maxr;
   int skip = 1;
@@ -44,8 +44,8 @@
   return skip;
 }
 
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                       MACROBLOCKD *xd, int global_level) {
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                      MACROBLOCKD *xd, int global_level) {
   int r, c;
   int sbr, sbc;
   int nhsb, nvsb;
@@ -56,21 +56,21 @@
   int bsize[3];
   int dec[3];
   int pli;
-  int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
+  int coeff_shift = AOMMAX(cm->bit_depth - 8, 0);
   nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
   nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
-  bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
-  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  bskip = aom_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+  av1_setup_dst_planes(xd->plane, frame, 0, 0);
   for (pli = 0; pli < 3; pli++) {
     dec[pli] = xd->plane[pli].subsampling_x;
     bsize[pli] = 8 >> dec[pli];
   }
   stride = bsize[0] * cm->mi_cols;
   for (pli = 0; pli < 3; pli++) {
-    src[pli] = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+    src[pli] = aom_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
     for (r = 0; r < bsize[pli] * cm->mi_rows; ++r) {
       for (c = 0; c < bsize[pli] * cm->mi_cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
           src[pli][r * stride + c] = CONVERT_TO_SHORTPTR(
               xd->plane[pli].dst.buf)[r * xd->plane[pli].dst.stride + c];
@@ -78,7 +78,7 @@
 #endif
           src[pli][r * stride + c] =
               xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         }
 #endif
       }
@@ -95,8 +95,8 @@
     for (sbc = 0; sbc < nhsb; sbc++) {
       int level;
       int nhb, nvb;
-      nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
-      nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+      nhb = AOMMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+      nvb = AOMMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
       for (pli = 0; pli < 3; pli++) {
         int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
         int threshold;
@@ -123,7 +123,7 @@
                   coeff_shift);
         for (r = 0; r < bsize[pli] * nvb; ++r) {
           for (c = 0; c < bsize[pli] * nhb; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             if (cm->use_highbitdepth) {
               CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
               [xd->plane[pli].dst.stride *
@@ -136,7 +136,7 @@
                                          (bsize[pli] * MAX_MIB_SIZE * sbr + r) +
                                      sbc * bsize[pli] * MAX_MIB_SIZE + c] =
                   dst[r * MAX_MIB_SIZE * bsize[pli] + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             }
 #endif
           }
@@ -145,7 +145,7 @@
     }
   }
   for (pli = 0; pli < 3; pli++) {
-    vpx_free(src[pli]);
+    aom_free(src[pli]);
   }
-  vpx_free(bskip);
+  aom_free(bskip);
 }
diff --git a/av1/common/dering.h b/av1/common/dering.h
index de59c86..254d34f 100644
--- a/av1/common/dering.h
+++ b/av1/common/dering.h
@@ -1,10 +1,10 @@
-#ifndef VP10_COMMON_DERING_H_
-#define VP10_COMMON_DERING_H_
+#ifndef AV1_COMMON_DERING_H_
+#define AV1_COMMON_DERING_H_
 
 #include "av1/common/od_dering.h"
 #include "av1/common/onyxc_int.h"
-#include "aom/vpx_integer.h"
-#include "./vpx_config.h"
+#include "aom/aom_integer.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -19,14 +19,14 @@
 #define DERING_REFINEMENT_LEVELS 4
 
 int compute_level_from_index(int global_level, int gi);
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col);
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                       MACROBLOCKD *xd, int global_level);
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col);
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                      MACROBLOCKD *xd, int global_level);
 
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
-                       VP10_COMMON *cm, MACROBLOCKD *xd);
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                      AV1_COMMON *cm, MACROBLOCKD *xd);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VP10_COMMON_DERING_H_
+#endif  // AV1_COMMON_DERING_H_
diff --git a/av1/common/divide.c b/av1/common/divide.c
index f0c6730..3c82be8 100644
--- a/av1/common/divide.c
+++ b/av1/common/divide.c
@@ -25,7 +25,7 @@
   }
 }
 */
-const struct fastdiv_elem vp10_fastdiv_tab[256] = {
+const struct fastdiv_elem av1_fastdiv_tab[256] = {
   { 0, 0 },           { 0, 0 },           { 0, 1 },
   { 1431655766, 2 },  { 0, 2 },           { 2576980378u, 3 },
   { 1431655766, 3 },  { 613566757, 3 },   { 0, 3 },
diff --git a/av1/common/divide.h b/av1/common/divide.h
index 7de6c91..b96ad4c 100644
--- a/av1/common/divide.h
+++ b/av1/common/divide.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_DIVIDE_H_
-#define VP10_COMMON_DIVIDE_H_
+#ifndef AV1_COMMON_DIVIDE_H_
+#define AV1_COMMON_DIVIDE_H_
 // An implemntation of the divide by multiply alogrithm
 // https://gmplib.org/~tege/divcnst-pldi94.pdf
 
 #include <limits.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,14 +27,14 @@
   unsigned shift;
 };
 
-extern const struct fastdiv_elem vp10_fastdiv_tab[256];
+extern const struct fastdiv_elem av1_fastdiv_tab[256];
 
 static INLINE unsigned fastdiv(unsigned x, int y) {
   unsigned t =
-      ((uint64_t)x * vp10_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
-  return (t + x) >> vp10_fastdiv_tab[y].shift;
+      ((uint64_t)x * av1_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
+  return (t + x) >> av1_fastdiv_tab[y].shift;
 }
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
-#endif  // VP10_COMMON_DIVIDE_H_
+#endif  // AV1_COMMON_DIVIDE_H_
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index be96c42..83f8f65 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -12,12 +12,12 @@
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/entropymode.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
 
 // Unconstrained Node Tree
 /* clang-format off */
-const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   2, 6,                                // 0 = LOW_VAL
   -TWO_TOKEN, 4,                       // 1 = TWO
   -THREE_TOKEN, -FOUR_TOKEN,           // 2 = THREE
@@ -29,30 +29,30 @@
 };
 /* clang-format on */
 
-const vpx_prob vp10_cat1_prob[] = { 159 };
-const vpx_prob vp10_cat2_prob[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
-                                    196, 177, 153, 140, 133, 130, 129 };
-#if CONFIG_VP9_HIGHBITDEPTH
-const vpx_prob vp10_cat1_prob_high10[] = { 159 };
-const vpx_prob vp10_cat2_prob_high10[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high10[] = {
+const aom_prob av1_cat1_prob[] = { 159 };
+const aom_prob av1_cat2_prob[] = { 165, 145 };
+const aom_prob av1_cat3_prob[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+                                   196, 177, 153, 140, 133, 130, 129 };
+#if CONFIG_AOM_HIGHBITDEPTH
+const aom_prob av1_cat1_prob_high10[] = { 159 };
+const aom_prob av1_cat2_prob_high10[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high10[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high10[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high10[] = {
   255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
 };
-const vpx_prob vp10_cat1_prob_high12[] = { 159 };
-const vpx_prob vp10_cat2_prob_high12[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
-                                           254, 252, 249, 243, 230, 196,
-                                           177, 153, 140, 133, 130, 129 };
+const aom_prob av1_cat1_prob_high12[] = { 159 };
+const aom_prob av1_cat2_prob_high12[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high12[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high12[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+                                          254, 252, 249, 243, 230, 196,
+                                          177, 153, 140, 133, 130, 129 };
 #endif
 
 const uint16_t band_count_table[TX_SIZES_ALL][8] = {
@@ -75,7 +75,7 @@
 #endif  // CONFIG_EXT_TX
 };
 
-const uint8_t vp10_coefband_trans_8x8plus[1024] = {
+const uint8_t av1_coefband_trans_8x8plus[1024] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
   // beyond MAXBAND_INDEX+1 all values are filled as 5
   5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -120,18 +120,18 @@
 };
 
 #if CONFIG_EXT_TX
-const uint8_t vp10_coefband_trans_4x8_8x4[32] = {
+const uint8_t av1_coefband_trans_4x8_8x4[32] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
   4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 };
 #endif  // CONFIG_EXT_TX
 
-const uint8_t vp10_coefband_trans_4x4[16] = {
+const uint8_t av1_coefband_trans_4x4[16] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
 };
 
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
-                                                       4, 5, 5, 5, 5, 5 };
+const uint8_t av1_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+                                                      4, 5, 5, 5, 5, 5 };
 
 // Model obtained from a 2-sided zero-centered distribution derived
 // from a Pareto distribution. The cdf of the distribution is:
@@ -145,9 +145,9 @@
 
 // Every odd line in this table can be generated from the even lines
 // by averaging :
-// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
-//                              vp10_pareto8_full[l+1][node] ) >> 1;
-const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
+// av1_pareto8_full[l][node] = (av1_pareto8_full[l-1][node] +
+//                              av1_pareto8_full[l+1][node] ) >> 1;
+const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
   { 3, 86, 128, 6, 86, 23, 88, 29 },
   { 6, 86, 128, 11, 87, 42, 91, 52 },
   { 9, 86, 129, 17, 88, 61, 94, 76 },
@@ -417,7 +417,7 @@
 // beta = 8
 // Values for tokens ONE_TOKEN through CATEGORY6_TOKEN included here.
 // ZERO_TOKEN and EOB_TOKEN are coded as flags outside this coder.
-const AnsP10 vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
+const AnsP10 av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
   { 4, 4, 4, 4, 8, 15, 30, 57, 103, 795 },
   { 8, 8, 8, 8, 15, 30, 57, 103, 168, 619 },
   { 12, 12, 12, 12, 23, 43, 80, 138, 205, 487 },
@@ -678,7 +678,7 @@
 
 /* clang-format off */
 #if CONFIG_ENTROPY
-const vp10_coeff_probs_model
+const av1_coeff_probs_model
 default_qctx_coef_probs[QCTX_BINS][TX_SIZES][PLANE_TYPES] = {
     {  // Q_Index 0
         {  // TX_SIZE 0
@@ -2450,7 +2450,7 @@
     },
 };
 #else
-static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2534,7 +2534,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2618,7 +2618,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2702,7 +2702,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2788,30 +2788,30 @@
 #endif  // CONFIG_ENTROPY
 /* clang-format on */
 
-static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) {
+static void extend_to_full_distribution(aom_prob *probs, aom_prob p) {
   assert(p != 0);
-  memcpy(probs, vp10_pareto8_full[p - 1], MODEL_NODES * sizeof(vpx_prob));
+  memcpy(probs, av1_pareto8_full[p - 1], MODEL_NODES * sizeof(aom_prob));
 }
 
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full) {
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full) {
   if (full != model)
-    memcpy(full, model, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+    memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
 }
 
 #if CONFIG_ANS
-void vp10_build_token_cdfs(const vpx_prob *pdf_model, rans_dec_lut cdf) {
+void av1_build_token_cdfs(const aom_prob *pdf_model, rans_dec_lut cdf) {
   AnsP10 pdf_tab[ENTROPY_TOKENS - 1];
   assert(pdf_model[2] != 0);
   // TODO(aconverse): Investigate making the precision of the zero and EOB tree
   // nodes 10-bits.
   rans_merge_prob8_pdf(pdf_tab, pdf_model[1],
-                       vp10_pareto8_token_probs[pdf_model[2] - 1],
+                       av1_pareto8_token_probs[pdf_model[2] - 1],
                        ENTROPY_TOKENS - 2);
   rans_build_cdf_from_pdf(pdf_tab, cdf);
 }
 
-void vp10_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
+void av1_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
   TX_SIZE t;
   int i, j, k, l;
   for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -2819,42 +2819,42 @@
       for (j = 0; j < REF_TYPES; ++j)
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
-            vp10_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
-                                  fc->coef_cdfs[t][i][j][k][l]);
+            av1_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
+                                 fc->coef_cdfs[t][i][j][k][l]);
 }
 #endif  // CONFIG_ANS
 
-void vp10_default_coef_probs(VP10_COMMON *cm) {
+void av1_default_coef_probs(AV1_COMMON *cm) {
 #if CONFIG_ENTROPY
-  const int index = VPXMIN(
+  const int index = AOMMIN(
       ROUND_POWER_OF_TWO(cm->base_qindex, 8 - QCTX_BIN_BITS), QCTX_BINS - 1);
-  vp10_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
+  av1_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
 #else
-  vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
-  vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
-  vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
-  vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
+  av1_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
+  av1_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
+  av1_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
+  av1_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
 #endif  // CONFIG_ENTROPY
 #if CONFIG_ANS
-  vp10_coef_pareto_cdfs(cm->fc);
+  av1_coef_pareto_cdfs(cm->fc);
 #endif  // CONFIG_ANS
 }
 
-static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
+static void adapt_coef_probs(AV1_COMMON *cm, TX_SIZE tx_size,
                              unsigned int count_sat,
                              unsigned int update_factor) {
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
-  vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
+  av1_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
 #if CONFIG_ENTROPY
-  const vp10_coeff_probs_model *const pre_probs =
+  const av1_coeff_probs_model *const pre_probs =
       cm->partial_prob_update
-          ? (const vp10_coeff_probs_model *)cm->starting_coef_probs[tx_size]
+          ? (const av1_coeff_probs_model *)cm->starting_coef_probs[tx_size]
           : pre_fc->coef_probs[tx_size];
 #else
-  const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
+  const av1_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
 #endif  // CONFIG_ENTROPY
-  const vp10_coeff_count_model *const counts =
-      (const vp10_coeff_count_model *)cm->counts.coef[tx_size];
+  const av1_coeff_count_model *const counts =
+      (const av1_coeff_count_model *)cm->counts.coef[tx_size];
   const unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       (const unsigned int(*)[
           REF_TYPES][COEF_BANDS][COEFF_CONTEXTS])cm->counts.eob_branch[tx_size];
@@ -2873,12 +2873,12 @@
           };
           for (m = 0; m < UNCONSTRAINED_NODES; ++m)
             probs[i][j][k][l][m] =
-                vp10_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
-                                 count_sat, update_factor);
+                av1_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
+                                count_sat, update_factor);
         }
 }
 
-void vp10_adapt_coef_probs(VP10_COMMON *cm) {
+void av1_adapt_coef_probs(AV1_COMMON *cm) {
   TX_SIZE t;
   unsigned int count_sat, update_factor;
 
@@ -2905,18 +2905,18 @@
   for (t = TX_4X4; t <= TX_32X32; t++)
     adapt_coef_probs(cm, t, count_sat, update_factor);
 #if CONFIG_ANS
-  vp10_coef_pareto_cdfs(cm->fc);
+  av1_coef_pareto_cdfs(cm->fc);
 #endif
 }
 
 #if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(VP10_COMMON *cm, int mi_row, int mi_col) {
+void av1_partial_adapt_probs(AV1_COMMON *cm, int mi_row, int mi_col) {
   (void)mi_row;
   (void)mi_col;
 
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
     cm->partial_prob_update = 1;
-    vp10_adapt_coef_probs(cm);
+    av1_adapt_coef_probs(cm);
   }
 }
 #endif  // CONFIG_ENTROPY
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index b0afd46..63b4edd 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPY_H_
-#define VP10_COMMON_ENTROPY_H_
+#ifndef AV1_COMMON_ENTROPY_H_
+#define AV1_COMMON_ENTROPY_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
 
 #if CONFIG_ANS
@@ -51,7 +51,7 @@
 
 #define ENTROPY_NODES 11
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_pt_energy_class[ENTROPY_TOKENS]);
 
 #define CAT1_MIN_VAL 5
 #define CAT2_MIN_VAL 7
@@ -61,50 +61,50 @@
 #define CAT6_MIN_VAL 67
 
 // Extra bit probabilities.
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob[14]);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high10[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high10[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high10[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high10[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high10[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high10[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high12[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high12[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high12[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high12[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high12[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high12[18]);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define EOB_MODEL_TOKEN 3
 
 typedef struct {
-  const vpx_tree_index *tree;
-  const vpx_prob *prob;
+  const aom_tree_index *tree;
+  const aom_prob *prob;
   int len;
   int base_val;
   const int16_t *cost;
-} vp10_extra_bit;
+} av1_extra_bit;
 
 // indexed by token value
-extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS];
-#if CONFIG_VP9_HIGHBITDEPTH
-extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
-extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS];
+#if CONFIG_AOM_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS];
+extern const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS];
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define DCT_MAX_VALUE 16384
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_MAX_VALUE_HIGH10 65536
 #define DCT_MAX_VALUE_HIGH12 262144
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* Coefficients are predicted via a 3-dimensional probability table. */
 
@@ -135,18 +135,18 @@
 // #define ENTROPY_STATS
 
 typedef unsigned int
-    vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+    av1_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
 typedef unsigned int
-    vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
+    av1_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
 
 #define SUBEXP_PARAM 4   /* Subexponential code parameter */
 #define MODULUS_PARAM 13 /* Modulus parameter */
 
-struct VP10Common;
-void vp10_default_coef_probs(struct VP10Common *cm);
-void vp10_adapt_coef_probs(struct VP10Common *cm);
+struct AV1Common;
+void av1_default_coef_probs(struct AV1Common *cm);
+void av1_adapt_coef_probs(struct AV1Common *cm);
 #if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(struct VP10Common *cm, int mi_row, int mi_col);
+void av1_partial_adapt_probs(struct AV1Common *cm, int mi_row, int mi_col);
 #endif  // CONFIG_ENTROPY
 
 // This is the index in the scan order beyond which all coefficients for
@@ -154,11 +154,11 @@
 // This macro is currently unused but may be used by certain implementations
 #define MAXBAND_INDEX 21
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_8x8plus[1024]);
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x8_8x4[32]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x8_8x4[32]);
 #endif  // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x4[16]);
 
 DECLARE_ALIGNED(16, extern const uint16_t, band_count_table[TX_SIZES_ALL][8]);
 DECLARE_ALIGNED(16, extern const uint16_t,
@@ -166,11 +166,11 @@
 
 static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
   switch (tx_size) {
-    case TX_4X4: return vp10_coefband_trans_4x4;
+    case TX_4X4: return av1_coefband_trans_4x4;
 #if CONFIG_EXT_TX
-    case TX_4X8: return vp10_coefband_trans_4x8_8x4;
+    case TX_4X8: return av1_coefband_trans_4x8_8x4;
 #endif  // CONFIG_EXT_TX
-    default: return vp10_coefband_trans_8x8plus;
+    default: return av1_coefband_trans_8x8plus;
   }
 }
 
@@ -185,22 +185,22 @@
 #define PIVOT_NODE 2  // which node is pivot
 
 #define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
-extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
-extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
+extern const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
+extern const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
 #if CONFIG_ANS
 extern const AnsP10
-    vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
+    av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
 
 typedef rans_dec_lut coeff_cdf_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
 #endif  // CONFIG_ANS
 
-typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
-                                       [UNCONSTRAINED_NODES];
+typedef aom_prob av1_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+                                      [UNCONSTRAINED_NODES];
 
-typedef unsigned int vp10_coeff_count_model
+typedef unsigned int av1_coeff_count_model
     [REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
 
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full);
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full);
 
 typedef char ENTROPY_CONTEXT;
 
@@ -263,7 +263,7 @@
 
 #if CONFIG_ANS
 struct frame_contexts;
-void vp10_coef_pareto_cdfs(struct frame_contexts *fc);
+void av1_coef_pareto_cdfs(struct frame_contexts *fc);
 #endif  // CONFIG_ANS
 
 #if CONFIG_ENTROPY
@@ -283,14 +283,14 @@
 
 #endif  // CONFIG_ENTROPY
 
-static INLINE vpx_prob vp10_merge_probs(vpx_prob pre_prob,
-                                        const unsigned int ct[2],
-                                        unsigned int count_sat,
-                                        unsigned int max_update_factor) {
+static INLINE aom_prob av1_merge_probs(aom_prob pre_prob,
+                                       const unsigned int ct[2],
+                                       unsigned int count_sat,
+                                       unsigned int max_update_factor) {
 #if CONFIG_ENTROPY
-  const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
+  const aom_prob prob = get_binary_prob(ct[0], ct[1]);
   const unsigned int count =
-      VPXMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
+      AOMMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
   const unsigned int factor = count << (max_update_factor - count_sat);
   return weighted_prob(pre_prob, prob, factor);
 #else
@@ -298,11 +298,11 @@
 #endif  // CONFIG_ENTROPY
 }
 
-static INLINE vpx_prob vp10_mode_mv_merge_probs(vpx_prob pre_prob,
-                                                const unsigned int ct[2]) {
+static INLINE aom_prob av1_mode_mv_merge_probs(aom_prob pre_prob,
+                                               const unsigned int ct[2]) {
 #if CONFIG_ENTROPY
-  return vp10_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
-                          MODE_MV_MAX_UPDATE_FACTOR_BITS);
+  return av1_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
+                         MODE_MV_MAX_UPDATE_FACTOR_BITS);
 #else
   return mode_mv_merge_probs(pre_prob, ct);
 #endif  // CONFIG_ENTROPY
@@ -312,4 +312,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPY_H_
+#endif  // AV1_COMMON_ENTROPY_H_
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 98e26e7..80ed00f 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -8,152 +8,153 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/reconinter.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/seg_common.h"
 
-const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
-    { {
-          // above = dc
-          { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
-          { 92, 45, 102, 136, 116, 180, 74, 90, 100 },  // left = v
-          { 73, 32, 19, 187, 222, 215, 46, 34, 100 },   // left = h
-          { 91, 30, 32, 116, 121, 186, 93, 86, 94 },    // left = d45
-          { 72, 35, 36, 149, 68, 206, 68, 63, 105 },    // left = d135
-          { 73, 31, 28, 138, 57, 124, 55, 122, 151 },   // left = d117
-          { 67, 23, 21, 140, 126, 197, 40, 37, 171 },   // left = d153
-          { 86, 27, 28, 128, 154, 212, 45, 43, 53 },    // left = d207
-          { 74, 32, 27, 107, 86, 160, 63, 134, 102 },   // left = d63
-          { 59, 67, 44, 140, 161, 202, 78, 67, 119 }    // left = tm
-      },
-      {
-          // above = v
-          { 63, 36, 126, 146, 123, 158, 60, 90, 96 },   // left = dc
-          { 43, 46, 168, 134, 107, 128, 69, 142, 92 },  // left = v
-          { 44, 29, 68, 159, 201, 177, 50, 57, 77 },    // left = h
-          { 58, 38, 76, 114, 97, 172, 78, 133, 92 },    // left = d45
-          { 46, 41, 76, 140, 63, 184, 69, 112, 57 },    // left = d135
-          { 38, 32, 85, 140, 46, 112, 54, 151, 133 },   // left = d117
-          { 39, 27, 61, 131, 110, 175, 44, 75, 136 },   // left = d153
-          { 52, 30, 74, 113, 130, 175, 51, 64, 58 },    // left = d207
-          { 47, 35, 80, 100, 74, 143, 64, 163, 74 },    // left = d63
-          { 36, 61, 116, 114, 128, 162, 80, 125, 82 }   // left = tm
-      },
-      {
-          // above = h
-          { 82, 26, 26, 171, 208, 204, 44, 32, 105 },  // left = dc
-          { 55, 44, 68, 166, 179, 192, 57, 57, 108 },  // left = v
-          { 42, 26, 11, 199, 241, 228, 23, 15, 85 },   // left = h
-          { 68, 42, 19, 131, 160, 199, 55, 52, 83 },   // left = d45
-          { 58, 50, 25, 139, 115, 232, 39, 52, 118 },  // left = d135
-          { 50, 35, 33, 153, 104, 162, 64, 59, 131 },  // left = d117
-          { 44, 24, 16, 150, 177, 202, 33, 19, 156 },  // left = d153
-          { 55, 27, 12, 153, 203, 218, 26, 27, 49 },   // left = d207
-          { 53, 49, 21, 110, 116, 168, 59, 80, 76 },   // left = d63
-          { 38, 72, 19, 168, 203, 212, 50, 50, 107 }   // left = tm
-      },
-      {
-          // above = d45
-          { 103, 26, 36, 129, 132, 201, 83, 80, 93 },  // left = dc
-          { 59, 38, 83, 112, 103, 162, 98, 136, 90 },  // left = v
-          { 62, 30, 23, 158, 200, 207, 59, 57, 50 },   // left = h
-          { 67, 30, 29, 84, 86, 191, 102, 91, 59 },    // left = d45
-          { 60, 32, 33, 112, 71, 220, 64, 89, 104 },   // left = d135
-          { 53, 26, 34, 130, 56, 149, 84, 120, 103 },  // left = d117
-          { 53, 21, 23, 133, 109, 210, 56, 77, 172 },  // left = d153
-          { 77, 19, 29, 112, 142, 228, 55, 66, 36 },   // left = d207
-          { 61, 29, 29, 93, 97, 165, 83, 175, 162 },   // left = d63
-          { 47, 47, 43, 114, 137, 181, 100, 99, 95 }   // left = tm
-      },
-      {
-          // above = d135
-          { 69, 23, 29, 128, 83, 199, 46, 44, 101 },   // left = dc
-          { 53, 40, 55, 139, 69, 183, 61, 80, 110 },   // left = v
-          { 40, 29, 19, 161, 180, 207, 43, 24, 91 },   // left = h
-          { 60, 34, 19, 105, 61, 198, 53, 64, 89 },    // left = d45
-          { 52, 31, 22, 158, 40, 209, 58, 62, 89 },    // left = d135
-          { 44, 31, 29, 147, 46, 158, 56, 102, 198 },  // left = d117
-          { 35, 19, 12, 135, 87, 209, 41, 45, 167 },   // left = d153
-          { 55, 25, 21, 118, 95, 215, 38, 39, 66 },    // left = d207
-          { 51, 38, 25, 113, 58, 164, 70, 93, 97 },    // left = d63
-          { 47, 54, 34, 146, 108, 203, 72, 103, 151 }  // left = tm
-      },
-      {
-          // above = d117
-          { 64, 19, 37, 156, 66, 138, 49, 95, 133 },   // left = dc
-          { 46, 27, 80, 150, 55, 124, 55, 121, 135 },  // left = v
-          { 36, 23, 27, 165, 149, 166, 54, 64, 118 },  // left = h
-          { 53, 21, 36, 131, 63, 163, 60, 109, 81 },   // left = d45
-          { 40, 26, 35, 154, 40, 185, 51, 97, 123 },   // left = d135
-          { 35, 19, 34, 179, 19, 97, 48, 129, 124 },   // left = d117
-          { 36, 20, 26, 136, 62, 164, 33, 77, 154 },   // left = d153
-          { 45, 18, 32, 130, 90, 157, 40, 79, 91 },    // left = d207
-          { 45, 26, 28, 129, 45, 129, 49, 147, 123 },  // left = d63
-          { 38, 44, 51, 136, 74, 162, 57, 97, 121 }    // left = tm
-      },
-      {
-          // above = d153
-          { 75, 17, 22, 136, 138, 185, 32, 34, 166 },  // left = dc
-          { 56, 39, 58, 133, 117, 173, 48, 53, 187 },  // left = v
-          { 35, 21, 12, 161, 212, 207, 20, 23, 145 },  // left = h
-          { 56, 29, 19, 117, 109, 181, 55, 68, 112 },  // left = d45
-          { 47, 29, 17, 153, 64, 220, 59, 51, 114 },   // left = d135
-          { 46, 16, 24, 136, 76, 147, 41, 64, 172 },   // left = d117
-          { 34, 17, 11, 108, 152, 187, 13, 15, 209 },  // left = d153
-          { 51, 24, 14, 115, 133, 209, 32, 26, 104 },  // left = d207
-          { 55, 30, 18, 122, 79, 179, 44, 88, 116 },   // left = d63
-          { 37, 49, 25, 129, 168, 164, 41, 54, 148 }   // left = tm
-      },
-      {
-          // above = d207
-          { 82, 22, 32, 127, 143, 213, 39, 41, 70 },   // left = dc
-          { 62, 44, 61, 123, 105, 189, 48, 57, 64 },   // left = v
-          { 47, 25, 17, 175, 222, 220, 24, 30, 86 },   // left = h
-          { 68, 36, 17, 106, 102, 206, 59, 74, 74 },   // left = d45
-          { 57, 39, 23, 151, 68, 216, 55, 63, 58 },    // left = d135
-          { 49, 30, 35, 141, 70, 168, 82, 40, 115 },   // left = d117
-          { 51, 25, 15, 136, 129, 202, 38, 35, 139 },  // left = d153
-          { 68, 26, 16, 111, 141, 215, 29, 28, 28 },   // left = d207
-          { 59, 39, 19, 114, 75, 180, 77, 104, 42 },   // left = d63
-          { 40, 61, 26, 126, 152, 206, 61, 59, 93 }    // left = tm
-      },
-      {
-          // above = d63
-          { 78, 23, 39, 111, 117, 170, 74, 124, 94 },   // left = dc
-          { 48, 34, 86, 101, 92, 146, 78, 179, 134 },   // left = v
-          { 47, 22, 24, 138, 187, 178, 68, 69, 59 },    // left = h
-          { 56, 25, 33, 105, 112, 187, 95, 177, 129 },  // left = d45
-          { 48, 31, 27, 114, 63, 183, 82, 116, 56 },    // left = d135
-          { 43, 28, 37, 121, 63, 123, 61, 192, 169 },   // left = d117
-          { 42, 17, 24, 109, 97, 177, 56, 76, 122 },    // left = d153
-          { 58, 18, 28, 105, 139, 182, 70, 92, 63 },    // left = d207
-          { 46, 23, 32, 74, 86, 150, 67, 183, 88 },     // left = d63
-          { 36, 38, 48, 92, 122, 165, 88, 137, 91 }     // left = tm
-      },
-      {
-          // above = tm
-          { 65, 70, 60, 155, 159, 199, 61, 60, 81 },    // left = dc
-          { 44, 78, 115, 132, 119, 173, 71, 112, 93 },  // left = v
-          { 39, 38, 21, 184, 227, 206, 42, 32, 64 },    // left = h
-          { 58, 47, 36, 124, 137, 193, 80, 82, 78 },    // left = d45
-          { 49, 50, 35, 144, 95, 205, 63, 78, 59 },     // left = d135
-          { 41, 53, 52, 148, 71, 142, 65, 128, 51 },    // left = d117
-          { 40, 36, 28, 143, 143, 202, 40, 55, 137 },   // left = d153
-          { 52, 34, 29, 129, 183, 227, 42, 35, 43 },    // left = d207
-          { 42, 44, 44, 104, 105, 164, 64, 130, 80 },   // left = d63
-          { 43, 81, 53, 140, 169, 204, 68, 84, 72 }     // left = tm
-      } };
+const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = {
+  {
+      // above = dc
+      { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
+      { 92, 45, 102, 136, 116, 180, 74, 90, 100 },  // left = v
+      { 73, 32, 19, 187, 222, 215, 46, 34, 100 },   // left = h
+      { 91, 30, 32, 116, 121, 186, 93, 86, 94 },    // left = d45
+      { 72, 35, 36, 149, 68, 206, 68, 63, 105 },    // left = d135
+      { 73, 31, 28, 138, 57, 124, 55, 122, 151 },   // left = d117
+      { 67, 23, 21, 140, 126, 197, 40, 37, 171 },   // left = d153
+      { 86, 27, 28, 128, 154, 212, 45, 43, 53 },    // left = d207
+      { 74, 32, 27, 107, 86, 160, 63, 134, 102 },   // left = d63
+      { 59, 67, 44, 140, 161, 202, 78, 67, 119 }    // left = tm
+  },
+  {
+      // above = v
+      { 63, 36, 126, 146, 123, 158, 60, 90, 96 },   // left = dc
+      { 43, 46, 168, 134, 107, 128, 69, 142, 92 },  // left = v
+      { 44, 29, 68, 159, 201, 177, 50, 57, 77 },    // left = h
+      { 58, 38, 76, 114, 97, 172, 78, 133, 92 },    // left = d45
+      { 46, 41, 76, 140, 63, 184, 69, 112, 57 },    // left = d135
+      { 38, 32, 85, 140, 46, 112, 54, 151, 133 },   // left = d117
+      { 39, 27, 61, 131, 110, 175, 44, 75, 136 },   // left = d153
+      { 52, 30, 74, 113, 130, 175, 51, 64, 58 },    // left = d207
+      { 47, 35, 80, 100, 74, 143, 64, 163, 74 },    // left = d63
+      { 36, 61, 116, 114, 128, 162, 80, 125, 82 }   // left = tm
+  },
+  {
+      // above = h
+      { 82, 26, 26, 171, 208, 204, 44, 32, 105 },  // left = dc
+      { 55, 44, 68, 166, 179, 192, 57, 57, 108 },  // left = v
+      { 42, 26, 11, 199, 241, 228, 23, 15, 85 },   // left = h
+      { 68, 42, 19, 131, 160, 199, 55, 52, 83 },   // left = d45
+      { 58, 50, 25, 139, 115, 232, 39, 52, 118 },  // left = d135
+      { 50, 35, 33, 153, 104, 162, 64, 59, 131 },  // left = d117
+      { 44, 24, 16, 150, 177, 202, 33, 19, 156 },  // left = d153
+      { 55, 27, 12, 153, 203, 218, 26, 27, 49 },   // left = d207
+      { 53, 49, 21, 110, 116, 168, 59, 80, 76 },   // left = d63
+      { 38, 72, 19, 168, 203, 212, 50, 50, 107 }   // left = tm
+  },
+  {
+      // above = d45
+      { 103, 26, 36, 129, 132, 201, 83, 80, 93 },  // left = dc
+      { 59, 38, 83, 112, 103, 162, 98, 136, 90 },  // left = v
+      { 62, 30, 23, 158, 200, 207, 59, 57, 50 },   // left = h
+      { 67, 30, 29, 84, 86, 191, 102, 91, 59 },    // left = d45
+      { 60, 32, 33, 112, 71, 220, 64, 89, 104 },   // left = d135
+      { 53, 26, 34, 130, 56, 149, 84, 120, 103 },  // left = d117
+      { 53, 21, 23, 133, 109, 210, 56, 77, 172 },  // left = d153
+      { 77, 19, 29, 112, 142, 228, 55, 66, 36 },   // left = d207
+      { 61, 29, 29, 93, 97, 165, 83, 175, 162 },   // left = d63
+      { 47, 47, 43, 114, 137, 181, 100, 99, 95 }   // left = tm
+  },
+  {
+      // above = d135
+      { 69, 23, 29, 128, 83, 199, 46, 44, 101 },   // left = dc
+      { 53, 40, 55, 139, 69, 183, 61, 80, 110 },   // left = v
+      { 40, 29, 19, 161, 180, 207, 43, 24, 91 },   // left = h
+      { 60, 34, 19, 105, 61, 198, 53, 64, 89 },    // left = d45
+      { 52, 31, 22, 158, 40, 209, 58, 62, 89 },    // left = d135
+      { 44, 31, 29, 147, 46, 158, 56, 102, 198 },  // left = d117
+      { 35, 19, 12, 135, 87, 209, 41, 45, 167 },   // left = d153
+      { 55, 25, 21, 118, 95, 215, 38, 39, 66 },    // left = d207
+      { 51, 38, 25, 113, 58, 164, 70, 93, 97 },    // left = d63
+      { 47, 54, 34, 146, 108, 203, 72, 103, 151 }  // left = tm
+  },
+  {
+      // above = d117
+      { 64, 19, 37, 156, 66, 138, 49, 95, 133 },   // left = dc
+      { 46, 27, 80, 150, 55, 124, 55, 121, 135 },  // left = v
+      { 36, 23, 27, 165, 149, 166, 54, 64, 118 },  // left = h
+      { 53, 21, 36, 131, 63, 163, 60, 109, 81 },   // left = d45
+      { 40, 26, 35, 154, 40, 185, 51, 97, 123 },   // left = d135
+      { 35, 19, 34, 179, 19, 97, 48, 129, 124 },   // left = d117
+      { 36, 20, 26, 136, 62, 164, 33, 77, 154 },   // left = d153
+      { 45, 18, 32, 130, 90, 157, 40, 79, 91 },    // left = d207
+      { 45, 26, 28, 129, 45, 129, 49, 147, 123 },  // left = d63
+      { 38, 44, 51, 136, 74, 162, 57, 97, 121 }    // left = tm
+  },
+  {
+      // above = d153
+      { 75, 17, 22, 136, 138, 185, 32, 34, 166 },  // left = dc
+      { 56, 39, 58, 133, 117, 173, 48, 53, 187 },  // left = v
+      { 35, 21, 12, 161, 212, 207, 20, 23, 145 },  // left = h
+      { 56, 29, 19, 117, 109, 181, 55, 68, 112 },  // left = d45
+      { 47, 29, 17, 153, 64, 220, 59, 51, 114 },   // left = d135
+      { 46, 16, 24, 136, 76, 147, 41, 64, 172 },   // left = d117
+      { 34, 17, 11, 108, 152, 187, 13, 15, 209 },  // left = d153
+      { 51, 24, 14, 115, 133, 209, 32, 26, 104 },  // left = d207
+      { 55, 30, 18, 122, 79, 179, 44, 88, 116 },   // left = d63
+      { 37, 49, 25, 129, 168, 164, 41, 54, 148 }   // left = tm
+  },
+  {
+      // above = d207
+      { 82, 22, 32, 127, 143, 213, 39, 41, 70 },   // left = dc
+      { 62, 44, 61, 123, 105, 189, 48, 57, 64 },   // left = v
+      { 47, 25, 17, 175, 222, 220, 24, 30, 86 },   // left = h
+      { 68, 36, 17, 106, 102, 206, 59, 74, 74 },   // left = d45
+      { 57, 39, 23, 151, 68, 216, 55, 63, 58 },    // left = d135
+      { 49, 30, 35, 141, 70, 168, 82, 40, 115 },   // left = d117
+      { 51, 25, 15, 136, 129, 202, 38, 35, 139 },  // left = d153
+      { 68, 26, 16, 111, 141, 215, 29, 28, 28 },   // left = d207
+      { 59, 39, 19, 114, 75, 180, 77, 104, 42 },   // left = d63
+      { 40, 61, 26, 126, 152, 206, 61, 59, 93 }    // left = tm
+  },
+  {
+      // above = d63
+      { 78, 23, 39, 111, 117, 170, 74, 124, 94 },   // left = dc
+      { 48, 34, 86, 101, 92, 146, 78, 179, 134 },   // left = v
+      { 47, 22, 24, 138, 187, 178, 68, 69, 59 },    // left = h
+      { 56, 25, 33, 105, 112, 187, 95, 177, 129 },  // left = d45
+      { 48, 31, 27, 114, 63, 183, 82, 116, 56 },    // left = d135
+      { 43, 28, 37, 121, 63, 123, 61, 192, 169 },   // left = d117
+      { 42, 17, 24, 109, 97, 177, 56, 76, 122 },    // left = d153
+      { 58, 18, 28, 105, 139, 182, 70, 92, 63 },    // left = d207
+      { 46, 23, 32, 74, 86, 150, 67, 183, 88 },     // left = d63
+      { 36, 38, 48, 92, 122, 165, 88, 137, 91 }     // left = tm
+  },
+  {
+      // above = tm
+      { 65, 70, 60, 155, 159, 199, 61, 60, 81 },    // left = dc
+      { 44, 78, 115, 132, 119, 173, 71, 112, 93 },  // left = v
+      { 39, 38, 21, 184, 227, 206, 42, 32, 64 },    // left = h
+      { 58, 47, 36, 124, 137, 193, 80, 82, 78 },    // left = d45
+      { 49, 50, 35, 144, 95, 205, 63, 78, 59 },     // left = d135
+      { 41, 53, 52, 148, 71, 142, 65, 128, 51 },    // left = d117
+      { 40, 36, 28, 143, 143, 202, 40, 55, 137 },   // left = d153
+      { 52, 34, 29, 129, 183, 227, 42, 35, 43 },    // left = d207
+      { 42, 44, 44, 104, 105, 164, 64, 130, 80 },   // left = d63
+      { 43, 81, 53, 140, 169, 204, 68, 84, 72 }     // left = tm
+  }
+};
 
-static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
+static const aom_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
   { 65, 32, 18, 144, 162, 194, 41, 51, 98 },   // block_size < 8x8
   { 132, 68, 18, 165, 217, 196, 45, 40, 78 },  // block_size < 16x16
   { 173, 80, 19, 176, 240, 193, 64, 35, 46 },  // block_size < 32x32
   { 221, 135, 38, 194, 248, 121, 96, 85, 29 }  // block_size >= 32x32
 };
 
-static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
+static const aom_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
   { 120, 7, 76, 176, 208, 126, 28, 54, 103 },   // y = dc
   { 48, 12, 154, 155, 139, 90, 34, 117, 119 },  // y = v
   { 67, 6, 25, 204, 243, 158, 13, 21, 96 },     // y = h
@@ -167,7 +168,7 @@
 };
 
 #if CONFIG_EXT_PARTITION_TYPES
-static const vpx_prob
+static const aom_prob
     default_partition_probs[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1] = {
       // 8x8 -> 4x4
       { 199, 122, 141, 128, 128, 128, 128 },  // a/l both not split
@@ -198,7 +199,7 @@
 #endif                                      // CONFIG_EXT_PARTITION
     };
 #else
-static const vpx_prob
+static const aom_prob
     default_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
       // 8x8 -> 4x4
       { 199, 122, 141 },  // a/l both not split
@@ -231,27 +232,27 @@
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_REF_MV
-static const vpx_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
+static const aom_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
   200, 180, 150, 150, 110, 70, 60,
 };
 
-static const vpx_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
+static const aom_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
   192, 64,
 };
 
-static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
+static const aom_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
   220, 220, 200, 200, 180, 128, 30, 220, 30,
 };
 
-static const vpx_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
+static const aom_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
                                                               128, 160 };
 
 #if CONFIG_EXT_INTER
-static const vpx_prob default_new2mv_prob = 180;
+static const aom_prob default_new2mv_prob = 180;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
 
-static const vpx_prob
+static const aom_prob
     default_inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] = {
 #if CONFIG_EXT_INTER
       // TODO(zoeliu): To adjust the initial default probs
@@ -274,7 +275,7 @@
     };
 
 #if CONFIG_EXT_INTER
-static const vpx_prob default_inter_compound_mode_probs
+static const aom_prob default_inter_compound_mode_probs
     [INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1] = {
       { 2, 173, 68, 192, 64, 192, 128, 180, 180 },   // 0 = both zero mv
       { 7, 145, 160, 192, 64, 192, 128, 180, 180 },  // 1 = 1 zero + 1 predicted
@@ -285,11 +286,11 @@
       { 25, 29, 50, 192, 64, 192, 128, 180, 180 },   // 6 = two intra neighbours
     };
 
-static const vpx_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
+static const aom_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
   208, 208, 208, 208,
 };
 
-static const vpx_prob
+static const aom_prob
     default_interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1] = {
       { 65, 32, 18, 144, 162, 194, 41, 51, 98 },   // block_size < 8x8
       { 132, 68, 18, 165, 217, 196, 45, 40, 78 },  // block_size < 16x16
@@ -297,14 +298,14 @@
       { 221, 135, 38, 194, 248, 121, 96, 85, 29 }  // block_size >= 32x32
     };
 
-static const vpx_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
   208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
 #if CONFIG_EXT_PARTITION
   208, 208, 208
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static const vpx_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
   208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
 #if CONFIG_EXT_PARTITION
   255, 255, 255
@@ -314,10 +315,10 @@
 
 // Change this section appropriately once warped motion is supported
 #if CONFIG_OBMC && !CONFIG_WARPED_MOTION
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, -OBMC_CAUSAL
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
       { 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -328,10 +329,10 @@
 
 #elif !CONFIG_OBMC && CONFIG_WARPED_MOTION
 
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, -WARPED_CAUSAL
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
       { 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -342,10 +343,10 @@
 
 #elif CONFIG_OBMC && CONFIG_WARPED_MOTION
 
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, 2, -OBMC_CAUSAL, -WARPED_CAUSAL,
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255, 200 }, { 255, 200 }, { 255, 200 }, { 151, 200 }, { 153, 200 },
       { 144, 200 }, { 178, 200 }, { 165, 200 }, { 160, 200 }, { 207, 200 },
@@ -357,7 +358,7 @@
 #endif  // CONFIG_OBMC || !CONFIG_WARPED_MOTION
 
 /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
+const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
   -DC_PRED,   2,          /* 0 = DC_NODE */
   -TM_PRED,   4,          /* 1 = TM_NODE */
   -V_PRED,    6,          /* 2 = V_NODE */
@@ -369,7 +370,7 @@
   -D153_PRED, -D207_PRED  /* 8 = D153_NODE */
 };
 
-const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
+const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
   -INTER_OFFSET(ZEROMV),    2,
   -INTER_OFFSET(NEARESTMV), 4,
 #if CONFIG_EXT_INTER
@@ -382,7 +383,7 @@
 
 #if CONFIG_EXT_INTER
 /* clang-format off */
-const vpx_tree_index vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
+const aom_tree_index av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
   -II_DC_PRED, 2,                   /* 0 = II_DC_NODE     */
   -II_TM_PRED, 4,                   /* 1 = II_TM_NODE     */
   -II_V_PRED, 6,                    /* 2 = II_V_NODE      */
@@ -394,7 +395,7 @@
   -II_D153_PRED, -II_D207_PRED      /* 8 = II_D153_NODE   */
 };
 
-const vpx_tree_index vp10_inter_compound_mode_tree
+const aom_tree_index av1_inter_compound_mode_tree
     [TREE_SIZE(INTER_COMPOUND_MODES)] = {
   -INTER_COMPOUND_OFFSET(ZERO_ZEROMV), 2,
   -INTER_COMPOUND_OFFSET(NEAREST_NEARESTMV), 4,
@@ -410,13 +411,13 @@
 /* clang-format on */
 #endif  // CONFIG_EXT_INTER
 
-const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
+const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
   -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
 };
 
 #if CONFIG_EXT_PARTITION_TYPES
 /* clang-format off */
-const vpx_tree_index vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
+const aom_tree_index av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
   -PARTITION_NONE, 2,
   6, 4,
   8, -PARTITION_SPLIT,
@@ -428,16 +429,16 @@
 /* clang-format on */
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
-static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
+static const aom_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
   9, 102, 187, 225
 };
 
-static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
+static const aom_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
   239, 183, 119, 96, 41
 };
 
 #if CONFIG_EXT_REFS
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
   // TODO(zoeliu): To adjust the initial prob values.
   { 33, 16, 16 },
   { 77, 74, 74 },
@@ -445,16 +446,16 @@
   { 172, 170, 170 },
   { 238, 247, 247 }
 };
-static const vpx_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
+static const aom_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
   { 16 }, { 74 }, { 142 }, { 170 }, { 247 }
 };
 #else
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
   { 50 }, { 126 }, { 123 }, { 221 }, { 226 }
 };
 #endif  // CONFIG_EXT_REFS
 
-static const vpx_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
+static const aom_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
 #if CONFIG_EXT_REFS
   { 33, 16, 16, 16, 16 },
   { 77, 74, 74, 74, 74 },
@@ -466,14 +467,14 @@
 #endif  // CONFIG_EXT_REFS
 };
 
-const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
+const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
   -TWO_COLORS,  2, -THREE_COLORS, 4,  -FOUR_COLORS,  6,
   -FIVE_COLORS, 8, -SIX_COLORS,   10, -SEVEN_COLORS, -EIGHT_COLORS,
 };
 
 // TODO(huisu): tune these probs
-const vpx_prob
-    vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+const aom_prob
+    av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
       { 96, 89, 100, 64, 77, 130 },   { 22, 15, 44, 16, 34, 82 },
       { 30, 19, 57, 18, 38, 86 },     { 94, 36, 104, 23, 43, 92 },
       { 116, 76, 107, 46, 65, 105 },  { 112, 82, 94, 40, 70, 112 },
@@ -485,21 +486,20 @@
 #endif  // CONFIG_EXT_PARTITION
     };
 
-const vpx_prob
-    vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] =
-        {
-          { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
-          { 72, 151, 204, 139, 155, 161 },  { 78, 135, 171, 104, 120, 173 },
-          { 59, 92, 131, 78, 92, 142 },     { 75, 118, 149, 84, 90, 128 },
-          { 89, 87, 92, 66, 66, 128 },      { 67, 53, 54, 55, 66, 93 },
-          { 120, 130, 83, 171, 75, 214 },   { 72, 55, 66, 68, 79, 107 },
+const aom_prob
+    av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+      { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
+      { 72, 151, 204, 139, 155, 161 },  { 78, 135, 171, 104, 120, 173 },
+      { 59, 92, 131, 78, 92, 142 },     { 75, 118, 149, 84, 90, 128 },
+      { 89, 87, 92, 66, 66, 128 },      { 67, 53, 54, 55, 66, 93 },
+      { 120, 130, 83, 171, 75, 214 },   { 72, 55, 66, 68, 79, 107 },
 #if CONFIG_EXT_PARTITION
-          { 72, 55, 66, 68, 79, 107 },      { 72, 55, 66, 68, 79, 107 },
-          { 72, 55, 66, 68, 79, 107 },
+      { 72, 55, 66, 68, 79, 107 },      { 72, 55, 66, 68, 79, 107 },
+      { 72, 55, 66, 68, 79, 107 },
 #endif  // CONFIG_EXT_PARTITION
-        };
+    };
 
-const vpx_prob vp10_default_palette_y_mode_prob
+const aom_prob av1_default_palette_y_mode_prob
     [PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS] = {
       { 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
       { 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
@@ -510,10 +510,10 @@
 #endif  // CONFIG_EXT_PARTITION
     };
 
-const vpx_prob vp10_default_palette_uv_mode_prob[2] = { 253, 229 };
+const aom_prob av1_default_palette_uv_mode_prob[2] = { 253, 229 };
 
-const vpx_tree_index
-    vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
+const aom_tree_index
+    av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
       { // 2 colors
         -PALETTE_COLOR_ONE, -PALETTE_COLOR_TWO },
       { // 3 colors
@@ -537,7 +537,7 @@
         -PALETTE_COLOR_SEVEN, -PALETTE_COLOR_EIGHT },
     };
 
-const vpx_prob vp10_default_palette_y_color_prob
+const aom_prob av1_default_palette_y_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
       {
           // 2 colors
@@ -674,7 +674,7 @@
       }
     };
 
-const vpx_prob vp10_default_palette_uv_color_prob
+const aom_prob av1_default_palette_uv_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
       {
           // 2 colors
@@ -822,7 +822,7 @@
   9680, 10648, 10890, 13310
 };
 
-const vpx_tree_index vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
+const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
   {
       // Max tx_size is 8X8
       -TX_4X4, -TX_8X8,
@@ -837,7 +837,7 @@
   },
 };
 
-static const vpx_prob
+static const aom_prob
     default_tx_size_prob[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1] = {
       {
           // Max tx_size is 8X8
@@ -856,8 +856,8 @@
       },
     };
 
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
-                                   int c, int n, int *color_order) {
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+                                  int c, int n, int *color_order) {
   int i, j, max, max_idx, temp;
   int scores[PALETTE_MAX_SIZE + 10];
   int weights[4] = { 3, 2, 3, 2 };
@@ -926,15 +926,15 @@
 }
 
 #if CONFIG_VAR_TX
-static const vpx_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
+static const aom_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
   192, 128, 64, 192, 128, 64, 192, 128, 64,
 };
 #endif
 
-static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
+static const aom_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
 
 #if CONFIG_EXT_INTERP
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
 #if CONFIG_DUAL_FILTER
       { 235, 192, 128, 128 }, { 36, 243, 208, 128 }, { 34, 16, 128, 128 },
@@ -955,7 +955,7 @@
     };
 #else  // CONFIG_EXT_INTERP
 #if CONFIG_DUAL_FILTER
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
 
@@ -966,7 +966,7 @@
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 10, 3 },
     };
 #else
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
     };
@@ -975,7 +975,7 @@
 
 #if CONFIG_EXT_TX
 /* clang-format off */
-const vpx_tree_index vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER]
+const aom_tree_index av1_ext_tx_inter_tree[EXT_TX_SETS_INTER]
                                            [TREE_SIZE(TX_TYPES)] = {
   { // ToDo(yaowu): remove used entry 0.
     0
@@ -1012,7 +1012,7 @@
   }
 };
 
-const vpx_tree_index vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
+const aom_tree_index av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
                                            [TREE_SIZE(TX_TYPES)] = {
   {  // ToDo(yaowu): remove unused entry 0.
     0
@@ -1032,7 +1032,7 @@
 };
 /* clang-format on */
 
-static const vpx_prob
+static const aom_prob
     default_inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1] = {
       {
           // ToDo(yaowu): remove unused entry 0.
@@ -1073,7 +1073,7 @@
       }
     };
 
-static const vpx_prob default_intra_ext_tx_prob
+static const aom_prob default_intra_ext_tx_prob
     [EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES - 1] = {
       {
           // ToDo(yaowu): remove unused entry 0.
@@ -1237,41 +1237,41 @@
 #else
 
 /* clang-format off */
-const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
+const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
   -DCT_DCT, 2,
   -ADST_ADST, 4,
   -ADST_DCT, -DCT_ADST
 };
 /* clang-format on */
 
-static const vpx_prob
+static const aom_prob
     default_intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1] = {
       { { 240, 85, 128 }, { 4, 1, 248 }, { 4, 1, 8 }, { 4, 248, 128 } },
       { { 244, 85, 128 }, { 8, 2, 248 }, { 8, 2, 8 }, { 8, 248, 128 } },
       { { 248, 85, 128 }, { 16, 4, 248 }, { 16, 4, 8 }, { 16, 248, 128 } },
     };
 
-static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
+static const aom_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
   { 160, 85, 128 }, { 176, 85, 128 }, { 192, 85, 128 },
 };
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_INTRA
-static const vpx_prob
+static const aom_prob
     default_intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1] = {
       { 98, 63, 60 }, { 98, 82, 80 }, { 94, 65, 103 },
       { 49, 25, 24 }, { 72, 38, 50 },
     };
-static const vpx_prob default_ext_intra_probs[2] = { 230, 230 };
+static const aom_prob default_ext_intra_probs[2] = { 230, 230 };
 
-const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
+const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
   -INTRA_FILTER_LINEAR,      2, -INTRA_FILTER_8TAP, 4, -INTRA_FILTER_8TAP_SHARP,
   -INTRA_FILTER_8TAP_SMOOTH,
 };
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_SUPERTX
-static const vpx_prob
+static const aom_prob
     default_supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES] = {
       { 1, 160, 160, 170 }, { 1, 200, 200, 210 },
     };
@@ -1283,58 +1283,58 @@
 };
 
 static void init_mode_probs(FRAME_CONTEXT *fc) {
-  vp10_copy(fc->uv_mode_prob, default_uv_probs);
-  vp10_copy(fc->y_mode_prob, default_if_y_probs);
-  vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
-  vp10_copy(fc->partition_prob, default_partition_probs);
-  vp10_copy(fc->intra_inter_prob, default_intra_inter_p);
-  vp10_copy(fc->comp_inter_prob, default_comp_inter_p);
-  vp10_copy(fc->comp_ref_prob, default_comp_ref_p);
+  av1_copy(fc->uv_mode_prob, default_uv_probs);
+  av1_copy(fc->y_mode_prob, default_if_y_probs);
+  av1_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
+  av1_copy(fc->partition_prob, default_partition_probs);
+  av1_copy(fc->intra_inter_prob, default_intra_inter_p);
+  av1_copy(fc->comp_inter_prob, default_comp_inter_p);
+  av1_copy(fc->comp_ref_prob, default_comp_ref_p);
 #if CONFIG_EXT_REFS
-  vp10_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
+  av1_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
 #endif  // CONFIG_EXT_REFS
-  vp10_copy(fc->single_ref_prob, default_single_ref_p);
-  vp10_copy(fc->tx_size_probs, default_tx_size_prob);
+  av1_copy(fc->single_ref_prob, default_single_ref_p);
+  av1_copy(fc->tx_size_probs, default_tx_size_prob);
 #if CONFIG_VAR_TX
-  vp10_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
+  av1_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
 #endif
-  vp10_copy(fc->skip_probs, default_skip_probs);
+  av1_copy(fc->skip_probs, default_skip_probs);
 #if CONFIG_REF_MV
-  vp10_copy(fc->newmv_prob, default_newmv_prob);
-  vp10_copy(fc->zeromv_prob, default_zeromv_prob);
-  vp10_copy(fc->refmv_prob, default_refmv_prob);
-  vp10_copy(fc->drl_prob, default_drl_prob);
+  av1_copy(fc->newmv_prob, default_newmv_prob);
+  av1_copy(fc->zeromv_prob, default_zeromv_prob);
+  av1_copy(fc->refmv_prob, default_refmv_prob);
+  av1_copy(fc->drl_prob, default_drl_prob);
 #if CONFIG_EXT_INTER
   fc->new2mv_prob = default_new2mv_prob;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
-  vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+  av1_copy(fc->inter_mode_probs, default_inter_mode_probs);
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vp10_copy(fc->motvar_prob, default_motvar_prob);
+  av1_copy(fc->motvar_prob, default_motvar_prob);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 #if CONFIG_EXT_INTER
-  vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
-  vp10_copy(fc->interintra_prob, default_interintra_prob);
-  vp10_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
-  vp10_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
-  vp10_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
+  av1_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
+  av1_copy(fc->interintra_prob, default_interintra_prob);
+  av1_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
+  av1_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
+  av1_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_SUPERTX
-  vp10_copy(fc->supertx_prob, default_supertx_prob);
+  av1_copy(fc->supertx_prob, default_supertx_prob);
 #endif  // CONFIG_SUPERTX
-  vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
-  vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+  av1_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+  av1_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
 #if CONFIG_EXT_INTRA
-  vp10_copy(fc->ext_intra_probs, default_ext_intra_probs);
-  vp10_copy(fc->intra_filter_probs, default_intra_filter_probs);
+  av1_copy(fc->ext_intra_probs, default_ext_intra_probs);
+  av1_copy(fc->intra_filter_probs, default_intra_filter_probs);
 #endif  // CONFIG_EXT_INTRA
-  vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
-  vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
+  av1_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
+  av1_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
 }
 
 #if CONFIG_EXT_INTERP
-const vpx_tree_index
-    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] = {
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+    {
       -EIGHTTAP_REGULAR,
       2,
       4,
@@ -1345,23 +1345,22 @@
       -MULTITAP_SHARP2,
     };
 #else
-const vpx_tree_index vp10_switchable_interp_tree[TREE_SIZE(
-    SWITCHABLE_FILTERS)] = { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH,
-                             -MULTITAP_SHARP };
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+    { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH, -MULTITAP_SHARP };
 #endif  // CONFIG_EXT_INTERP
 
-void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
   int i, j;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
   const FRAME_COUNTS *counts = &cm->counts;
 
   for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-    fc->intra_inter_prob[i] = vp10_mode_mv_merge_probs(
+    fc->intra_inter_prob[i] = av1_mode_mv_merge_probs(
         pre_fc->intra_inter_prob[i], counts->intra_inter[i]);
   for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-    fc->comp_inter_prob[i] = vp10_mode_mv_merge_probs(
-        pre_fc->comp_inter_prob[i], counts->comp_inter[i]);
+    fc->comp_inter_prob[i] = av1_mode_mv_merge_probs(pre_fc->comp_inter_prob[i],
+                                                     counts->comp_inter[i]);
 
 #if CONFIG_EXT_REFS
   for (i = 0; i < REF_CONTEXTS; i++)
@@ -1381,36 +1380,36 @@
 
   for (i = 0; i < REF_CONTEXTS; i++)
     for (j = 0; j < (SINGLE_REFS - 1); j++)
-      fc->single_ref_prob[i][j] = vp10_mode_mv_merge_probs(
+      fc->single_ref_prob[i][j] = av1_mode_mv_merge_probs(
           pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
 
 #if CONFIG_REF_MV
   for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
     fc->newmv_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
   for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
-    fc->zeromv_prob[i] = vp10_mode_mv_merge_probs(pre_fc->zeromv_prob[i],
-                                                  counts->zeromv_mode[i]);
+    fc->zeromv_prob[i] =
+        av1_mode_mv_merge_probs(pre_fc->zeromv_prob[i], counts->zeromv_mode[i]);
   for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
     fc->refmv_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
 
   for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
     fc->drl_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
 #if CONFIG_EXT_INTER
   fc->new2mv_prob =
-      vp10_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
+      av1_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
 #endif  // CONFIG_EXT_INTER
 #else
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
+    aom_tree_merge_probs(av1_inter_mode_tree, pre_fc->inter_mode_probs[i],
                          counts->inter_mode[i], fc->inter_mode_probs[i]);
 #endif
 
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
   for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
-    vpx_tree_merge_probs(vp10_motvar_tree, pre_fc->motvar_prob[i],
+    aom_tree_merge_probs(av1_motvar_tree, pre_fc->motvar_prob[i],
                          counts->motvar[i], fc->motvar_prob[i]);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
@@ -1418,7 +1417,7 @@
   for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
     int j;
     for (j = 1; j < TX_SIZES; ++j) {
-      fc->supertx_prob[i][j] = vp10_mode_mv_merge_probs(
+      fc->supertx_prob[i][j] = av1_mode_mv_merge_probs(
           pre_fc->supertx_prob[i][j], counts->supertx[i][j]);
     }
   }
@@ -1426,44 +1425,44 @@
 
 #if CONFIG_EXT_INTER
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
-    vpx_tree_merge_probs(
-        vp10_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
+    aom_tree_merge_probs(
+        av1_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
         counts->inter_compound_mode[i], fc->inter_compound_mode_probs[i]);
   for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
     if (is_interintra_allowed_bsize_group(i))
-      fc->interintra_prob[i] = vp10_mode_mv_merge_probs(
+      fc->interintra_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->interintra_prob[i], counts->interintra[i]);
   }
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
-    vpx_tree_merge_probs(
-        vp10_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
+    aom_tree_merge_probs(
+        av1_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
         counts->interintra_mode[i], fc->interintra_mode_prob[i]);
   }
   for (i = 0; i < BLOCK_SIZES; ++i) {
     if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
-      fc->wedge_interintra_prob[i] = vp10_mode_mv_merge_probs(
+      fc->wedge_interintra_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
   }
   for (i = 0; i < BLOCK_SIZES; ++i) {
     if (is_interinter_wedge_used(i))
-      fc->wedge_interinter_prob[i] = vp10_mode_mv_merge_probs(
+      fc->wedge_interinter_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->wedge_interinter_prob[i], counts->wedge_interinter[i]);
   }
 #endif  // CONFIG_EXT_INTER
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
-    vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
                          counts->y_mode[i], fc->y_mode_prob[i]);
 
   if (cm->interp_filter == SWITCHABLE) {
     for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
-      vpx_tree_merge_probs(
-          vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+      aom_tree_merge_probs(
+          av1_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
           counts->switchable_interp[i], fc->switchable_interp_prob[i]);
   }
 }
 
-void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
   int i, j;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -1472,7 +1471,7 @@
   if (cm->tx_mode == TX_MODE_SELECT) {
     for (i = 0; i < TX_SIZES - 1; ++i) {
       for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
-        vpx_tree_merge_probs(vp10_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
+        aom_tree_merge_probs(av1_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
                              counts->tx_size[i][j], fc->tx_size_probs[i][j]);
     }
   }
@@ -1480,21 +1479,21 @@
 #if CONFIG_VAR_TX
   if (cm->tx_mode == TX_MODE_SELECT)
     for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
-      fc->txfm_partition_prob[i] = vp10_mode_mv_merge_probs(
+      fc->txfm_partition_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->txfm_partition_prob[i], counts->txfm_partition[i]);
 #endif
 
   for (i = 0; i < SKIP_CONTEXTS; ++i)
     fc->skip_probs[i] =
-        vp10_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
+        av1_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
 
 #if CONFIG_EXT_TX
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     int s;
     for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
       if (use_inter_ext_tx_for_txsize[s][i]) {
-        vpx_tree_merge_probs(
-            vp10_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
+        aom_tree_merge_probs(
+            av1_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
             counts->inter_ext_tx[s][i], fc->inter_ext_tx_prob[s][i]);
       }
     }
@@ -1502,8 +1501,8 @@
       if (use_intra_ext_tx_for_txsize[s][i]) {
         int j;
         for (j = 0; j < INTRA_MODES; ++j)
-          vpx_tree_merge_probs(
-              vp10_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
+          aom_tree_merge_probs(
+              av1_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
               counts->intra_ext_tx[s][i][j], fc->intra_ext_tx_prob[s][i][j]);
       }
     }
@@ -1511,52 +1510,52 @@
 #else
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
-      vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
+      aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
                            counts->intra_ext_tx[i][j],
                            fc->intra_ext_tx_prob[i][j]);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+    aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
                          counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
   }
 #endif  // CONFIG_EXT_TX
 
   if (cm->seg.temporal_update) {
     for (i = 0; i < PREDICTION_PROBS; i++)
-      fc->seg.pred_probs[i] = vp10_mode_mv_merge_probs(
-          pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
+      fc->seg.pred_probs[i] = av1_mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
+                                                      counts->seg.pred[i]);
 
-    vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_mispred, fc->seg.tree_probs);
   } else {
-    vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_total, fc->seg.tree_probs);
   }
 
   for (i = 0; i < INTRA_MODES; ++i)
-    vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
                          counts->uv_mode[i], fc->uv_mode_prob[i]);
 
 #if CONFIG_EXT_PARTITION_TYPES
-  vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[0],
+  aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[0],
                        counts->partition[0], fc->partition_prob[0]);
   for (i = 1; i < PARTITION_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_ext_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_ext_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #else
   for (i = 0; i < PARTITION_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_EXT_INTRA
   for (i = 0; i < PLANE_TYPES; ++i) {
-    fc->ext_intra_probs[i] = vp10_mode_mv_merge_probs(
-        pre_fc->ext_intra_probs[i], counts->ext_intra[i]);
+    fc->ext_intra_probs[i] = av1_mode_mv_merge_probs(pre_fc->ext_intra_probs[i],
+                                                     counts->ext_intra[i]);
   }
 
   for (i = 0; i < INTRA_FILTERS + 1; ++i)
-    vpx_tree_merge_probs(vp10_intra_filter_tree, pre_fc->intra_filter_probs[i],
+    aom_tree_merge_probs(av1_intra_filter_tree, pre_fc->intra_filter_probs[i],
                          counts->intra_filter[i], fc->intra_filter_probs[i]);
 #endif  // CONFIG_EXT_INTRA
 }
@@ -1579,13 +1578,13 @@
   lf->mode_deltas[1] = 0;
 }
 
-void vp10_setup_past_independence(VP10_COMMON *cm) {
+void av1_setup_past_independence(AV1_COMMON *cm) {
   // Reset the segment feature data to the default stats:
   // Features disabled, 0, with delta coding (Default state).
   struct loopfilter *const lf = &cm->lf;
 
   int i;
-  vp10_clearall_segfeatures(&cm->seg);
+  av1_clearall_segfeatures(&cm->seg);
   cm->seg.abs_delta = SEGMENT_DELTADATA;
 
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
@@ -1595,8 +1594,8 @@
     memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
 
   // Reset the mode ref deltas for loop filter
-  vp10_zero(lf->last_ref_deltas);
-  vp10_zero(lf->last_mode_deltas);
+  av1_zero(lf->last_ref_deltas);
+  av1_zero(lf->last_mode_deltas);
   set_default_lf_deltas(lf);
 
   // To force update of the sharpness
@@ -1608,9 +1607,9 @@
   }
 #endif  // CONFIG_LOOP_RESTORATION
 
-  vp10_default_coef_probs(cm);
+  av1_default_coef_probs(cm);
   init_mode_probs(cm->fc);
-  vp10_init_mv_probs(cm);
+  av1_init_mv_probs(cm);
   cm->fc->initialized = 1;
 
   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 4616aa2..e437b3f 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPYMODE_H_
-#define VP10_COMMON_ENTROPYMODE_H_
+#ifndef AV1_COMMON_ENTROPYMODE_H_
+#define AV1_COMMON_ENTROPYMODE_H_
 
 #include "av1/common/entropy.h"
 #include "av1/common/entropymv.h"
 #include "av1/common/filter.h"
 #include "av1/common/seg_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -36,7 +36,7 @@
 #define PALETTE_Y_MODE_CONTEXTS 3
 #define PALETTE_MAX_BLOCK_SIZE (64 * 64)
 
-struct VP10Common;
+struct AV1Common;
 
 struct seg_counts {
   unsigned int tree_total[MAX_SEGMENTS];
@@ -45,58 +45,58 @@
 };
 
 typedef struct frame_contexts {
-  vpx_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
-  vpx_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+  aom_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
+  aom_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
 #if CONFIG_EXT_PARTITION_TYPES
-  vpx_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
+  aom_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
 #else
-  vpx_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+  aom_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
 #endif
-  vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
 #if CONFIG_ANS
   coeff_cdf_model coef_cdfs[TX_SIZES][PLANE_TYPES];
 #endif
-  vpx_prob
+  aom_prob
       switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
                                                          1];
 
 #if CONFIG_REF_MV
-  vpx_prob newmv_prob[NEWMV_MODE_CONTEXTS];
-  vpx_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
-  vpx_prob refmv_prob[REFMV_MODE_CONTEXTS];
-  vpx_prob drl_prob[DRL_MODE_CONTEXTS];
+  aom_prob newmv_prob[NEWMV_MODE_CONTEXTS];
+  aom_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
+  aom_prob refmv_prob[REFMV_MODE_CONTEXTS];
+  aom_prob drl_prob[DRL_MODE_CONTEXTS];
 
 #if CONFIG_EXT_INTER
-  vpx_prob new2mv_prob;
+  aom_prob new2mv_prob;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
 
-  vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
+  aom_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
 #if CONFIG_EXT_INTER
-  vpx_prob
+  aom_prob
       inter_compound_mode_probs[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1];
-  vpx_prob interintra_prob[BLOCK_SIZE_GROUPS];
-  vpx_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
-  vpx_prob wedge_interintra_prob[BLOCK_SIZES];
-  vpx_prob wedge_interinter_prob[BLOCK_SIZES];
+  aom_prob interintra_prob[BLOCK_SIZE_GROUPS];
+  aom_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
+  aom_prob wedge_interintra_prob[BLOCK_SIZES];
+  aom_prob wedge_interinter_prob[BLOCK_SIZES];
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vpx_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
+  aom_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
-  vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
-  vpx_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
+  aom_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+  aom_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+  aom_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
 #if CONFIG_EXT_REFS
-  vpx_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
-  vpx_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
+  aom_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
+  aom_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
 #else
-  vpx_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
+  aom_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
 #endif  // CONFIG_EXT_REFS
-  vpx_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
+  aom_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
 #if CONFIG_VAR_TX
-  vpx_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
+  aom_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
 #endif
-  vpx_prob skip_probs[SKIP_CONTEXTS];
+  aom_prob skip_probs[SKIP_CONTEXTS];
 #if CONFIG_REF_MV
   nmv_context nmvc[NMV_CONTEXTS];
 #else
@@ -104,24 +104,24 @@
 #endif
   int initialized;
 #if CONFIG_EXT_TX
-  vpx_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
-  vpx_prob
+  aom_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
+  aom_prob
       intra_ext_tx_prob[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES -
                                                                       1];
 #else
-  vpx_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
-  vpx_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
+  aom_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
+  aom_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
 #endif  // CONFIG_EXT_TX
 #if CONFIG_SUPERTX
-  vpx_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
+  aom_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
 #endif  // CONFIG_SUPERTX
   struct segmentation_probs seg;
 #if CONFIG_EXT_INTRA
-  vpx_prob ext_intra_probs[PLANE_TYPES];
-  vpx_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
+  aom_prob ext_intra_probs[PLANE_TYPES];
+  aom_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_GLOBAL_MOTION
-  vpx_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
+  aom_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
 #endif  // CONFIG_GLOBAL_MOTION
 } FRAME_CONTEXT;
 
@@ -136,7 +136,7 @@
 #else
   unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
 #endif
-  vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
+  av1_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
   unsigned int
       eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
   unsigned int
@@ -207,61 +207,60 @@
 #endif  // CONFIG_EXT_INTRA
 } FRAME_COUNTS;
 
-extern const vpx_prob
-    vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
-extern const vpx_prob vp10_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
-                                                      [PALETTE_Y_MODE_CONTEXTS];
-extern const vpx_prob vp10_default_palette_uv_mode_prob[2];
-extern const vpx_prob
-    vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob
-    vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob vp10_default_palette_y_color_prob
+extern const aom_prob
+    av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+extern const aom_prob av1_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
+                                                     [PALETTE_Y_MODE_CONTEXTS];
+extern const aom_prob av1_default_palette_uv_mode_prob[2];
+extern const aom_prob
+    av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob
+    av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob av1_default_palette_y_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
-extern const vpx_prob vp10_default_palette_uv_color_prob
+extern const aom_prob av1_default_palette_uv_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
 
-extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
-extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
+extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
 #if CONFIG_EXT_INTER
-extern const vpx_tree_index
-    vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
-extern const vpx_tree_index
-    vp10_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
+extern const aom_tree_index
+    av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
+extern const aom_tree_index
+    av1_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
 #endif  // CONFIG_EXT_INTER
-extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
+extern const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)];
 #if CONFIG_EXT_PARTITION_TYPES
-extern const vpx_tree_index
-    vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
+extern const aom_tree_index
+    av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
 #endif
-extern const vpx_tree_index
-    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
-extern const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
-extern const vpx_tree_index
-    vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
-extern const vpx_tree_index
-    vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
+extern const aom_tree_index
+    av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
+extern const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
+extern const aom_tree_index
+    av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
+extern const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
 #if CONFIG_EXT_INTRA
-extern const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
+extern const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_EXT_TX
-extern const vpx_tree_index
-    vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
-extern const vpx_tree_index
-    vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+    av1_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+    av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
 #else
-extern const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)];
 #endif  // CONFIG_EXT_TX
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-extern const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
+extern const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
-void vp10_setup_past_independence(struct VP10Common *cm);
+void av1_setup_past_independence(struct AV1Common *cm);
 
-void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
-void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
+void av1_adapt_intra_frame_probs(struct AV1Common *cm);
+void av1_adapt_inter_frame_probs(struct AV1Common *cm);
 
-static INLINE int vp10_ceil_log2(int n) {
+static INLINE int av1_ceil_log2(int n) {
   int i = 1, p = 2;
   while (p < n) {
     i++;
@@ -270,11 +269,11 @@
   return i;
 }
 
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
-                                   int c, int n, int *color_order);
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+                                  int c, int n, int *color_order);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMODE_H_
+#endif  // AV1_COMMON_ENTROPYMODE_H_
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index f3dba3f..5abc252 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -14,12 +14,12 @@
 // Integer pel reference mv threshold for use of high-precision 1/8 mv
 #define COMPANDED_MVREF_THRESH 8
 
-const vpx_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
+const aom_tree_index av1_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
   -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
 };
 
 /* clang-format off */
-const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
+const aom_tree_index av1_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
   -MV_CLASS_0, 2,
   -MV_CLASS_1, 4,
   6, 8,
@@ -33,12 +33,12 @@
 };
 /* clang-format on */
 
-const vpx_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
+const aom_tree_index av1_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
   -0, -1,
 };
 
-const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
-                                                                4,  -2, -3 };
+const aom_tree_index av1_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
+                                                               4,  -2, -3 };
 
 static const nmv_context default_nmv_context = {
 #if CONFIG_REF_MV
@@ -115,12 +115,12 @@
 };
 
 #if CONFIG_GLOBAL_MOTION
-const vpx_tree_index
-    vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
+const aom_tree_index
+    av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
       -GLOBAL_ZERO, 2, -GLOBAL_TRANSLATION, 4, -GLOBAL_ROTZOOM, -GLOBAL_AFFINE
     };
 
-static const vpx_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
+static const aom_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
                                                        1] = { 224, 128, 128 };
 #endif  // CONFIG_GLOBAL_MOTION
 
@@ -128,7 +128,7 @@
   return c ? CLASS0_SIZE << (c + 2) : 0;
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset) {
   const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
                               ? MV_CLASS_10
                               : (MV_CLASS_TYPE)log_in_base_2[z >> 3];
@@ -138,7 +138,7 @@
 
 // TODO(jingning): This idle function is intentionally left as is for
 // experimental purpose.
-int vp10_use_mv_hp(const MV *ref) {
+int av1_use_mv_hp(const MV *ref) {
   (void)ref;
   return 1;
 }
@@ -151,7 +151,7 @@
   comp_counts->sign[s] += incr;
   z = (s ? -v : v) - 1; /* magnitude - 1 */
 
-  c = vp10_get_mv_class(z, &o);
+  c = av1_get_mv_class(z, &o);
   comp_counts->classes[c] += incr;
 
   d = (o >> 3);     /* int mv data */
@@ -171,9 +171,9 @@
   }
 }
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
+void av1_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
   if (counts != NULL) {
-    const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
+    const MV_JOINT_TYPE j = av1_get_mv_joint(mv);
 
 #if CONFIG_REF_MV
     ++counts->zero_rmv[j == MV_JOINT_ZERO];
@@ -189,7 +189,7 @@
   }
 }
 
-void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
+void av1_adapt_mv_probs(AV1_COMMON *cm, int allow_hp) {
   int i, j;
 #if CONFIG_REF_MV
   int idx;
@@ -199,10 +199,10 @@
         &cm->frame_contexts[cm->frame_context_idx].nmvc[idx];
     const nmv_context_counts *counts = &cm->counts.mv[idx];
 
-    vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+    aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
                          fc->joints);
 #if CONFIG_REF_MV
-    fc->zero_rmv = vp10_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
+    fc->zero_rmv = av1_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
 #endif
 
     for (i = 0; i < 2; ++i) {
@@ -210,25 +210,25 @@
       const nmv_component *pre_comp = &pre_fc->comps[i];
       const nmv_component_counts *c = &counts->comps[i];
 
-      comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
-      vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+      comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+      aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
                            comp->classes);
-      vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+      aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
                            comp->class0);
 
       for (j = 0; j < MV_OFFSET_BITS; ++j)
-        comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+        comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
 
       for (j = 0; j < CLASS0_SIZE; ++j)
-        vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+        aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
                              c->class0_fp[j], comp->class0_fp[j]);
 
-      vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+      aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
 
       if (allow_hp) {
         comp->class0_hp =
-            vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
-        comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+            av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+        comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
       }
     }
   }
@@ -237,7 +237,7 @@
   const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
   const nmv_context_counts *counts = &cm->counts.mv;
 
-  vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+  aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
                        fc->joints);
 
   for (i = 0; i < 2; ++i) {
@@ -245,31 +245,31 @@
     const nmv_component *pre_comp = &pre_fc->comps[i];
     const nmv_component_counts *c = &counts->comps[i];
 
-    comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
-    vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+    comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+    aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
                          comp->classes);
-    vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+    aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
                          comp->class0);
 
     for (j = 0; j < MV_OFFSET_BITS; ++j)
-      comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+      comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
 
     for (j = 0; j < CLASS0_SIZE; ++j)
-      vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+      aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
                            c->class0_fp[j], comp->class0_fp[j]);
 
-    vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+    aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
 
     if (allow_hp) {
       comp->class0_hp =
-          vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
-      comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+          av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+      comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
     }
   }
 #endif
 }
 
-void vp10_init_mv_probs(VP10_COMMON *cm) {
+void av1_init_mv_probs(AV1_COMMON *cm) {
 #if CONFIG_REF_MV
   int i;
   for (i = 0; i < NMV_CONTEXTS; ++i) cm->fc->nmvc[i] = default_nmv_context;
@@ -277,6 +277,6 @@
   cm->fc->nmvc = default_nmv_context;
 #endif
 #if CONFIG_GLOBAL_MOTION
-  vp10_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
+  av1_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
 #endif  // CONFIG_GLOBAL_MOTION
 }
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index c809a67..c6e0855 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPYMV_H_
-#define VP10_COMMON_ENTROPYMV_H_
+#ifndef AV1_COMMON_ENTROPYMV_H_
+#define AV1_COMMON_ENTROPYMV_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "aom_dsp/prob.h"
 
@@ -21,12 +21,12 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-void vp10_init_mv_probs(struct VP10Common *cm);
+void av1_init_mv_probs(struct AV1Common *cm);
 
-void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp);
-int vp10_use_mv_hp(const MV *ref);
+void av1_adapt_mv_probs(struct AV1Common *cm, int usehp);
+int av1_use_mv_hp(const MV *ref);
 
 #define MV_UPDATE_PROB 252
 
@@ -76,31 +76,31 @@
 #define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
 #define MV_LOW (-(1 << MV_IN_USE_BITS))
 
-extern const vpx_tree_index vp10_mv_joint_tree[];
-extern const vpx_tree_index vp10_mv_class_tree[];
-extern const vpx_tree_index vp10_mv_class0_tree[];
-extern const vpx_tree_index vp10_mv_fp_tree[];
+extern const aom_tree_index av1_mv_joint_tree[];
+extern const aom_tree_index av1_mv_class_tree[];
+extern const aom_tree_index av1_mv_class0_tree[];
+extern const aom_tree_index av1_mv_fp_tree[];
 
 typedef struct {
-  vpx_prob sign;
-  vpx_prob classes[MV_CLASSES - 1];
-  vpx_prob class0[CLASS0_SIZE - 1];
-  vpx_prob bits[MV_OFFSET_BITS];
-  vpx_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
-  vpx_prob fp[MV_FP_SIZE - 1];
-  vpx_prob class0_hp;
-  vpx_prob hp;
+  aom_prob sign;
+  aom_prob classes[MV_CLASSES - 1];
+  aom_prob class0[CLASS0_SIZE - 1];
+  aom_prob bits[MV_OFFSET_BITS];
+  aom_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
+  aom_prob fp[MV_FP_SIZE - 1];
+  aom_prob class0_hp;
+  aom_prob hp;
 } nmv_component;
 
 typedef struct {
-  vpx_prob joints[MV_JOINTS - 1];
+  aom_prob joints[MV_JOINTS - 1];
 #if CONFIG_REF_MV
-  vpx_prob zero_rmv;
+  aom_prob zero_rmv;
 #endif
   nmv_component comps[2];
 } nmv_context;
 
-static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
+static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {
   if (mv->row == 0) {
     return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
   } else {
@@ -108,7 +108,7 @@
   }
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset);
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset);
 
 typedef struct {
   unsigned int sign[2];
@@ -129,15 +129,15 @@
   nmv_component_counts comps[2];
 } nmv_context_counts;
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
+void av1_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
 
 #if CONFIG_GLOBAL_MOTION
-extern const vpx_tree_index
-    vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
+extern const aom_tree_index
+    av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
 #endif  // CONFIG_GLOBAL_MOTION
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMV_H_
+#endif  // AV1_COMMON_ENTROPYMV_H_
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 8cdec8e..899c8b9 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENUMS_H_
-#define VP10_COMMON_ENUMS_H_
+#ifndef AV1_COMMON_ENUMS_H_
+#define AV1_COMMON_ENUMS_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -211,20 +211,20 @@
 #endif                  // CONFIG_EXT_TX
 
 typedef enum {
-  VPX_LAST_FLAG = 1 << 0,
+  AOM_LAST_FLAG = 1 << 0,
 #if CONFIG_EXT_REFS
-  VPX_LAST2_FLAG = 1 << 1,
-  VPX_LAST3_FLAG = 1 << 2,
-  VPX_GOLD_FLAG = 1 << 3,
-  VPX_BWD_FLAG = 1 << 4,
-  VPX_ALT_FLAG = 1 << 5,
-  VPX_REFFRAME_ALL = (1 << 6) - 1
+  AOM_LAST2_FLAG = 1 << 1,
+  AOM_LAST3_FLAG = 1 << 2,
+  AOM_GOLD_FLAG = 1 << 3,
+  AOM_BWD_FLAG = 1 << 4,
+  AOM_ALT_FLAG = 1 << 5,
+  AOM_REFFRAME_ALL = (1 << 6) - 1
 #else
-  VPX_GOLD_FLAG = 1 << 1,
-  VPX_ALT_FLAG = 1 << 2,
-  VPX_REFFRAME_ALL = (1 << 3) - 1
+  AOM_GOLD_FLAG = 1 << 1,
+  AOM_ALT_FLAG = 1 << 2,
+  AOM_REFFRAME_ALL = (1 << 3) - 1
 #endif  // CONFIG_EXT_REFS
-} VPX_REFFRAME;
+} AOM_REFFRAME;
 
 typedef enum { PLANE_TYPE_Y = 0, PLANE_TYPE_UV = 1, PLANE_TYPES } PLANE_TYPE;
 
@@ -435,4 +435,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENUMS_H_
+#endif  // AV1_COMMON_ENUMS_H_
diff --git a/av1/common/filter.c b/av1/common/filter.c
index 46eca5d..4881642 100644
--- a/av1/common/filter.c
+++ b/av1/common/filter.c
@@ -186,7 +186,7 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if CONFIG_EXT_INTRA
-const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS] = {
+const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS] = {
   bilinear_filters,         // INTRA_FILTER_LINEAR
   sub_pel_filters_8,        // INTRA_FILTER_8TAP
   sub_pel_filters_8sharp,   // INTRA_FILTER_8TAP_SHARP
@@ -196,7 +196,7 @@
 
 #if CONFIG_EXT_INTERP
 static const InterpFilterParams
-    vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+    av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
       { (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_10sharp, 10, SUBPEL_SHIFTS },
@@ -206,7 +206,7 @@
     };
 #else
 static const InterpFilterParams
-    vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+    av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
       { (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8sharp, SUBPEL_TAPS, SUBPEL_SHIFTS },
@@ -215,32 +215,31 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if USE_TEMPORALFILTER_12TAP
-static const InterpFilterParams vp10_interp_temporalfilter_12tap = {
+static const InterpFilterParams av1_interp_temporalfilter_12tap = {
   (const int16_t *)sub_pel_filters_temporalfilter_12, 12, SUBPEL_SHIFTS
 };
 #endif  // USE_TEMPORALFILTER_12TAP
 
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
     const INTERP_FILTER interp_filter) {
 #if USE_TEMPORALFILTER_12TAP
   if (interp_filter == TEMPORALFILTER_12TAP)
-    return vp10_interp_temporalfilter_12tap;
+    return av1_interp_temporalfilter_12tap;
 #endif  // USE_TEMPORALFILTER_12TAP
-  return vp10_interp_filter_params_list[interp_filter];
+  return av1_interp_filter_params_list[interp_filter];
 }
 
-const int16_t *vp10_get_interp_filter_kernel(
-    const INTERP_FILTER interp_filter) {
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter) {
 #if USE_TEMPORALFILTER_12TAP
   if (interp_filter == TEMPORALFILTER_12TAP)
-    return vp10_interp_temporalfilter_12tap.filter_ptr;
+    return av1_interp_temporalfilter_12tap.filter_ptr;
 #endif  // USE_TEMPORALFILTER_12TAP
-  return (const int16_t *)vp10_interp_filter_params_list[interp_filter]
+  return (const int16_t *)av1_interp_filter_params_list[interp_filter]
       .filter_ptr;
 }
 
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
-                                                     int index) {
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+                                                    int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSSE3
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
     return &sub_pel_filters_12sharp_signal_dir[index][0];
@@ -259,7 +258,7 @@
   return NULL;
 }
 
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSSE3
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
@@ -279,8 +278,8 @@
   return NULL;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSE4_1
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 39fad23..c5a8521 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_FILTER_H_
-#define VP10_COMMON_FILTER_H_
+#ifndef AV1_COMMON_FILTER_H_
+#define AV1_COMMON_FILTER_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -65,7 +65,7 @@
   INTRA_FILTERS,
 } INTRA_FILTER;
 
-extern const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS];
+extern const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS];
 #endif  // CONFIG_EXT_INTRA
 
 typedef struct InterpFilterParams {
@@ -74,26 +74,26 @@
   uint16_t subpel_shifts;
 } InterpFilterParams;
 
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
     const INTERP_FILTER interp_filter);
 
-const int16_t *vp10_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
 
-static INLINE const int16_t *vp10_get_interp_filter_subpel_kernel(
+static INLINE const int16_t *av1_get_interp_filter_subpel_kernel(
     const InterpFilterParams filter_params, const int subpel) {
   return filter_params.filter_ptr + filter_params.taps * subpel;
 }
 
-static INLINE int vp10_is_interpolating_filter(
+static INLINE int av1_is_interpolating_filter(
     const INTERP_FILTER interp_filter) {
-  const InterpFilterParams ip = vp10_get_interp_filter_params(interp_filter);
+  const InterpFilterParams ip = av1_get_interp_filter_params(interp_filter);
   return (ip.filter_ptr[ip.taps / 2 - 1] == 128);
 }
 
 #if USE_TEMPORALFILTER_12TAP
 extern const int8_t sub_pel_filters_temporalfilter_12_signal_dir[15][2][16];
 extern const int8_t sub_pel_filters_temporalfilter_12_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 extern const int16_t
     sub_pel_filters_temporalfilter_12_highbd_ver_signal_dir[15][6][8];
 #endif
@@ -104,24 +104,24 @@
 extern const int8_t sub_pel_filters_10sharp_signal_dir[15][2][16];
 extern const int8_t sub_pel_filters_12sharp_ver_signal_dir[15][6][16];
 extern const int8_t sub_pel_filters_10sharp_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 extern const int16_t sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8];
 extern const int16_t sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8];
 #endif
 #endif
 
 typedef const int8_t (*SubpelFilterCoeffs)[16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef const int16_t (*HbdSubpelFilterCoeffs)[8];
 #endif
 
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
-                                                     int index);
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+                                                    int index);
 
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index);
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index);
 #endif
 
@@ -129,4 +129,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FILTER_H_
+#endif  // AV1_COMMON_FILTER_H_
diff --git a/av1/common/frame_buffers.c b/av1/common/frame_buffers.c
index 5c736a9..89f4e4f 100644
--- a/av1/common/frame_buffers.c
+++ b/av1/common/frame_buffers.c
@@ -11,34 +11,34 @@
 #include <assert.h>
 
 #include "av1/common/frame_buffers.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
   assert(list != NULL);
-  vp10_free_internal_frame_buffers(list);
+  av1_free_internal_frame_buffers(list);
 
   list->num_internal_frame_buffers =
-      VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
-  list->int_fb = (InternalFrameBuffer *)vpx_calloc(
+      AOM_MAXIMUM_REF_BUFFERS + AOM_MAXIMUM_WORK_BUFFERS;
+  list->int_fb = (InternalFrameBuffer *)aom_calloc(
       list->num_internal_frame_buffers, sizeof(*list->int_fb));
   return (list->int_fb == NULL);
 }
 
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list) {
   int i;
 
   assert(list != NULL);
 
   for (i = 0; i < list->num_internal_frame_buffers; ++i) {
-    vpx_free(list->int_fb[i].data);
+    aom_free(list->int_fb[i].data);
     list->int_fb[i].data = NULL;
   }
-  vpx_free(list->int_fb);
+  aom_free(list->int_fb);
   list->int_fb = NULL;
 }
 
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                          vpx_codec_frame_buffer_t *fb) {
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+                         aom_codec_frame_buffer_t *fb) {
   int i;
   InternalFrameBufferList *const int_fb_list =
       (InternalFrameBufferList *)cb_priv;
@@ -53,7 +53,7 @@
 
   if (int_fb_list->int_fb[i].size < min_size) {
     int_fb_list->int_fb[i].data =
-        (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size);
+        (uint8_t *)aom_realloc(int_fb_list->int_fb[i].data, min_size);
     if (!int_fb_list->int_fb[i].data) return -1;
 
     // This memset is needed for fixing valgrind error from C loop filter
@@ -72,7 +72,7 @@
   return 0;
 }
 
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) {
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
   InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
   (void)cb_priv;
   if (int_fb) int_fb->in_use = 0;
diff --git a/av1/common/frame_buffers.h b/av1/common/frame_buffers.h
index 6667132..63253be 100644
--- a/av1/common/frame_buffers.h
+++ b/av1/common/frame_buffers.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_FRAME_BUFFERS_H_
-#define VP10_COMMON_FRAME_BUFFERS_H_
+#ifndef AV1_COMMON_FRAME_BUFFERS_H_
+#define AV1_COMMON_FRAME_BUFFERS_H_
 
-#include "aom/vpx_frame_buffer.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_frame_buffer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -30,24 +30,24 @@
 } InternalFrameBufferList;
 
 // Initializes |list|. Returns 0 on success.
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list);
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Free any data allocated to the frame buffers.
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Callback used by libaom to request an external frame buffer. |cb_priv|
 // Callback private data, which points to an InternalFrameBufferList.
 // |min_size| is the minimum size in bytes needed to decode the next frame.
 // |fb| pointer to the frame buffer.
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                          vpx_codec_frame_buffer_t *fb);
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+                         aom_codec_frame_buffer_t *fb);
 
 // Callback used by libaom when there are no references to the frame buffer.
 // |cb_priv| is not used. |fb| pointer to the frame buffer.
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb);
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FRAME_BUFFERS_H_
+#endif  // AV1_COMMON_FRAME_BUFFERS_H_
diff --git a/av1/common/idct.c b/av1/common/idct.c
index 83b44d5..536e346 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -10,19 +10,19 @@
 
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/blockd.h"
 #include "av1/common/enums.h"
 #include "av1/common/idct.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
 int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
                  const TX_SIZE tx_size) {
   (void)tx_type;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     return txsize_sqr_up_map[tx_size] == TX_32X32;
   }
@@ -70,7 +70,7 @@
   // Note overall scaling factor is 4 times orthogonal
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_iidtx4_c(const tran_low_t *input, tran_low_t *output,
                             int bd) {
   int i;
@@ -113,10 +113,10 @@
     inputhalf[i] =
         HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[i] * Sqrt2), bd);
   }
-  vpx_highbd_idct16_c(inputhalf, output + 16, bd);
+  aom_highbd_idct16_c(inputhalf, output + 16, bd);
   // Note overall scaling factor is 4 times orthogonal
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Inverse identity transform and add.
 static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
@@ -177,7 +177,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_idst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
@@ -255,7 +255,7 @@
 }
 
 void highbd_idst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
-  // vp9_highbd_igentx16(input, output, bd, Tx16);
+  // av1_highbd_igentx16(input, output, bd, Tx16);
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -474,11 +474,11 @@
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_TX
 
-void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_4[] = {
     { idct4_c, idct4_c },    // DCT_DCT
     { iadst4_c, idct4_c },   // ADST_DCT
@@ -541,8 +541,8 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_4x8[] = {
     { idct8_c, idct4_c },    // DCT_DCT
     { iadst8_c, idct4_c },   // ADST_DCT
@@ -594,8 +594,8 @@
   }
 }
 
-void vp10_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_8x4[] = {
     { idct4_c, idct8_c },    // DCT_DCT
     { iadst4_c, idct8_c },   // ADST_DCT
@@ -647,8 +647,8 @@
   }
 }
 
-void vp10_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                            int tx_type) {
+void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                           int tx_type) {
   static const transform_2d IHT_8x16[] = {
     { idct16_c, idct8_c },    // DCT_DCT
     { iadst16_c, idct8_c },   // ADST_DCT
@@ -700,8 +700,8 @@
   }
 }
 
-void vp10_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                            int tx_type) {
+void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                           int tx_type) {
   static const transform_2d IHT_16x8[] = {
     { idct8_c, idct16_c },    // DCT_DCT
     { iadst8_c, idct16_c },   // ADST_DCT
@@ -753,8 +753,8 @@
   }
 }
 
-void vp10_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_16x32[] = {
     { idct32_c, idct16_c },         // DCT_DCT
     { ihalfright32_c, idct16_c },   // ADST_DCT
@@ -806,8 +806,8 @@
   }
 }
 
-void vp10_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_32x16[] = {
     { idct16_c, idct32_c },         // DCT_DCT
     { iadst16_c, idct32_c },        // ADST_DCT
@@ -860,8 +860,8 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_8[] = {
     { idct8_c, idct8_c },    // DCT_DCT
     { iadst8_c, idct8_c },   // ADST_DCT
@@ -923,8 +923,8 @@
   }
 }
 
-void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_16[] = {
     { idct16_c, idct16_c },    // DCT_DCT
     { iadst16_c, idct16_c },   // ADST_DCT
@@ -987,8 +987,8 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
-                              int stride, int tx_type) {
+void av1_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                             int tx_type) {
   static const transform_2d IHT_32[] = {
     { idct32_c, idct32_c },              // DCT_DCT
     { ihalfright32_c, idct32_c },        // ADST_DCT
@@ -1048,82 +1048,82 @@
 #endif  // CONFIG_EXT_TX
 
 // idct
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   if (eob > 1)
-    vpx_idct4x4_16_add(input, dest, stride);
+    aom_idct4x4_16_add(input, dest, stride);
   else
-    vpx_idct4x4_1_add(input, dest, stride);
+    aom_idct4x4_1_add(input, dest, stride);
 }
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   if (eob > 1)
-    vpx_iwht4x4_16_add(input, dest, stride);
+    aom_iwht4x4_16_add(input, dest, stride);
   else
-    vpx_iwht4x4_1_add(input, dest, stride);
+    aom_iwht4x4_1_add(input, dest, stride);
 }
 
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   if (eob == 1)
     // DC only DCT coefficient
-    vpx_idct8x8_1_add(input, dest, stride);
+    aom_idct8x8_1_add(input, dest, stride);
   else if (eob <= 12)
-    vpx_idct8x8_12_add(input, dest, stride);
+    aom_idct8x8_12_add(input, dest, stride);
   else
-    vpx_idct8x8_64_add(input, dest, stride);
+    aom_idct8x8_64_add(input, dest, stride);
 }
 
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob) {
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob) {
   /* The calculation can be simplified if there are not many non-zero dct
    * coefficients. Use eobs to separate different cases. */
   if (eob == 1) /* DC only DCT coefficient. */
-    vpx_idct16x16_1_add(input, dest, stride);
+    aom_idct16x16_1_add(input, dest, stride);
   else if (eob <= 10)
-    vpx_idct16x16_10_add(input, dest, stride);
+    aom_idct16x16_10_add(input, dest, stride);
   else
-    vpx_idct16x16_256_add(input, dest, stride);
+    aom_idct16x16_256_add(input, dest, stride);
 }
 
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob) {
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob) {
   if (eob == 1)
-    vpx_idct32x32_1_add(input, dest, stride);
+    aom_idct32x32_1_add(input, dest, stride);
   else if (eob <= 34)
     // non-zero coeff only in upper-left 8x8
-    vpx_idct32x32_34_add(input, dest, stride);
+    aom_idct32x32_34_add(input, dest, stride);
   else
-    vpx_idct32x32_1024_add(input, dest, stride);
+    aom_idct32x32_1024_add(input, dest, stride);
 }
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type, int lossless) {
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_iwht4x4_add(input, dest, stride, eob);
+    av1_iwht4x4_add(input, dest, stride, eob);
     return;
   }
 
   switch (tx_type) {
-    case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct4x4_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
-    case FLIPADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+    case FLIPADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
     case V_DCT:
     case H_DCT:
     case V_ADST:
@@ -1131,7 +1131,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht4x4_16_add_c(input, dest, stride, tx_type);
+      av1_iht4x4_16_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 4, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1140,56 +1140,56 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
+  (void)eob;
+  av1_iht4x8_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
+  (void)eob;
+  av1_iht8x4_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht4x8_32_add(input, dest, stride, tx_type);
+  av1_iht8x16_128_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht8x4_32_add(input, dest, stride, tx_type);
+  av1_iht16x8_128_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
                             int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht8x16_128_add(input, dest, stride, tx_type);
+  av1_iht16x32_512_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
                             int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht16x8_128_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
-  (void)eob;
-  vp10_iht16x32_512_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
-  (void)eob;
-  vp10_iht32x16_512_add(input, dest, stride, tx_type);
+  av1_iht32x16_512_add(input, dest, stride, tx_type);
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct8x8_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
-    case FLIPADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    case FLIPADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
     case V_DCT:
     case H_DCT:
     case V_ADST:
@@ -1197,7 +1197,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht8x8_64_add_c(input, dest, stride, tx_type);
+      av1_iht8x8_64_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 8, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1205,20 +1205,20 @@
   }
 }
 
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct16x16_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht16x16_256_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_iht16x16_256_add(input, dest, stride, tx_type);
+      av1_iht16x16_256_add(input, dest, stride, tx_type);
       break;
     case V_DCT:
     case H_DCT:
@@ -1227,7 +1227,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht16x16_256_add_c(input, dest, stride, tx_type);
+      av1_iht16x16_256_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 16, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1235,10 +1235,10 @@
   }
 }
 
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct32x32_add(input, dest, stride, eob); break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
     case DCT_ADST:
@@ -1254,7 +1254,7 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_iht32x32_1024_add_c(input, dest, stride, tx_type);
+      av1_iht32x32_1024_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 32, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1262,27 +1262,27 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4[] = {
-    { vpx_highbd_idct4_c, vpx_highbd_idct4_c },    // DCT_DCT
-    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },   // ADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },   // DCT_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // ADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },   // FLIPADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // FLIPADST_ADST
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
     { highbd_iidtx4_c, highbd_iidtx4_c },          // IDTX
-    { vpx_highbd_idct4_c, highbd_iidtx4_c },       // V_DCT
-    { highbd_iidtx4_c, vpx_highbd_idct4_c },       // H_DCT
-    { vpx_highbd_iadst4_c, highbd_iidtx4_c },      // V_ADST
-    { highbd_iidtx4_c, vpx_highbd_iadst4_c },      // H_ADST
-    { vpx_highbd_iadst4_c, highbd_iidtx4_c },      // V_FLIPADST
-    { highbd_iidtx4_c, vpx_highbd_iadst4_c },      // H_FLIPADST
+    { aom_highbd_idct4_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_FLIPADST
 #endif                                             // CONFIG_EXT_TX
   };
 
@@ -1330,25 +1330,25 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4x8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct4_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct4_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst4_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // ADST_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_idct4_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst4_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // FLIPADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx4_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx4_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct4_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx4_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst4_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx4_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst4_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_FLIPADST
   };
   const int n = 4;
   const int n2 = 8;
@@ -1388,25 +1388,25 @@
   }
 }
 
-void vp10_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x4[] = {
-    { vpx_highbd_idct4_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst4_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // ADST_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx4_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct4_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx4_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst4_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx4_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst4_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx4_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct4_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_FLIPADST
   };
   const int n = 4;
   const int n2 = 8;
@@ -1446,25 +1446,25 @@
   }
 }
 
-void vp10_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int tx_type, int bd) {
+void av1_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // ADST_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx16_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx16_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct16_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_FLIPADST
   };
   const int n = 8;
   const int n2 = 16;
@@ -1503,25 +1503,25 @@
   }
 }
 
-void vp10_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int tx_type, int bd) {
+void av1_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct16_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst16_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // ADST_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst16_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx16_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx16_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct16_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst16_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst16_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_FLIPADST
   };
   const int n = 8;
   const int n2 = 16;
@@ -1560,25 +1560,25 @@
   }
 }
 
-void vp10_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x32[] = {
-    { vpx_highbd_idct32_c, vpx_highbd_idct16_c },     // DCT_DCT
-    { highbd_ihalfright32_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct32_c, vpx_highbd_iadst16_c },    // DCT_ADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // ADST_ADST
-    { highbd_ihalfright32_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct32_c, vpx_highbd_iadst16_c },    // DCT_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct16_c },     // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_ADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_ADST
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx32_c, highbd_iidtx16_c },           // IDTX
-    { vpx_highbd_idct32_c, highbd_iidtx16_c },        // V_DCT
-    { highbd_iidtx32_c, vpx_highbd_idct16_c },        // H_DCT
+    { aom_highbd_idct32_c, highbd_iidtx16_c },        // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct16_c },        // H_DCT
     { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx32_c, vpx_highbd_iadst16_c },       // H_ADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_ADST
     { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx32_c, vpx_highbd_iadst16_c },       // H_FLIPADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_FLIPADST
   };
   const int n = 16;
   const int n2 = 32;
@@ -1617,24 +1617,24 @@
   }
 }
 
-void vp10_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32x16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct32_c },     // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct32_c },    // ADST_DCT
-    { vpx_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_idct32_c },    // FLIPADST_DCT
-    { vpx_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct32_c },     // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx32_c },           // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx32_c },        // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct32_c },        // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx32_c },       // V_ADST
+    { aom_highbd_idct16_c, highbd_iidtx32_c },        // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct32_c },        // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_ADST
     { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx32_c },       // V_FLIPADST
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_FLIPADST
     { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_FLIPADST
   };
   const int n = 16;
@@ -1675,26 +1675,26 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst8_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_FLIPADST
 #endif                                             // CONFIG_EXT_TX
   };
 
@@ -1741,26 +1741,26 @@
   }
 }
 
-void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct16_c },    // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst16_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst16_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst16_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx16_c },          // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx16_c },       // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct16_c },       // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx16_c, vpx_highbd_iadst16_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx16_c, vpx_highbd_iadst16_c },      // H_FLIPADST
+    { aom_highbd_idct16_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_FLIPADST
 #endif                                               // CONFIG_EXT_TX
   };
 
@@ -1808,21 +1808,21 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int tx_type, int bd) {
+void av1_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32[] = {
-    { vpx_highbd_idct32_c, vpx_highbd_idct32_c },      // DCT_DCT
-    { highbd_ihalfright32_c, vpx_highbd_idct32_c },    // ADST_DCT
-    { vpx_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct32_c },      // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_ADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_ADST
-    { highbd_ihalfright32_c, vpx_highbd_idct32_c },    // FLIPADST_DCT
-    { vpx_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_ADST
     { highbd_iidtx32_c, highbd_iidtx32_c },            // IDTX
-    { vpx_highbd_idct32_c, highbd_iidtx32_c },         // V_DCT
-    { highbd_iidtx32_c, vpx_highbd_idct32_c },         // H_DCT
+    { aom_highbd_idct32_c, highbd_iidtx32_c },         // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct32_c },         // H_DCT
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_ADST
     { highbd_iidtx32_c, highbd_ihalfright32_c },       // H_ADST
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_FLIPADST
@@ -1872,73 +1872,73 @@
 #endif  // CONFIG_EXT_TX
 
 // idct
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   if (eob > 1)
-    vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
+    aom_highbd_idct4x4_16_add(input, dest, stride, bd);
   else
-    vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
+    aom_highbd_idct4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   if (eob > 1)
-    vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
+    aom_highbd_iwht4x4_16_add(input, dest, stride, bd);
   else
-    vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
+    aom_highbd_iwht4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   // DC only DCT coefficient
   if (eob == 1) {
-    vpx_highbd_idct8x8_1_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_1_add(input, dest, stride, bd);
   } else if (eob <= 10) {
-    vpx_highbd_idct8x8_10_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_10_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct8x8_64_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_64_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd) {
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd) {
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to separate different cases.
   // DC only DCT coefficient.
   if (eob == 1) {
-    vpx_highbd_idct16x16_1_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_1_add(input, dest, stride, bd);
   } else if (eob <= 10) {
-    vpx_highbd_idct16x16_10_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_10_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct16x16_256_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_256_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd) {
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd) {
   // Non-zero coeff only in upper-left 8x8
   if (eob == 1) {
-    vpx_highbd_idct32x32_1_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_1_add(input, dest, stride, bd);
   } else if (eob <= 34) {
-    vpx_highbd_idct32x32_34_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_34_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct32x32_1024_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_1024_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type,
-                                  int lossless) {
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type,
+                                 int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd);
+    av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
     return;
   }
 
@@ -1947,8 +1947,8 @@
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -1956,8 +1956,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -1966,7 +1966,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 4, tx_type, bd);
@@ -1977,60 +1977,57 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
+  (void)eob;
+  av1_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
+  (void)eob;
+  av1_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
                                    int stride, int eob, int bd,
                                    TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
                                    int stride, int eob, int bd,
                                    TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
-  (void)eob;
-  vp10_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
-  (void)eob;
-  vp10_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd,
-                                  TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -2038,8 +2035,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -2048,7 +2045,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 8, tx_type, bd);
@@ -2058,17 +2055,17 @@
   }
 }
 
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                tx_type, bd);
+      av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -2076,8 +2073,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                tx_type, bd);
+      av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               tx_type, bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -2086,7 +2083,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 16, tx_type, bd);
@@ -2096,14 +2093,14 @@
   }
 }
 
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
-      vp10_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                DCT_DCT, bd);
+      av1_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               DCT_DCT, bd);
       break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
@@ -2120,7 +2117,7 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 32, tx_type, bd);
@@ -2129,7 +2126,7 @@
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                   INV_TXFM_PARAM *inv_txfm_param) {
@@ -2140,45 +2137,39 @@
 
   switch (tx_size) {
     case TX_32X32:
-      vp10_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
       break;
     case TX_16X16:
-      vp10_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
       break;
-    case TX_8X8:
-      vp10_inv_txfm_add_8x8(input, dest, stride, eob, tx_type);
-      break;
+    case TX_8X8: av1_inv_txfm_add_8x8(input, dest, stride, eob, tx_type); break;
 #if CONFIG_EXT_TX
-    case TX_4X8:
-      vp10_inv_txfm_add_4x8(input, dest, stride, eob, tx_type);
-      break;
-    case TX_8X4:
-      vp10_inv_txfm_add_8x4(input, dest, stride, eob, tx_type);
-      break;
+    case TX_4X8: av1_inv_txfm_add_4x8(input, dest, stride, eob, tx_type); break;
+    case TX_8X4: av1_inv_txfm_add_8x4(input, dest, stride, eob, tx_type); break;
     case TX_8X16:
-      vp10_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
       break;
     case TX_16X8:
-      vp10_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
       break;
     case TX_16X32:
-      vp10_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
       break;
     case TX_32X16:
-      vp10_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
       break;
 #endif  // CONFIG_EXT_TX
     case TX_4X4:
-      // this is like vp10_short_idct4x4 but has a special case around eob<=1
+      // this is like av1_short_idct4x4 but has a special case around eob<=1
       // which is significant (not just an optimization) for the lossless
       // case.
-      vp10_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
+      av1_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
       break;
     default: assert(0 && "Invalid transform size"); break;
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                          INV_TXFM_PARAM *inv_txfm_param) {
   const TX_TYPE tx_type = inv_txfm_param->tx_type;
@@ -2189,42 +2180,42 @@
 
   switch (tx_size) {
     case TX_32X32:
-      vp10_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X16:
-      vp10_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X8:
-      vp10_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
       break;
 #if CONFIG_EXT_TX
     case TX_4X8:
-      vp10_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X4:
-      vp10_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X16:
-      vp10_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X8:
-      vp10_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X32:
-      vp10_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_32X16:
-      vp10_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
       break;
 #endif  // CONFIG_EXT_TX
     case TX_4X4:
-      // this is like vp10_short_idct4x4 but has a special case around eob<=1
+      // this is like av1_short_idct4x4 but has a special case around eob<=1
       // which is significant (not just an optimization) for the lossless
       // case.
-      vp10_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
-                                   lossless);
+      av1_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
+                                  lossless);
       break;
     default: assert(0 && "Invalid transform size"); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/idct.h b/av1/common/idct.h
index 9b3be62..58ee0c7 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_IDCT_H_
-#define VP10_COMMON_IDCT_H_
+#ifndef AV1_COMMON_IDCT_H_
+#define AV1_COMMON_IDCT_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/blockd.h"
 #include "av1/common/common.h"
 #include "av1/common/enums.h"
@@ -30,7 +30,7 @@
   TX_SIZE tx_size;
   int eob;
   int lossless;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int bd;
 #endif
 } INV_TXFM_PARAM;
@@ -41,78 +41,78 @@
   transform_1d cols, rows;  // vertical and horizontal
 } transform_2d;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
 typedef struct {
   highbd_transform_1d cols, rows;  // vertical and horizontal
 } highbd_transform_2d;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define MAX_TX_SCALE 1
 int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
                  const TX_SIZE tx_size);
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob);
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob);
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob);
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob);
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type, int lossless);
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type, int lossless);
 #if CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
 #endif  // CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type);
 void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                   INV_TXFM_PARAM *inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd);
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd);
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type,
-                                  int lossless);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd);
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd);
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type,
+                                 int lossless);
 #if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
 #endif  // CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type);
 void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                          INV_TXFM_PARAM *inv_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_IDCT_H_
+#endif  // AV1_COMMON_IDCT_H_
diff --git a/av1/common/intra_filters.h b/av1/common/intra_filters.h
index 021fb8e..350f7ca 100644
--- a/av1/common/intra_filters.h
+++ b/av1/common/intra_filters.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_INTRA_FILTERS_H_
-#define VP10_COMMON_INTRA_FILTERS_H_
+#ifndef AV1_COMMON_INTRA_FILTERS_H_
+#define AV1_COMMON_INTRA_FILTERS_H_
 
 #define FILTER_INTRA_PREC_BITS (10)
 
@@ -64,4 +64,4 @@
   },
 };
 
-#endif  // VP10_COMMON_INTRA_FILTERS_H_
+#endif  // AV1_COMMON_INTRA_FILTERS_H_
diff --git a/av1/common/loopfilter.c b/av1/common/loopfilter.c
index e4636a5..906223f 100644
--- a/av1/common/loopfilter.c
+++ b/av1/common/loopfilter.c
@@ -10,14 +10,14 @@
 
 #include <math.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/loopfilter.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/reconinter.h"
 #include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/seg_common.h"
@@ -241,7 +241,7 @@
 static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                 const MB_MODE_INFO *mbmi) {
 #if CONFIG_SUPERTX
-  const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+  const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx);
   assert(
       IMPLIES(supertx_enabled(mbmi), mbmi->segment_id_supertx != MAX_SEGMENTS));
   assert(IMPLIES(supertx_enabled(mbmi),
@@ -252,7 +252,7 @@
   return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
 }
 
-void vp10_loop_filter_init(VP10_COMMON *cm) {
+void av1_loop_filter_init(AV1_COMMON *cm) {
   loop_filter_info_n *lfi = &cm->lf_info;
   struct loopfilter *lf = &cm->lf;
   int lvl;
@@ -266,7 +266,7 @@
     memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
 }
 
-void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
+void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl) {
   int seg_id;
   // n_shift is the multiplier for lf_deltas
   // the multiplier is 1 for when filter_lvl is between 0 and 31;
@@ -341,52 +341,52 @@
     if (mask & 1) {
       if ((mask_16x16_0 | mask_16x16_1) & 1) {
         if ((mask_16x16_0 & mask_16x16_1) & 1) {
-          vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr);
         } else if (mask_16x16_0 & 1) {
-          vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                               lfi1->hev_thr);
         }
       }
 
       if ((mask_8x8_0 | mask_8x8_1) & 1) {
         if ((mask_8x8_0 & mask_8x8_1) & 1) {
-          vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_8x8_0 & 1) {
-          vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
 
       if ((mask_4x4_0 | mask_4x4_1) & 1) {
         if ((mask_4x4_0 & mask_4x4_1) & 1) {
-          vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_4x4_0 & 1) {
-          vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
 
       if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
         if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
-          vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_4x4_int_0 & 1) {
-          vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                              lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
@@ -405,7 +405,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert_row2(
     int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
     unsigned int mask_8x8_l, unsigned int mask_4x4_l,
@@ -434,55 +434,55 @@
     if (mask & 1) {
       if ((mask_16x16_0 | mask_16x16_1) & 1) {
         if ((mask_16x16_0 & mask_16x16_1) & 1) {
-          vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                           lfi0->hev_thr, bd);
         } else if (mask_16x16_0 & 1) {
-          vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                      lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                      lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_8x8_0 | mask_8x8_1) & 1) {
         if ((mask_8x8_0 & mask_8x8_1) & 1) {
-          vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_8x8_0 & 1) {
-          vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_4x4_0 | mask_4x4_1) & 1) {
         if ((mask_4x4_0 & mask_4x4_1) & 1) {
-          vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_4x4_0 & 1) {
-          vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
         if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
-          vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_4x4_int_0 & 1) {
-          vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
@@ -500,7 +500,7 @@
     mask_4x4_int_1 >>= 1;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void filter_selectively_horiz(
     uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
@@ -517,11 +517,11 @@
     if (mask & 1) {
       if (mask_16x16 & 1) {
         if ((mask_16x16 & 3) == 3) {
-          vpx_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr);
           count = 2;
         } else {
-          vpx_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr);
         }
       } else if (mask_8x8 & 1) {
@@ -529,28 +529,28 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, lfin->mblim, lfin->lim,
                                     lfin->hev_thr);
 
           if ((mask_4x4_int & 3) == 3) {
-            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                       lfi->lim, lfi->hev_thr, lfin->mblim,
                                       lfin->lim, lfin->hev_thr);
           } else {
             if (mask_4x4_int & 1)
-              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr);
             else if (mask_4x4_int & 2)
-              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                    lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+          aom_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
-            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr);
         }
       } else if (mask_4x4 & 1) {
@@ -558,31 +558,31 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, lfin->mblim, lfin->lim,
                                     lfin->hev_thr);
           if ((mask_4x4_int & 3) == 3) {
-            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                       lfi->lim, lfi->hev_thr, lfin->mblim,
                                       lfin->lim, lfin->hev_thr);
           } else {
             if (mask_4x4_int & 1)
-              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr);
             else if (mask_4x4_int & 2)
-              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                    lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+          aom_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
-            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr);
         }
       } else if (mask_4x4_int & 1) {
-        vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+        aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                              lfi->hev_thr);
       }
     }
@@ -595,7 +595,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_horiz(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -611,11 +611,11 @@
     if (mask & 1) {
       if (mask_16x16 & 1) {
         if ((mask_16x16 & 3) == 3) {
-          vpx_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
                                             lfi->hev_thr, bd);
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, bd);
         }
       } else if (mask_8x8 & 1) {
@@ -623,30 +623,30 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, lfin->mblim, lfin->lim,
                                            lfin->hev_thr, bd);
 
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(
+            aom_highbd_lpf_horizontal_4_dual(
                 s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                 lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
-              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                           lfi->lim, lfi->hev_thr, bd);
             } else if (mask_4x4_int & 2) {
-              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);
             }
           }
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, bd);
 
           if (mask_4x4_int & 1) {
-            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                         lfi->lim, lfi->hev_thr, bd);
           }
         }
@@ -655,34 +655,34 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, lfin->mblim, lfin->lim,
                                            lfin->hev_thr, bd);
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(
+            aom_highbd_lpf_horizontal_4_dual(
                 s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                 lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
-              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                           lfi->lim, lfi->hev_thr, bd);
             } else if (mask_4x4_int & 2) {
-              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);
             }
           }
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, bd);
 
           if (mask_4x4_int & 1) {
-            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                         lfi->lim, lfi->hev_thr, bd);
           }
         }
       } else if (mask_4x4_int & 1) {
-        vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+        aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, bd);
       }
     }
@@ -694,7 +694,7 @@
     mask_4x4_int >>= count;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // This function ors into the current lfm structure, where to do loop
 // filters for the specific mi we are looking at. It uses information
@@ -833,9 +833,9 @@
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
-                     MODE_INFO **mi, const int mode_info_stride,
-                     LOOP_FILTER_MASK *lfm) {
+void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
+                    MODE_INFO **mi, const int mode_info_stride,
+                    LOOP_FILTER_MASK *lfm) {
   int idx_32, idx_16, idx_8;
   const loop_filter_info_n *const lfi_n = &cm->lf_info;
   MODE_INFO **mip = mi;
@@ -861,13 +861,13 @@
   const int shift_32_uv[] = { 0, 2, 8, 10 };
   const int shift_16_uv[] = { 0, 1, 4, 5 };
   int i;
-  const int max_rows = VPXMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
-  const int max_cols = VPXMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
+  const int max_rows = AOMMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
+  const int max_cols = AOMMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
 #if CONFIG_EXT_PARTITION
   assert(0 && "Not yet updated");
 #endif  // CONFIG_EXT_PARTITION
 
-  vp10_zero(*lfm);
+  av1_zero(*lfm);
   assert(mip[0] != NULL);
 
   // TODO(jimbankoski): Try moving most of the following code into decode
@@ -1123,15 +1123,15 @@
 
     if (mask & 1) {
       if (mask_16x16 & 1) {
-        vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_8x8 & 1) {
-        vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_4x4 & 1) {
-        vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       }
     }
     if (mask_4x4_int & 1)
-      vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+      aom_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
     s += 8;
     lfl += 1;
     mask_16x16 >>= 1;
@@ -1141,7 +1141,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -1154,18 +1154,18 @@
 
     if (mask & 1) {
       if (mask_16x16 & 1) {
-        vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                    bd);
       } else if (mask_8x8 & 1) {
-        vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                   bd);
       } else if (mask_4x4 & 1) {
-        vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                   bd);
       }
     }
     if (mask_4x4_int & 1)
-      vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
+      aom_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, bd);
     s += 8;
     lfl += 1;
@@ -1175,11 +1175,11 @@
     mask_4x4_int >>= 1;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_filter_block_plane_non420(VP10_COMMON *cm,
-                                    struct macroblockd_plane *plane,
-                                    MODE_INFO **mib, int mi_row, int mi_col) {
+void av1_filter_block_plane_non420(AV1_COMMON *cm,
+                                   struct macroblockd_plane *plane,
+                                   MODE_INFO **mib, int mi_row, int mi_col) {
   const int ss_x = plane->subsampling_x;
   const int ss_y = plane->subsampling_y;
   const int row_step = 1 << ss_y;
@@ -1254,17 +1254,17 @@
 
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
       tx_size_r =
-          VPXMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
-      tx_size_c = VPXMIN(txsize_vert_map[tx_size],
+          AOMMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
+      tx_size_c = AOMMIN(txsize_vert_map[tx_size],
                          cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
 
       cm->above_txfm_context[mi_col + c] = txsize_horz_map[tx_size];
       cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] =
           txsize_vert_map[tx_size];
 #else
-      tx_size_r = VPXMIN(tx_size, cm->above_txfm_context[mi_col + c]);
+      tx_size_r = AOMMIN(tx_size, cm->above_txfm_context[mi_col + c]);
       tx_size_c =
-          VPXMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
+          AOMMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
 
       cm->above_txfm_context[mi_col + c] = tx_size;
       cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] = tx_size;
@@ -1333,7 +1333,7 @@
 
     // Disable filtering on the leftmost column
     border_mask = ~(mi_col == 0);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1350,7 +1350,7 @@
     filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
                             mask_8x8_c & border_mask, mask_4x4_c & border_mask,
                             mask_4x4_int[r], &cm->lf_info, &lfl[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += MI_SIZE * dst->stride;
     mib += row_step * cm->mi_stride;
   }
@@ -1374,7 +1374,7 @@
       mask_8x8_r = mask_8x8[r];
       mask_4x4_r = mask_4x4[r];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                       dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1389,14 +1389,14 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfl[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += MI_SIZE * dst->stride;
   }
 }
 
-void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss00(AV1_COMMON *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r;
@@ -1415,7 +1415,7 @@
     unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert_row2(
           plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1430,7 +1430,7 @@
     filter_selectively_vert_row2(
         plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
         mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += 2 * MI_SIZE * dst->stride;
     mask_16x16 >>= 2 * MI_SIZE;
     mask_8x8 >>= 2 * MI_SIZE;
@@ -1460,7 +1460,7 @@
       mask_4x4_r = mask_4x4 & 0xff;
     }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1475,7 +1475,7 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
                              &lfm->lfl_y[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += MI_SIZE * dst->stride;
     mask_16x16 >>= MI_SIZE;
@@ -1485,9 +1485,9 @@
   }
 }
 
-void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss11(AV1_COMMON *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r, c;
@@ -1514,7 +1514,7 @@
       unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         highbd_filter_selectively_vert_row2(
             plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1530,7 +1530,7 @@
       filter_selectively_vert_row2(
           plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
           mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r >> 1][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       dst->buf += 2 * MI_SIZE * dst->stride;
       mask_16x16 >>= MI_SIZE;
@@ -1565,7 +1565,7 @@
       mask_4x4_r = mask_4x4 & 0xf;
     }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1580,7 +1580,7 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfm->lfl_uv[r >> 1][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += MI_SIZE * dst->stride;
     mask_16x16 >>= MI_SIZE / 2;
@@ -1590,9 +1590,9 @@
   }
 }
 
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
-                           struct macroblockd_plane planes[MAX_MB_PLANE],
-                           int start, int stop, int y_only) {
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
+                          struct macroblockd_plane planes[MAX_MB_PLANE],
+                          int start, int stop, int y_only) {
 #if CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   int mi_row, mi_col;
@@ -1608,11 +1608,11 @@
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += cm->mib_size) {
       int plane;
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       for (plane = 0; plane < num_planes; ++plane)
-        vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
-                                       mi_col);
+        av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+                                      mi_col);
     }
   }
 #else
@@ -1635,23 +1635,23 @@
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
       int plane;
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                           mi_row, mi_col);
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+                                          mi_row, mi_col);
             break;
         }
       }
@@ -1660,9 +1660,9 @@
 #endif  // CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
 }
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                            MACROBLOCKD *xd, int frame_filter_level, int y_only,
-                            int partial_frame) {
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                           MACROBLOCKD *xd, int frame_filter_level, int y_only,
+                           int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
   if (!frame_filter_level) return;
   start_mi_row = 0;
@@ -1670,17 +1670,16 @@
   if (partial_frame && cm->mi_rows > 8) {
     start_mi_row = cm->mi_rows >> 1;
     start_mi_row &= 0xfffffff8;
-    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+    mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
-  vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
 }
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm,
-    const struct macroblockd_plane planes[MAX_MB_PLANE]) {
+    struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]) {
   lf_data->frame_buffer = frame_buffer;
   lf_data->cm = cm;
   lf_data->start = 0;
@@ -1689,9 +1688,9 @@
   memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
 }
 
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
   (void)unused;
-  vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
-                        lf_data->start, lf_data->stop, lf_data->y_only);
+  av1_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+                       lf_data->start, lf_data->stop, lf_data->y_only);
   return 1;
 }
diff --git a/av1/common/loopfilter.h b/av1/common/loopfilter.h
index b85ed04..d3377e2 100644
--- a/av1/common/loopfilter.h
+++ b/av1/common/loopfilter.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_H_
-#define VP10_COMMON_LOOPFILTER_H_
+#ifndef AV1_COMMON_LOOPFILTER_H_
+#define AV1_COMMON_LOOPFILTER_H_
 
 #include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/blockd.h"
 #include "av1/common/restoration.h"
@@ -89,49 +89,49 @@
 } LOOP_FILTER_MASK;
 
 /* assorted loopfilter functions which get used elsewhere */
-struct VP10Common;
+struct AV1Common;
 struct macroblockd;
-struct VP10LfSyncData;
+struct AV1LfSyncData;
 
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
-                     const int mi_col, MODE_INFO **mi_8x8,
-                     const int mode_info_stride, LOOP_FILTER_MASK *lfm);
+void av1_setup_mask(struct AV1Common *const cm, const int mi_row,
+                    const int mi_col, MODE_INFO **mi_8x8,
+                    const int mode_info_stride, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss00(struct AV1Common *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss11(struct AV1Common *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_non420(struct VP10Common *cm,
-                                    struct macroblockd_plane *plane,
-                                    MODE_INFO **mi_8x8, int mi_row, int mi_col);
+void av1_filter_block_plane_non420(struct AV1Common *cm,
+                                   struct macroblockd_plane *plane,
+                                   MODE_INFO **mi_8x8, int mi_row, int mi_col);
 
-void vp10_loop_filter_init(struct VP10Common *cm);
+void av1_loop_filter_init(struct AV1Common *cm);
 
 // Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(),
-// vp10_loop_filter_frame()
+// This should be called before av1_loop_filter_rows(),
+// av1_loop_filter_frame()
 // calls this function directly.
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
+void av1_loop_filter_frame_init(struct AV1Common *cm, int default_filt_lvl);
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
-                            struct macroblockd *mbd, int filter_level,
-                            int y_only, int partial_frame);
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                           struct macroblockd *mbd, int filter_level,
+                           int y_only, int partial_frame);
 
 // Apply the loop filter to [start, stop) macro block rows in frame_buffer.
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
-                           struct VP10Common *cm,
-                           struct macroblockd_plane planes[MAX_MB_PLANE],
-                           int start, int stop, int y_only);
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
+                          struct AV1Common *cm,
+                          struct macroblockd_plane planes[MAX_MB_PLANE],
+                          int start, int stop, int y_only);
 
 typedef struct LoopFilterWorkerData {
   YV12_BUFFER_CONFIG *frame_buffer;
-  struct VP10Common *cm;
+  struct AV1Common *cm;
   struct macroblockd_plane planes[MAX_MB_PLANE];
 
   int start;
@@ -139,14 +139,14 @@
   int y_only;
 } LFWorkerData;
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
+    struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
 
 // Operates on the rows described by 'lf_data'.
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_H_
+#endif  // AV1_COMMON_LOOPFILTER_H_
diff --git a/av1/common/mips/dspr2/itrans16_dspr2.c b/av1/common/mips/dspr2/itrans16_dspr2.c
index c0b9b2a..9e63d4d 100644
--- a/av1/common/mips/dspr2/itrans16_dspr2.c
+++ b/av1/common/mips/dspr2/itrans16_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "av1/common/idct.h"
@@ -21,8 +21,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
-                                 int tx_type) {
+void av1_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+                                int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *outptr = out;
@@ -90,7 +90,7 @@
                                            dest[j * pitch + i]);
       }
     } break;
-    default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans4_dspr2.c b/av1/common/mips/dspr2/itrans4_dspr2.c
index dcb28c9..61fc0e7 100644
--- a/av1/common/mips/dspr2/itrans4_dspr2.c
+++ b/av1/common/mips/dspr2/itrans4_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "av1/common/idct.h"
@@ -21,8 +21,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
-                              int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+                             int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
   int16_t *outptr = out;
@@ -36,11 +36,11 @@
 
   switch (tx_type) {
     case DCT_DCT:  // DCT in both horizontal and vertical
-      vpx_idct4_rows_dspr2(input, outptr);
-      vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+      aom_idct4_rows_dspr2(input, outptr);
+      aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
       break;
     case ADST_DCT:  // ADST in vertical, DCT in horizontal
-      vpx_idct4_rows_dspr2(input, outptr);
+      aom_idct4_rows_dspr2(input, outptr);
 
       outptr = out;
 
@@ -66,7 +66,7 @@
           temp_in[i * 4 + j] = out[j * 4 + i];
         }
       }
-      vpx_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
+      aom_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
       break;
     case ADST_ADST:  // ADST in both directions
       for (i = 0; i < 4; ++i) {
@@ -84,7 +84,7 @@
               ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans8_dspr2.c b/av1/common/mips/dspr2/itrans8_dspr2.c
index 761d6f0..fe99f31 100644
--- a/av1/common/mips/dspr2/itrans8_dspr2.c
+++ b/av1/common/mips/dspr2/itrans8_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
@@ -20,8 +20,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
-                              int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+                             int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
   int16_t *outptr = out;
@@ -78,7 +78,7 @@
               ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/msa/idct16x16_msa.c b/av1/common/mips/msa/idct16x16_msa.c
index baa3a97..e5a68fa 100644
--- a/av1/common/mips/msa/idct16x16_msa.c
+++ b/av1/common/mips/msa/idct16x16_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
-                               int32_t dst_stride, int32_t tx_type) {
+void av1_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+                              int32_t dst_stride, int32_t tx_type) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *out_ptr = &out[0];
@@ -24,13 +24,13 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+        aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
       }
       break;
@@ -38,12 +38,12 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
-        vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+        aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
       }
       break;
@@ -51,13 +51,13 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+        aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
       }
       break;
@@ -65,12 +65,12 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
-        vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+        aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
       }
       break;
diff --git a/av1/common/mips/msa/idct4x4_msa.c b/av1/common/mips/msa/idct4x4_msa.c
index 0620df7..7b4ba12 100644
--- a/av1/common/mips/msa/idct4x4_msa.c
+++ b/av1/common/mips/msa/idct4x4_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
-                            int32_t dst_stride, int32_t tx_type) {
+void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
   /* load vector elements of 4x4 block */
@@ -24,31 +24,31 @@
   switch (tx_type) {
     case DCT_DCT:
       /* DCT in horizontal */
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* DCT in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_DCT:
       /* DCT in horizontal */
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* ADST in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* DCT in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_ADST:
       /* ADST in horizontal */
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* ADST in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     default: assert(0); break;
   }
diff --git a/av1/common/mips/msa/idct8x8_msa.c b/av1/common/mips/msa/idct8x8_msa.c
index 5c62c4a..ce61676 100644
--- a/av1/common/mips/msa/idct8x8_msa.c
+++ b/av1/common/mips/msa/idct8x8_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
-                            int32_t dst_stride, int32_t tx_type) {
+void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   /* load vector elements of 8x8 block */
@@ -26,42 +26,42 @@
   switch (tx_type) {
     case DCT_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       /* DCT in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       break;
     case ADST_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       /* ADST in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       /* DCT in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       break;
     case ADST_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       /* ADST in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     default: assert(0); break;
@@ -72,7 +72,7 @@
   SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
diff --git a/av1/common/mv.h b/av1/common/mv.h
index dba3336..4908d74 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_MV_H_
-#define VP10_COMMON_MV_H_
+#ifndef AV1_COMMON_MV_H_
+#define AV1_COMMON_MV_H_
 
 #include "av1/common/common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 #if CONFIG_GLOBAL_MOTION
 #include "av1/common/warped_motion.h"
 #endif  // CONFIG_GLOBAL_MOTION
@@ -146,4 +146,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MV_H_
+#endif  // AV1_COMMON_MV_H_
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 836b065..e14df3c 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -146,7 +146,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_row_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_row_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int row_offset,
                              CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -164,7 +164,7 @@
           xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       const int len =
-          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
+          AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
 
       newmv_count += add_ref_mv_candidate(
           candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -178,7 +178,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_col_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_col_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int col_offset,
                              CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -196,7 +196,7 @@
           xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       const int len =
-          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
+          AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
 
       newmv_count += add_ref_mv_candidate(
           candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -210,7 +210,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_blk_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_blk_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int row_offset,
                              int col_offset, CANDIDATE_MV *ref_mv_stack,
@@ -288,7 +288,7 @@
 
   for (rf = 0; rf < 2; ++rf) {
     if (candidate->ref_frame[rf] == ref_frame) {
-      const int list_range = VPXMIN(refmv_count, MAX_MV_REF_CANDIDATES);
+      const int list_range = AOMMIN(refmv_count, MAX_MV_REF_CANDIDATES);
 
       const int_mv pred_mv = candidate->mv[rf];
       for (idx = 0; idx < list_range; ++idx)
@@ -304,7 +304,7 @@
   }
 }
 
-static void setup_ref_mv_list(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                               MV_REFERENCE_FRAME ref_frame,
                               uint8_t *refmv_count, CANDIDATE_MV *ref_mv_stack,
                               int_mv *mv_ref_list, int block, int mi_row,
@@ -320,11 +320,11 @@
           ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
           : NULL;
 
-  int bs = VPXMAX(xd->n8_w, xd->n8_h);
+  int bs = AOMMAX(xd->n8_w, xd->n8_h);
   int has_tr = has_top_right(xd, mi_row, mi_col, bs);
 
   MV_REFERENCE_FRAME rf[2];
-  vp10_set_ref_frame(rf, ref_frame);
+  av1_set_ref_frame(rf, ref_frame);
 
   mode_context[ref_frame] = 0;
   *refmv_count = 0;
@@ -502,7 +502,7 @@
                    xd->n8_h << 3, xd);
     }
   } else {
-    for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
+    for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
       mv_ref_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
       clamp_mv_ref(&mv_ref_list[idx].as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
     }
@@ -512,7 +512,7 @@
 
 // This function searches the neighbourhood of a given MB/SB
 // to try and find candidate reference vectors.
-static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                              int_mv *mv_ref_list, int block, int mi_row,
                              int mi_col, find_mv_refs_sync sync,
@@ -648,10 +648,10 @@
 
 #if CONFIG_EXT_INTER
 // This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
-                            MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
-                            int block, int mi_row, int mi_col,
-                            int16_t *mode_context) {
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+                           MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+                           int block, int mi_row, int mi_col,
+                           int16_t *mode_context) {
   int i, refmv_count = 0;
   const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
   int context_counter = 0;
@@ -691,26 +691,26 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
 #if CONFIG_REF_MV
-                       uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+                      uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
 #if CONFIG_EXT_INTER
-                       int16_t *compound_mode_context,
+                      int16_t *compound_mode_context,
 #endif  // CONFIG_EXT_INTER
 #endif
-                       int_mv *mv_ref_list, int mi_row, int mi_col,
-                       find_mv_refs_sync sync, void *const data,
-                       int16_t *mode_context) {
+                      int_mv *mv_ref_list, int mi_row, int mi_col,
+                      find_mv_refs_sync sync, void *const data,
+                      int16_t *mode_context) {
 #if CONFIG_REF_MV
   int idx, all_zero = 1;
 #endif
 #if CONFIG_EXT_INTER
-  vp10_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
+  av1_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
 #if CONFIG_REF_MV
-                         compound_mode_context);
+                        compound_mode_context);
 #else
-                         mode_context);
+                        mode_context);
 #endif  // CONFIG_REF_MV
   find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col, sync,
                    data, NULL);
@@ -730,8 +730,8 @@
 #endif
 }
 
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
-                            int_mv *near_mv) {
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                           int_mv *near_mv) {
   int i;
   // Make sure all the candidates are properly clamped etc
   for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
@@ -741,16 +741,16 @@
   *near_mv = mvlist[1];
 }
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
-                                    int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+                                   int ref, int mi_row, int mi_col,
 #if CONFIG_REF_MV
-                                    CANDIDATE_MV *ref_mv_stack,
-                                    uint8_t *ref_mv_count,
+                                   CANDIDATE_MV *ref_mv_stack,
+                                   uint8_t *ref_mv_count,
 #endif
 #if CONFIG_EXT_INTER
-                                    int_mv *mv_list,
+                                   int_mv *mv_list,
 #endif  // CONFIG_EXT_INTER
-                                    int_mv *nearest_mv, int_mv *near_mv) {
+                                   int_mv *nearest_mv, int_mv *near_mv) {
 #if !CONFIG_EXT_INTER
   int_mv mv_list[MAX_MV_REF_CANDIDATES];
 #endif  // !CONFIG_EXT_INTER
@@ -789,7 +789,7 @@
     clamp_mv_ref(&ref_mv_stack[idx].this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3,
                  xd);
 
-  for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
+  for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
     mv_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
 #endif
 
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index babd4f0..b65509a 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -7,8 +7,8 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VP10_COMMON_MVREF_COMMON_H_
-#define VP10_COMMON_MVREF_COMMON_H_
+#ifndef AV1_COMMON_MVREF_COMMON_H_
+#define AV1_COMMON_MVREF_COMMON_H_
 
 #include "av1/common/onyxc_int.h"
 #include "av1/common/blockd.h"
@@ -340,7 +340,7 @@
 }
 
 static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
-  const int use_hp = allow_hp && vp10_use_mv_hp(mv);
+  const int use_hp = allow_hp && av1_use_mv_hp(mv);
   if (!use_hp) {
     if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
     if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
@@ -348,8 +348,8 @@
 }
 
 #if CONFIG_REF_MV
-static INLINE int vp10_nmv_ctx(const uint8_t ref_mv_count,
-                               const CANDIDATE_MV *ref_mv_stack) {
+static INLINE int av1_nmv_ctx(const uint8_t ref_mv_count,
+                              const CANDIDATE_MV *ref_mv_stack) {
 #if CONFIG_EXT_INTER
   return 0;
 #endif
@@ -365,7 +365,7 @@
   return 0;
 }
 
-static INLINE int8_t vp10_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
+static INLINE int8_t av1_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
   if (rf[1] > INTRA_FRAME) {
     return TOTAL_REFS_PER_FRAME + FWD_RF_OFFSET(rf[0]) +
            BWD_RF_OFFSET(rf[1]) * FWD_REFS;
@@ -386,8 +386,8 @@
 #endif
 };
 
-static INLINE void vp10_set_ref_frame(MV_REFERENCE_FRAME *rf,
-                                      int8_t ref_frame_type) {
+static INLINE void av1_set_ref_frame(MV_REFERENCE_FRAME *rf,
+                                     int8_t ref_frame_type) {
   if (ref_frame_type >= TOTAL_REFS_PER_FRAME) {
     rf[0] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][0];
     rf[1] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][1];
@@ -399,7 +399,7 @@
   }
 }
 
-static INLINE int16_t vp10_mode_context_analyzer(
+static INLINE int16_t av1_mode_context_analyzer(
     const int16_t *const mode_context, const MV_REFERENCE_FRAME *const rf,
     BLOCK_SIZE bsize, int block) {
   int16_t mode_ctx = 0;
@@ -420,8 +420,8 @@
     return mode_context[rf[0]];
 }
 
-static INLINE uint8_t vp10_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
-                                   int ref_idx) {
+static INLINE uint8_t av1_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
+                                  int ref_idx) {
   if (ref_mv_stack[ref_idx].weight >= REF_CAT_LEVEL &&
       ref_mv_stack[ref_idx + 1].weight >= REF_CAT_LEVEL) {
     if (ref_mv_stack[ref_idx].weight == ref_mv_stack[ref_idx + 1].weight)
@@ -447,45 +447,45 @@
 #endif
 
 typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
 #if CONFIG_REF_MV
-                       uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+                      uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
 #if CONFIG_EXT_INTER
-                       int16_t *compound_mode_context,
+                      int16_t *compound_mode_context,
 #endif  // CONFIG_EXT_INTER
 #endif
-                       int_mv *mv_ref_list, int mi_row, int mi_col,
-                       find_mv_refs_sync sync, void *const data,
-                       int16_t *mode_context);
+                      int_mv *mv_ref_list, int mi_row, int mi_col,
+                      find_mv_refs_sync sync, void *const data,
+                      int16_t *mode_context);
 
 // check a list of motion vectors by sad score using a number rows of pixels
 // above and a number cols of pixels in the left to select the one with best
 // score to use as ref motion vector
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
-                            int_mv *near_mv);
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                           int_mv *near_mv);
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
-                                    int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+                                   int ref, int mi_row, int mi_col,
 #if CONFIG_REF_MV
-                                    CANDIDATE_MV *ref_mv_stack,
-                                    uint8_t *ref_mv_count,
+                                   CANDIDATE_MV *ref_mv_stack,
+                                   uint8_t *ref_mv_count,
 #endif
 #if CONFIG_EXT_INTER
-                                    int_mv *mv_list,
+                                   int_mv *mv_list,
 #endif  // CONFIG_EXT_INTER
-                                    int_mv *nearest_mv, int_mv *near_mv);
+                                   int_mv *nearest_mv, int_mv *near_mv);
 
 #if CONFIG_EXT_INTER
 // This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
-                            MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
-                            int block, int mi_row, int mi_col,
-                            int16_t *mode_context);
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+                           MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+                           int block, int mi_row, int mi_col,
+                           int16_t *mode_context);
 #endif  // CONFIG_EXT_INTER
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MVREF_COMMON_H_
+#endif  // AV1_COMMON_MVREF_COMMON_H_
diff --git a/av1/common/odintrin.h b/av1/common/odintrin.h
index 87b1a36..8e9b3e4 100644
--- a/av1/common/odintrin.h
+++ b/av1/common/odintrin.h
@@ -1,9 +1,9 @@
-#ifndef VP10_COMMON_ODINTRIN_H_
-#define VP10_COMMON_ODINTRIN_H_
+#ifndef AV1_COMMON_ODINTRIN_H_
+#define AV1_COMMON_ODINTRIN_H_
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/bitops.h"
 
 /*Smallest blocks are 4x4*/
@@ -33,7 +33,7 @@
 #define OD_DIVU(_x, _d) \
   (((_d) < OD_DIVU_DMAX) ? (OD_DIVU_SMALL((_x), (_d))) : ((_x) / (_d)))
 
-#define OD_MINI VPXMIN
+#define OD_MINI AOMMIN
 #define OD_CLAMPI(min, val, max) clamp((val), (min), (max))
 
 #define OD_CLZ0 (1)
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 55a8112..d3bc820 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ONYXC_INT_H_
-#define VP10_COMMON_ONYXC_INT_H_
+#ifndef AV1_COMMON_ONYXC_INT_H_
+#define AV1_COMMON_ONYXC_INT_H_
 
-#include "./vpx_config.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom_util/vpx_thread.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_util/aom_thread.h"
+#include "./av1_rtcd.h"
 #include "av1/common/alloccommon.h"
 #include "av1/common/loopfilter.h"
 #include "av1/common/entropymv.h"
@@ -87,14 +87,14 @@
   MV_REF *mvs;
   int mi_rows;
   int mi_cols;
-  vpx_codec_frame_buffer_t raw_frame_buffer;
+  aom_codec_frame_buffer_t raw_frame_buffer;
   YV12_BUFFER_CONFIG buf;
 
   // The Following variables will only be used in frame parallel decode.
 
   // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
   // that no FrameWorker owns, or is decoding, this buffer.
-  VPxWorker *frame_worker_owner;
+  AVxWorker *frame_worker_owner;
 
   // row and col indicate which position frame has been decoded to in real
   // pixel unit. They are reset to -1 when decoding begins and set to INT_MAX
@@ -114,8 +114,8 @@
   // Private data associated with the frame buffer callbacks.
   void *cb_priv;
 
-  vpx_get_frame_buffer_cb_fn_t get_fb_cb;
-  vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+  aom_get_frame_buffer_cb_fn_t get_fb_cb;
+  aom_release_frame_buffer_cb_fn_t release_fb_cb;
 
   RefCntBuffer frame_bufs[FRAME_BUFFERS];
 
@@ -123,9 +123,9 @@
   InternalFrameBufferList int_frame_buffers;
 } BufferPool;
 
-typedef struct VP10Common {
-  struct vpx_internal_error_info error;
-  vpx_color_space_t color_space;
+typedef struct AV1Common {
+  struct aom_internal_error_info error;
+  aom_color_space_t color_space;
   int color_range;
   int width;
   int height;
@@ -140,7 +140,7 @@
   int subsampling_x;
   int subsampling_y;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   // Marks if we need to use 16bit frame buffers (1: yes, 0: no).
   int use_highbitdepth;
 #endif
@@ -247,9 +247,9 @@
   MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */
 
   // Separate mi functions between encoder and decoder.
-  int (*alloc_mi)(struct VP10Common *cm, int mi_size);
-  void (*free_mi)(struct VP10Common *cm);
-  void (*setup_mi)(struct VP10Common *cm);
+  int (*alloc_mi)(struct AV1Common *cm, int mi_size);
+  void (*free_mi)(struct AV1Common *cm);
+  void (*setup_mi)(struct AV1Common *cm);
 
   // Grid of pointers to 8x8 MODE_INFO structs.  Any 8x8 not in the visible
   // area will be NULL.
@@ -307,7 +307,7 @@
 #if CONFIG_ENTROPY
   // The initial probabilities for a frame, before any subframe backward update,
   // and after forward update.
-  vp10_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
   // Number of subframe backward updates already done
   uint8_t coef_probs_update_idx;
   // Signal if the backward update is subframe or end-of-frame
@@ -319,9 +319,9 @@
   unsigned int current_video_frame;
   BITSTREAM_PROFILE profile;
 
-  // VPX_BITS_8 in profile 0 or 1, VPX_BITS_10 or VPX_BITS_12 in profile 2 or 3.
-  vpx_bit_depth_t bit_depth;
-  vpx_bit_depth_t dequant_bit_depth;  // bit_depth of current dequantizer
+  // AOM_BITS_8 in profile 0 or 1, AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
+  aom_bit_depth_t bit_depth;
+  aom_bit_depth_t dequant_bit_depth;  // bit_depth of current dequantizer
 
   int error_resilient_mode;
 
@@ -336,8 +336,8 @@
 
   // Private data associated with the frame buffer callbacks.
   void *cb_priv;
-  vpx_get_frame_buffer_cb_fn_t get_fb_cb;
-  vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+  aom_get_frame_buffer_cb_fn_t get_fb_cb;
+  aom_release_frame_buffer_cb_fn_t release_fb_cb;
 
   // Handles memory for the codec.
   InternalFrameBufferList int_frame_buffers;
@@ -356,7 +356,7 @@
   // scratch memory for intraonly/keyframe forward updates from default tables
   // - this is intentionally not placed in FRAME_CONTEXT since it's reset upon
   // each keyframe and not used afterwards
-  vpx_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+  aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
 #if CONFIG_GLOBAL_MOTION
   Global_Motion_Params global_motion[TOTAL_REFS_PER_FRAME];
 #endif
@@ -367,7 +367,7 @@
 #if CONFIG_DERING
   int dering_level;
 #endif
-} VP10_COMMON;
+} AV1_COMMON;
 
 // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
 // frame reference count.
@@ -387,7 +387,7 @@
 #endif
 }
 
-static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
   if (index < 0 || index >= REF_FRAMES) return NULL;
   if (cm->ref_frame_map[index] < 0) return NULL;
   assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
@@ -395,11 +395,11 @@
 }
 
 static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
-    const VP10_COMMON *const cm) {
+    const AV1_COMMON *const cm) {
   return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
 }
 
-static INLINE int get_free_fb(VP10_COMMON *cm) {
+static INLINE int get_free_fb(AV1_COMMON *cm) {
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
   int i;
 
@@ -429,20 +429,20 @@
   bufs[new_idx].ref_count++;
 }
 
-static INLINE int mi_cols_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_cols_aligned_to_sb(const AV1_COMMON *cm) {
   return ALIGN_POWER_OF_TWO(cm->mi_cols, cm->mib_size_log2);
 }
 
-static INLINE int mi_rows_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_rows_aligned_to_sb(const AV1_COMMON *cm) {
   return ALIGN_POWER_OF_TWO(cm->mi_rows, cm->mib_size_log2);
 }
 
-static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
+static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
   return cm->frame_type == KEY_FRAME || cm->intra_only;
 }
 
-static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                         tran_low_t *dqcoeff) {
+static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        tran_low_t *dqcoeff) {
   int i;
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     xd->plane[i].dqcoeff = dqcoeff;
@@ -536,13 +536,13 @@
 #endif
 }
 
-static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm,
+static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
                                                const MODE_INFO *mi,
                                                const MODE_INFO *above_mi,
                                                const MODE_INFO *left_mi,
                                                int block) {
-  const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
-  const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
+  const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+  const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
   return cm->kf_y_prob[above][left];
 }
 
@@ -622,8 +622,8 @@
   return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
 }
 
-static INLINE void vp10_zero_above_context(VP10_COMMON *const cm,
-                                           int mi_col_start, int mi_col_end) {
+static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
+                                          int mi_col_start, int mi_col_end) {
   const int width = mi_col_end - mi_col_start;
 
   const int offset_y = 2 * mi_col_start;
@@ -631,22 +631,22 @@
   const int offset_uv = offset_y >> cm->subsampling_x;
   const int width_uv = width_y >> cm->subsampling_x;
 
-  vp10_zero_array(cm->above_context[0] + offset_y, width_y);
-  vp10_zero_array(cm->above_context[1] + offset_uv, width_uv);
-  vp10_zero_array(cm->above_context[2] + offset_uv, width_uv);
+  av1_zero_array(cm->above_context[0] + offset_y, width_y);
+  av1_zero_array(cm->above_context[1] + offset_uv, width_uv);
+  av1_zero_array(cm->above_context[2] + offset_uv, width_uv);
 
-  vp10_zero_array(cm->above_seg_context + mi_col_start, width);
+  av1_zero_array(cm->above_seg_context + mi_col_start, width);
 
 #if CONFIG_VAR_TX
-  vp10_zero_array(cm->above_txfm_context + mi_col_start, width);
+  av1_zero_array(cm->above_txfm_context + mi_col_start, width);
 #endif  // CONFIG_VAR_TX
 }
 
-static INLINE void vp10_zero_left_context(MACROBLOCKD *const xd) {
-  vp10_zero(xd->left_context);
-  vp10_zero(xd->left_seg_context);
+static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
+  av1_zero(xd->left_context);
+  av1_zero(xd->left_seg_context);
 #if CONFIG_VAR_TX
-  vp10_zero(xd->left_txfm_context_buffer);
+  av1_zero(xd->left_txfm_context_buffer);
 #endif
 }
 
@@ -684,7 +684,7 @@
 }
 #endif
 
-static INLINE PARTITION_TYPE get_partition(const VP10_COMMON *const cm,
+static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
                                            const int mi_row, const int mi_col,
                                            const BLOCK_SIZE bsize) {
   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
@@ -726,8 +726,7 @@
   }
 }
 
-static INLINE void set_sb_size(VP10_COMMON *const cm,
-                               const BLOCK_SIZE sb_size) {
+static INLINE void set_sb_size(AV1_COMMON *const cm, const BLOCK_SIZE sb_size) {
   cm->sb_size = sb_size;
   cm->mib_size = num_8x8_blocks_wide_lookup[cm->sb_size];
   cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
@@ -737,4 +736,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ONYXC_INT_H_
+#endif  // AV1_COMMON_ONYXC_INT_H_
diff --git a/av1/common/pred_common.c b/av1/common/pred_common.c
index 0e1045e..6fe1188 100644
--- a/av1/common/pred_common.c
+++ b/av1/common/pred_common.c
@@ -34,7 +34,7 @@
   return ref_type;
 }
 
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int ctx_offset =
       (mbmi->ref_frame[1] > INTRA_FRAME) * INTER_FILTER_COMP_OFFSET;
@@ -67,7 +67,7 @@
   return filter_type_ctx;
 }
 #else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
   // Note:
   // The mode info data structure has a one element border above and to the
   // left of the entries corresponding to real macroblocks.
@@ -115,7 +115,7 @@
       if (mode != DC_PRED && mode != TM_PRED) {
         int p_angle =
             mode_to_angle_map[mode] + ref_mbmi->angle_delta[0] * ANGLE_STEP;
-        if (vp10_is_intra_filter_switchable(p_angle)) {
+        if (av1_is_intra_filter_switchable(p_angle)) {
           ref_type = ref_mbmi->intra_filter;
         }
       }
@@ -124,7 +124,7 @@
   return ref_type;
 }
 
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
   int left_type = INTRA_FILTERS, above_type = INTRA_FILTERS;
 
   if (xd->left_available) left_type = get_ref_intra_filter(xd->left_mbmi);
@@ -149,7 +149,7 @@
 // 1 - intra/inter, inter/intra
 // 2 - intra/--, --/intra
 // 3 - intra/intra
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
+int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
   const int has_above = xd->up_available;
@@ -171,8 +171,8 @@
 #define CHECK_BWDREF_OR_ALTREF(ref_frame) \
   (((ref_frame) == BWDREF_FRAME) || ((ref_frame) == ALTREF_FRAME))
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+                                   const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -218,8 +218,8 @@
 
 #else  // CONFIG_EXT_REFS
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+                                   const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -279,8 +279,8 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is either
 //               GOLDEN_FRAME or LAST3_FRAME.
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+                                    const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -379,8 +379,8 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is LAST_FRAME,
 // conditioning on it is either LAST_FRAME or LAST2_FRAME.
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -480,8 +480,8 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is GOLDEN_FRAME,
 // conditioning on it is either GOLDEN or LAST3.
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -575,8 +575,8 @@
 }
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
-                                        const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+                                       const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -687,8 +687,8 @@
 #else  // CONFIG_EXT_REFS
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+                                    const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -777,7 +777,7 @@
 // or a BWDREF_FRAME.
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is ALTREF/BWDREF.
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -856,7 +856,7 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is ALTREF_FRAME, conditioning
 // on it is either ALTREF_FRAME/BWDREF_FRAME.
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -950,7 +950,7 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is LAST3/GOLDEN, conditioning
 // on it is either LAST3/GOLDEN/LAST2/LAST.
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1047,7 +1047,7 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is LAST2_FRAME, conditioning
 // on it is either LAST2_FRAME/LAST_FRAME.
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1139,7 +1139,7 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is GOLDEN_FRAME, conditioning
 // on it is either GOLDEN_FRAME/LAST3_FRAME.
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1229,7 +1229,7 @@
 
 #else  // CONFIG_EXT_REFS
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1295,7 +1295,7 @@
   return pred_context;
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 9a3e3f1..5873bf0 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -8,37 +8,37 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_PRED_COMMON_H_
-#define VP10_COMMON_PRED_COMMON_H_
+#ifndef AV1_COMMON_PRED_COMMON_H_
+#define AV1_COMMON_PRED_COMMON_H_
 
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-static INLINE int get_segment_id(const VP10_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *cm,
                                  const uint8_t *segment_ids, BLOCK_SIZE bsize,
                                  int mi_row, int mi_col) {
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
-  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
-  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int xmis = AOMMIN(cm->mi_cols - mi_col, bw);
+  const int ymis = AOMMIN(cm->mi_rows - mi_row, bh);
   int x, y, segment_id = MAX_SEGMENTS;
 
   for (y = 0; y < ymis; ++y)
     for (x = 0; x < xmis; ++x)
       segment_id =
-          VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+          AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
 
   assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
   return segment_id;
 }
 
-static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+static INLINE int av1_get_pred_context_seg_id(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_sip =
@@ -48,12 +48,12 @@
   return above_sip + left_sip;
 }
 
-static INLINE vpx_prob vp10_get_pred_prob_seg_id(
+static INLINE aom_prob av1_get_pred_prob_seg_id(
     const struct segmentation_probs *segp, const MACROBLOCKD *xd) {
-  return segp->pred_probs[vp10_get_pred_context_seg_id(xd)];
+  return segp->pred_probs[av1_get_pred_context_seg_id(xd)];
 }
 
-static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
+static INLINE int av1_get_skip_context(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
@@ -61,109 +61,108 @@
   return above_skip + left_skip;
 }
 
-static INLINE vpx_prob vp10_get_skip_prob(const VP10_COMMON *cm,
-                                          const MACROBLOCKD *xd) {
-  return cm->fc->skip_probs[vp10_get_skip_context(xd)];
+static INLINE aom_prob av1_get_skip_prob(const AV1_COMMON *cm,
+                                         const MACROBLOCKD *xd) {
+  return cm->fc->skip_probs[av1_get_skip_context(xd)];
 }
 
 #if CONFIG_DUAL_FILTER
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
 #else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
 #endif
 
 #if CONFIG_EXT_INTRA
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd);
 #endif  // CONFIG_EXT_INTRA
 
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
+int av1_get_intra_inter_context(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
-                                                 const MACROBLOCKD *xd) {
-  return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
+static INLINE aom_prob av1_get_intra_inter_prob(const AV1_COMMON *cm,
+                                                const MACROBLOCKD *xd) {
+  return cm->fc->intra_inter_prob[av1_get_intra_inter_context(xd)];
 }
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm, const MACROBLOCKD *xd);
+
+static INLINE aom_prob av1_get_reference_mode_prob(const AV1_COMMON *cm,
+                                                   const MACROBLOCKD *xd) {
+  return cm->fc->comp_inter_prob[av1_get_reference_mode_context(cm, xd)];
+}
+
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p(const AV1_COMMON *cm,
                                                     const MACROBLOCKD *xd) {
-  return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
-}
-
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd);
-
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
-                                                     const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
+  const int pred_context = av1_get_pred_context_comp_ref_p(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][0];
 }
 
 #if CONFIG_EXT_REFS
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p1(const VP10_COMMON *cm,
-                                                      const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p1(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p1(const AV1_COMMON *cm,
+                                                     const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_ref_p1(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][1];
 }
 
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p2(const VP10_COMMON *cm,
-                                                      const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p2(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p2(const AV1_COMMON *cm,
+                                                     const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_ref_p2(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][2];
 }
 
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
-                                        const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+                                       const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_bwdref_p(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_bwdref_p(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_bwdref_p(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_bwdref_p(cm, xd);
   return cm->fc->comp_bwdref_prob[pred_context][0];
 }
 
 #endif  // CONFIG_EXT_REFS
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p1(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p1(xd)][0];
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p2(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p2(xd)][1];
 }
 
 #if CONFIG_EXT_REFS
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p3(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p3(xd)][2];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p3(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p3(xd)][2];
 }
 
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p4(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p4(xd)][3];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p4(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p4(xd)][3];
 }
 
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p5(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p5(xd)][4];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p5(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p5(xd)][4];
 }
 #endif  // CONFIG_EXT_REFS
 
@@ -192,7 +191,7 @@
 }
 
 #if CONFIG_VAR_TX
-static void update_tx_counts(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void update_tx_counts(AV1_COMMON *cm, MACROBLOCKD *xd,
                              MB_MODE_INFO *mbmi, BLOCK_SIZE plane_bsize,
                              TX_SIZE tx_size, int blk_row, int blk_col,
                              TX_SIZE max_tx_size, int ctx) {
@@ -232,7 +231,7 @@
   }
 }
 
-static INLINE void inter_block_tx_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void inter_block_tx_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
                                                MB_MODE_INFO *mbmi,
                                                BLOCK_SIZE plane_bsize,
                                                int ctx) {
@@ -254,4 +253,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_PRED_COMMON_H_
+#endif  // AV1_COMMON_PRED_COMMON_H_
diff --git a/av1/common/quant_common.c b/av1/common/quant_common.c
index 79d8fb8..3adfa7b 100644
--- a/av1/common/quant_common.c
+++ b/av1/common/quant_common.c
@@ -130,8 +130,8 @@
     cuml_bins[i] = ROUND_POWER_OF_TWO(cuml_knots[i] * q, 7);
 }
 
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
-                              tran_low_t *cuml_bins, int q_profile) {
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+                             tran_low_t *cuml_bins, int q_profile) {
   const uint8_t *knots = get_nuq_knots(qindex, band, q_profile);
   tran_low_t cuml_bins_[NUQ_KNOTS], *cuml_bins_ptr;
   tran_low_t doff;
@@ -150,15 +150,15 @@
       cuml_bins_ptr[NUQ_KNOTS - 1] + ROUND_POWER_OF_TWO((64 - doff) * q, 7);
 }
 
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
   if (v <= NUQ_KNOTS)
     return dq[v];
   else
     return dq[NUQ_KNOTS] + (v - NUQ_KNOTS) * q;
 }
 
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
-  tran_low_t dqmag = vp10_dequant_abscoeff_nuq(abs(v), q, dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
+  tran_low_t dqmag = av1_dequant_abscoeff_nuq(abs(v), q, dq);
   return (v < 0 ? -dqmag : dqmag);
 }
 #endif  // CONFIG_NEW_QUANT
@@ -185,7 +185,7 @@
   1184, 1232, 1282, 1336,
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
   4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,   37,
   40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,   78,   82,
@@ -260,7 +260,7 @@
   1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
   4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,   40,
   44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,   88,   92,
@@ -312,14 +312,14 @@
 };
 #endif
 
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
@@ -328,14 +328,14 @@
 #endif
 }
 
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
@@ -344,8 +344,8 @@
 #endif
 }
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                    int base_qindex) {
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+                   int base_qindex) {
   if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
     const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
     const int seg_qindex =
@@ -357,11 +357,11 @@
 }
 
 #if CONFIG_AOM_QM
-qm_val_t *aom_iqmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_iqmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
                        int log2sizem2, int is_intra) {
   return &cm->giqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
-qm_val_t *aom_qmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_qmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
                       int log2sizem2, int is_intra) {
   return &cm->gqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
@@ -371,7 +371,7 @@
 static uint16_t
     wt_matrix_ref[NUM_QM_LEVELS][2][2][4 * 4 + 8 * 8 + 16 * 16 + 32 * 32];
 
-void aom_qm_init(VP10_COMMON *cm) {
+void aom_qm_init(AV1_COMMON *cm) {
   int q, c, f, t, size;
   int current;
   for (q = 0; q < NUM_QM_LEVELS; ++q) {
diff --git a/av1/common/quant_common.h b/av1/common/quant_common.h
index 6ceed49..d04103e 100644
--- a/av1/common/quant_common.h
+++ b/av1/common/quant_common.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_QUANT_COMMON_H_
-#define VP10_COMMON_QUANT_COMMON_H_
+#ifndef AV1_COMMON_QUANT_COMMON_H_
+#define AV1_COMMON_QUANT_COMMON_H_
 
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
 #include "av1/common/seg_common.h"
 #include "av1/common/enums.h"
 
@@ -34,25 +34,25 @@
 #define DEFAULT_QM_LAST (NUM_QM_LEVELS - 1)
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                    int base_qindex);
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+                   int base_qindex);
 #if CONFIG_AOM_QM
 // Reduce the large number of quantizers to a smaller number of levels for which
 // different matrices may be defined
 static inline int aom_get_qmlevel(int qindex, int first, int last) {
   int qmlevel = (qindex * (last + 1 - first) + QINDEX_RANGE / 2) / QINDEX_RANGE;
-  qmlevel = VPXMIN(qmlevel + first, NUM_QM_LEVELS - 1);
+  qmlevel = AOMMIN(qmlevel + first, NUM_QM_LEVELS - 1);
   return qmlevel;
 }
-void aom_qm_init(struct VP10Common *cm);
-qm_val_t *aom_iqmatrix(struct VP10Common *cm, int qindex, int comp,
+void aom_qm_init(struct AV1Common *cm);
+qm_val_t *aom_iqmatrix(struct AV1Common *cm, int qindex, int comp,
                        int log2sizem2, int is_intra);
-qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
+qm_val_t *aom_qmatrix(struct AV1Common *cm, int qindex, int comp,
                       int log2sizem2, int is_intra);
 #endif
 
@@ -64,13 +64,13 @@
 
 typedef tran_low_t dequant_val_type_nuq[NUQ_KNOTS + 1];
 typedef tran_low_t cuml_bins_type_nuq[NUQ_KNOTS];
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
-                              tran_low_t *cuml_bins, int dq_off_index);
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+                             tran_low_t *cuml_bins, int dq_off_index);
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
 
 static INLINE int get_dq_profile_from_ctx(int q_ctx) {
-  return VPXMIN(q_ctx, QUANT_PROFILES - 1);
+  return AOMMIN(q_ctx, QUANT_PROFILES - 1);
 }
 #endif  // CONFIG_NEW_QUANT
 
@@ -78,4 +78,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_QUANT_COMMON_H_
+#endif  // AV1_COMMON_QUANT_COMMON_H_
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 0c3b93a..3db35e7 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -10,11 +10,11 @@
 
 #include <assert.h>
 
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_scale_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/blend.h"
 
 #include "av1/common/blockd.h"
@@ -242,9 +242,9 @@
   return master;
 }
 
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
-                                  BLOCK_SIZE sb_type, int offset_x,
-                                  int offset_y) {
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+                                 BLOCK_SIZE sb_type, int offset_x,
+                                 int offset_y) {
   const uint8_t *mask =
       get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
   if (mask) mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
@@ -326,13 +326,13 @@
     if (wbits == 0) continue;
     for (w = 0; w < wtypes; ++w) {
       mask = get_wedge_mask_inplace(w, 0, bsize);
-      vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+      aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
                         bh);
       wedge_params->masks[0][w] = dst;
       dst += bw * bh;
 
       mask = get_wedge_mask_inplace(w, 1, bsize);
-      vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+      aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
                         bh);
       wedge_params->masks[1][w] = dst;
       dst += bw * bh;
@@ -342,7 +342,7 @@
 }
 
 // Equation of line: f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8) = 0
-void vp10_init_wedge_masks() {
+void av1_init_wedge_masks() {
   init_wedge_master_masks();
   init_wedge_signs();
   init_wedge_masks();
@@ -355,13 +355,13 @@
     BLOCK_SIZE sb_type, int wedge_offset_x, int wedge_offset_y, int h, int w) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                           wedge_offset_x, wedge_offset_y);
-  vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+                                          wedge_offset_x, wedge_offset_y);
+  aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                      mask, MASK_MASTER_STRIDE, h, w, subh, subw);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_masked_compound_wedge_extend_highbd(
     uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
     const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -369,13 +369,13 @@
     int bd) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                           wedge_offset_x, wedge_offset_y);
-  vpx_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+                                          wedge_offset_x, wedge_offset_y);
+  aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
                             src1_stride, mask, MASK_MASTER_STRIDE, h, w, subh,
                             subw, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_SUPERTX
 
 static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
@@ -388,13 +388,13 @@
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask =
-      vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
-  vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+      av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+  aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                      mask, 4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh,
                      subw);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_masked_compound_wedge_highbd(
     uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
     const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -404,28 +404,28 @@
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask =
-      vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
-  vpx_highbd_blend_a64_mask(
+      av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+  aom_highbd_blend_a64_mask(
       dst_8, dst_stride, src0_8, src0_stride, src1_8, src1_stride, mask,
       4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh, subw, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const int subpel_x, const int subpel_y,
-                                      const struct scale_factors *sf, int w,
-                                      int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+                                     uint8_t *dst, int dst_stride,
+                                     const int subpel_x, const int subpel_y,
+                                     const struct scale_factors *sf, int w,
+                                     int h,
 #if CONFIG_DUAL_FILTER
-                                      const INTERP_FILTER *interp_filter,
+                                     const INTERP_FILTER *interp_filter,
 #else
-                                      const INTERP_FILTER interp_filter,
+                                     const INTERP_FILTER interp_filter,
 #endif
-                                      int xs, int ys,
+                                     int xs, int ys,
 #if CONFIG_SUPERTX
-                                      int wedge_offset_x, int wedge_offset_y,
+                                     int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                      const MACROBLOCKD *xd) {
+                                     const MACROBLOCKD *xd) {
   const MODE_INFO *mi = xd->mi[0];
 // The prediction filter types used here should be those for
 // the second reference block.
@@ -436,13 +436,13 @@
 #else
   INTERP_FILTER tmp_ipf = interp_filter;
 #endif  // CONFIG_DUAL_FILTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst_[2 * MAX_SB_SQUARE]);
   uint8_t *tmp_dst = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
                          ? CONVERT_TO_BYTEPTR(tmp_dst_)
                          : tmp_dst_;
-  vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
-                            subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+  av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+                           subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
 #if CONFIG_SUPERTX
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     build_masked_compound_wedge_extend_highbd(
@@ -466,10 +466,10 @@
                                 mi->mbmi.interinter_wedge_sign,
                                 mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
-#else   // CONFIG_VP9_HIGHBITDEPTH
+#else   // CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst[MAX_SB_SQUARE]);
-  vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
-                            subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+  av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+                           subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
 #if CONFIG_SUPERTX
   build_masked_compound_wedge_extend(
       dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
@@ -481,12 +481,12 @@
                               mi->mbmi.interinter_wedge_sign, mi->mbmi.sb_type,
                               h, w);
 #endif  // CONFIG_SUPERTX
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 #endif  // CONFIG_EXT_INTER
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
 #if CONFIG_DUAL_FILTER
@@ -498,7 +498,7 @@
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -508,22 +508,22 @@
                          sf, w, h, ref, interp_filter, sf->x_step_q4,
                          sf->y_step_q4, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                                uint8_t *dst, int dst_stride, const MV *src_mv,
-                                const struct scale_factors *sf, int w, int h,
-                                int ref,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+                               int dst_stride, const MV *src_mv,
+                               const struct scale_factors *sf, int w, int h,
+                               int ref,
 #if CONFIG_DUAL_FILTER
-                                const INTERP_FILTER *interp_filter,
+                               const INTERP_FILTER *interp_filter,
 #else
-                                const INTERP_FILTER interp_filter,
+                               const INTERP_FILTER interp_filter,
 #endif
-                                enum mv_precision precision, int x, int y) {
+                               enum mv_precision precision, int x, int y) {
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -600,7 +600,7 @@
           uint8_t *pre;
           MV32 scaled_mv;
           int xs, ys, subpel_x, subpel_y;
-          const int is_scaled = vp10_is_scaled(sf);
+          const int is_scaled = av1_is_scaled(sf);
 
           x = x_base + idx * x_step;
           y = y_base + idy * y_step;
@@ -610,7 +610,7 @@
           if (is_scaled) {
             pre =
                 pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-            scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+            scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
             xs = sf->x_step_q4;
             ys = sf->y_step_q4;
           } else {
@@ -628,7 +628,7 @@
 #if CONFIG_EXT_INTER
           if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
               mi->mbmi.use_wedge_interinter)
-            vp10_make_masked_inter_predictor(
+            av1_make_masked_inter_predictor(
                 pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
                 sf, w, h, mi->mbmi.interp_filter, xs, ys,
 #if CONFIG_SUPERTX
@@ -637,9 +637,9 @@
                 xd);
           else
 #endif  // CONFIG_EXT_INTER
-            vp10_make_inter_predictor(
-                pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
-                sf, x_step, y_step, ref, mi->mbmi.interp_filter, xs, ys, xd);
+            av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                                     subpel_x, subpel_y, sf, x_step, y_step,
+                                     ref, mi->mbmi.interp_filter, xs, ys, xd);
         }
       }
     }
@@ -667,11 +667,11 @@
     uint8_t *pre;
     MV32 scaled_mv;
     int xs, ys, subpel_x, subpel_y;
-    const int is_scaled = vp10_is_scaled(sf);
+    const int is_scaled = av1_is_scaled(sf);
 
     if (is_scaled) {
       pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-      scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
       xs = sf->x_step_q4;
       ys = sf->y_step_q4;
     } else {
@@ -689,36 +689,36 @@
 #if CONFIG_EXT_INTER
     if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
         mi->mbmi.use_wedge_interinter)
-      vp10_make_masked_inter_predictor(pre, pre_buf->stride, dst,
-                                       dst_buf->stride, subpel_x, subpel_y, sf,
-                                       w, h, mi->mbmi.interp_filter, xs, ys,
+      av1_make_masked_inter_predictor(pre, pre_buf->stride, dst,
+                                      dst_buf->stride, subpel_x, subpel_y, sf,
+                                      w, h, mi->mbmi.interp_filter, xs, ys,
 #if CONFIG_SUPERTX
-                                       wedge_offset_x, wedge_offset_y,
+                                      wedge_offset_x, wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                       xd);
+                                      xd);
     else
 #else  // CONFIG_EXT_INTER
 #if CONFIG_GLOBAL_MOTION
     if (is_global[ref])
-      vp10_warp_plane(&(gm[ref]->motion_params),
-#if CONFIG_VP9_HIGHBITDEPTH
-                      xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                      pre_buf->buf0, pre_buf->width, pre_buf->height,
-                      pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
-                      (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
-                      pd->subsampling_x, pd->subsampling_y, xs, ys);
+      av1_warp_plane(&(gm[ref]->motion_params),
+#if CONFIG_AOM_HIGHBITDEPTH
+                     xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                     pre_buf->buf0, pre_buf->width, pre_buf->height,
+                     pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
+                     (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
+                     pd->subsampling_x, pd->subsampling_y, xs, ys);
     else
 #endif  // CONFIG_GLOBAL_MOTION
 #endif  // CONFIG_EXT_INTER
-      vp10_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                                subpel_x, subpel_y, sf, w, h, ref,
-                                mi->mbmi.interp_filter, xs, ys, xd);
+      av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                               subpel_x, subpel_y, sf, w, h, ref,
+                               mi->mbmi.interp_filter, xs, ys, xd);
   }
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
-                                       int ir, int ic, int mi_row, int mi_col) {
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+                                      int ic, int mi_row, int mi_col) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   MODE_INFO *const mi = xd->mi[0];
   const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
@@ -732,27 +732,27 @@
   for (ref = 0; ref < 1 + is_compound; ++ref) {
     const uint8_t *pre =
         &pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      vp10_highbd_build_inter_predictor(
+      av1_highbd_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
           mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir, xd->bd);
     } else {
-      vp10_build_inter_predictor(
+      av1_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
           mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir);
     }
 #else
-    vp10_build_inter_predictor(
+    av1_build_inter_predictor(
         pre, pd->pre[ref].stride, dst, pd->dst.stride,
         &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
         ref, mi->mbmi.interp_filter, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
         mi_row * MI_SIZE + 4 * ir);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 }
 
@@ -804,61 +804,61 @@
   }
 }
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
-                                         xd->plane[0].dst.stride, bsize);
+    av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+                                        xd->plane[0].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize, int plane) {
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize, int plane) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi)) {
     if (plane == 0) {
-      vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
-                                           xd->plane[0].dst.stride, bsize);
+      av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+                                          xd->plane[0].dst.stride, bsize);
     } else {
-      vp10_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
-                                           xd->plane[plane].dst.stride, plane,
-                                           bsize);
+      av1_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
+                                          xd->plane[plane].dst.stride, plane,
+                                          bsize);
     }
   }
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                      BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                     BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                     MAX_MB_PLANE - 1);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors_sbuv(
+    av1_build_interintra_predictors_sbuv(
         xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                     MAX_MB_PLANE - 1);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors(
+    av1_build_interintra_predictors(
         xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
         xd->plane[0].dst.stride, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col) {
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row,
+                          int mi_col) {
   uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
                                            src->v_buffer };
   const int widths[MAX_MB_PLANE] = { src->y_crop_width, src->uv_crop_width,
@@ -877,9 +877,9 @@
   }
 }
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *sf) {
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *sf) {
   if (src != NULL) {
     int i;
     uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
@@ -929,7 +929,7 @@
   return NULL;
 }
 
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
     int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
     BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
@@ -946,9 +946,9 @@
 
   int w_remain, h_remain;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int is_hdb = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   assert(bsize <= BLOCK_32X32);
   assert(IMPLIES(plane == 0, ssx == 0));
@@ -963,13 +963,13 @@
       dst += h_offset * dst_stride;
       pre += h_offset * pre_stride;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (is_hdb)
-        vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
+        aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
                                    pre_stride, mask, h, top_w, xd->bd);
       else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-        vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+        aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
                             mask, h, top_w);
 
       dst += h * dst_stride;
@@ -984,13 +984,13 @@
       dst += w_offset;
       pre += w_offset;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (is_hdb)
-        vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
+        aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
                                    pre_stride, mask, top_h, w, xd->bd);
       else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-        vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+        aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
                             mask, top_h, w);
 
       dst += w;
@@ -1007,7 +1007,7 @@
     return;
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (is_hdb) {
     dst = (uint8_t *)CONVERT_TO_SHORTPTR(dst);
     pre = (const uint8_t *)CONVERT_TO_SHORTPTR(pre);
@@ -1015,7 +1015,7 @@
     pre_stride *= 2;
     w_remain *= 2;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   do {
     memcpy(dst, pre, w_remain * sizeof(uint8_t));
@@ -1024,13 +1024,12 @@
   } while (--h_remain);
 }
 
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                                  int mi_row_ori,
-                                                  int mi_col_ori,
+                                                 int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                                  int mi_row, int mi_col,
-                                                  BLOCK_SIZE bsize, int block) {
+                                                 int mi_row, int mi_col,
+                                                 BLOCK_SIZE bsize, int block) {
   // Prediction function used in supertx:
   // Use the mv at current block (which is less than 8x8)
   // to get prediction of a block located at (mi_row, mi_col) at size of bsize
@@ -1068,19 +1067,19 @@
   }
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors(
+    av1_build_interintra_predictors(
         xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
         xd->plane[0].dst.stride, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                           int mi_row_ori, int mi_col_ori,
+                                          int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                           int mi_row, int mi_col,
-                                           BLOCK_SIZE bsize) {
+                                          int mi_row, int mi_col,
+                                          BLOCK_SIZE bsize) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
@@ -1152,7 +1151,7 @@
 };
 #endif  // CONFIG_EXT_PARTITION
 
-const uint8_t *vp10_get_obmc_mask(int length) {
+const uint8_t *av1_get_obmc_mask(int length) {
   switch (length) {
     case 1: return obmc_mask_1;
     case 2: return obmc_mask_2;
@@ -1171,22 +1170,22 @@
 // top/left neighboring blocks' inter predictors with the regular inter
 // prediction. We assume the original prediction (bmc) is stored in
 // xd->plane[].dst.buf
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                      int mi_row, int mi_col,
-                                      uint8_t *above[MAX_MB_PLANE],
-                                      int above_stride[MAX_MB_PLANE],
-                                      uint8_t *left[MAX_MB_PLANE],
-                                      int left_stride[MAX_MB_PLANE]) {
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int mi_row, int mi_col,
+                                     uint8_t *above[MAX_MB_PLANE],
+                                     int above_stride[MAX_MB_PLANE],
+                                     uint8_t *left[MAX_MB_PLANE],
+                                     int left_stride[MAX_MB_PLANE]) {
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int plane, i;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // handle above row
   if (xd->up_available) {
     const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
-    const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+    const int miw = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
     const int mi_row_offset = -1;
 
     assert(miw > 0);
@@ -1197,7 +1196,7 @@
       const MB_MODE_INFO *const above_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+          AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
 
       if (is_neighbor_overlappable(above_mbmi)) {
         for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1209,15 +1208,15 @@
           const int tmp_stride = above_stride[plane];
           const uint8_t *const tmp =
               &above[plane][(i * MI_SIZE) >> pd->subsampling_x];
-          const uint8_t *const mask = vp10_get_obmc_mask(bh);
+          const uint8_t *const mask = av1_get_obmc_mask(bh);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (is_hbd)
-            vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+            aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                        tmp_stride, mask, bh, bw, xd->bd);
           else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-            vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+            aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bh, bw);
         }
       }
@@ -1228,7 +1227,7 @@
   // handle left column
   if (xd->left_available) {
     const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
-    const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+    const int mih = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
     const int mi_col_offset = -1;
 
     assert(mih > 0);
@@ -1239,7 +1238,7 @@
       const MB_MODE_INFO *const left_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+          AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
 
       if (is_neighbor_overlappable(left_mbmi)) {
         for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1252,15 +1251,15 @@
           const int tmp_stride = left_stride[plane];
           const uint8_t *const tmp =
               &left[plane][(i * MI_SIZE * tmp_stride) >> pd->subsampling_y];
-          const uint8_t *const mask = vp10_get_obmc_mask(bw);
+          const uint8_t *const mask = av1_get_obmc_mask(bw);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (is_hbd)
-            vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+            aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                        tmp_stride, mask, bh, bw, xd->bd);
           else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-            vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+            aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bh, bw);
         }
       }
@@ -1282,19 +1281,19 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                          int mi_row, int mi_col,
-                                          uint8_t *tmp_buf[MAX_MB_PLANE],
-                                          int tmp_width[MAX_MB_PLANE],
-                                          int tmp_height[MAX_MB_PLANE],
-                                          int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                         int mi_row, int mi_col,
+                                         uint8_t *tmp_buf[MAX_MB_PLANE],
+                                         int tmp_width[MAX_MB_PLANE],
+                                         int tmp_height[MAX_MB_PLANE],
+                                         int tmp_stride[MAX_MB_PLANE]) {
   const TileInfo *const tile = &xd->tile;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int i, j, mi_step, ref;
 
   if (mi_row <= tile->mi_row_start) return;
 
-  for (i = 0; i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
+  for (i = 0; i < AOMMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
     int mi_row_offset = -1;
     int mi_col_offset = i;
     int mi_x, mi_y, bw, bh;
@@ -1304,7 +1303,7 @@
     MB_MODE_INFO backup_mbmi;
 #endif  // CONFIG_EXT_INTER
 
-    mi_step = VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+    mi_step = AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
 
     if (!is_neighbor_overlappable(above_mbmi)) continue;
 
@@ -1324,11 +1323,11 @@
       RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
       xd->block_refs[ref] = ref_buf;
-      if ((!vp10_is_valid_scale(&ref_buf->sf)))
-        vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+      if ((!av1_is_valid_scale(&ref_buf->sf)))
+        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reference frame has invalid dimensions");
-      vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
-                            &ref_buf->sf);
+      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
+                           &ref_buf->sf);
     }
 
     xd->mb_to_left_edge = -(((mi_col + i) * MI_SIZE) * 8);
@@ -1338,7 +1337,7 @@
     for (j = 0; j < MAX_MB_PLANE; ++j) {
       const struct macroblockd_plane *pd = &xd->plane[j];
       bw = (mi_step * 8) >> pd->subsampling_x;
-      bh = VPXMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
+      bh = AOMMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
                   4);
 
       if (above_mbmi->sb_type < BLOCK_8X8) {
@@ -1379,19 +1378,19 @@
   xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
 }
 
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                         int mi_row, int mi_col,
-                                         uint8_t *tmp_buf[MAX_MB_PLANE],
-                                         int tmp_width[MAX_MB_PLANE],
-                                         int tmp_height[MAX_MB_PLANE],
-                                         int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        int mi_row, int mi_col,
+                                        uint8_t *tmp_buf[MAX_MB_PLANE],
+                                        int tmp_width[MAX_MB_PLANE],
+                                        int tmp_height[MAX_MB_PLANE],
+                                        int tmp_stride[MAX_MB_PLANE]) {
   const TileInfo *const tile = &xd->tile;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int i, j, mi_step, ref;
 
   if (mi_col == 0 || (mi_col - 1 < tile->mi_col_start)) return;
 
-  for (i = 0; i < VPXMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
+  for (i = 0; i < AOMMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
     int mi_row_offset = i;
     int mi_col_offset = -1;
     int mi_x, mi_y, bw, bh;
@@ -1401,7 +1400,7 @@
     MB_MODE_INFO backup_mbmi;
 #endif  // CONFIG_EXT_INTER
 
-    mi_step = VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+    mi_step = AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
 
     if (!is_neighbor_overlappable(left_mbmi)) continue;
 
@@ -1421,11 +1420,11 @@
       RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
       xd->block_refs[ref] = ref_buf;
-      if ((!vp10_is_valid_scale(&ref_buf->sf)))
-        vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+      if ((!av1_is_valid_scale(&ref_buf->sf)))
+        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reference frame has invalid dimensions");
-      vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
-                            &ref_buf->sf);
+      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
+                           &ref_buf->sf);
     }
 
     xd->mb_to_top_edge = -(((mi_row + i) * MI_SIZE) * 8);
@@ -1434,7 +1433,7 @@
 
     for (j = 0; j < MAX_MB_PLANE; ++j) {
       const struct macroblockd_plane *pd = &xd->plane[j];
-      bw = VPXMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
+      bw = AOMMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
                   4);
       bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y;
 
@@ -1515,10 +1514,10 @@
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask =
-          vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
-      vpx_blend_a64_mask(
+      aom_blend_a64_mask(
           comppred, compstride, intrapred, intrastride, interpred, interstride,
           mask, 4 * num_4x4_blocks_wide_lookup[bsize], bh, bw, subh, subw);
     }
@@ -1531,7 +1530,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[i * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1542,7 +1541,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[j * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1556,7 +1555,7 @@
                        ii_weights1d[j * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1570,7 +1569,7 @@
                        ii_weights1d[i * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1581,7 +1580,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[(i < j ? i : j) * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1594,7 +1593,7 @@
               (ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
               1;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1605,7 +1604,7 @@
     default:
       for (i = 0; i < bh; ++i) {
         for (j = 0; j < bw; ++j) {
-          comppred[i * compstride + j] = VPX_BLEND_AVG(
+          comppred[i * compstride + j] = AOM_BLEND_AVG(
               intrapred[i * intrastride + j], interpred[i * interstride + j]);
         }
       }
@@ -1613,7 +1612,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void combine_interintra_highbd(
     INTERINTRA_MODE mode, int use_wedge_interintra, int wedge_index,
     int wedge_sign, BLOCK_SIZE bsize, BLOCK_SIZE plane_bsize,
@@ -1631,10 +1630,10 @@
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask =
-          vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
-      vpx_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
+      aom_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
                                 interpred8, interstride, mask, bw, bh, bw, subh,
                                 subw, bd);
     }
@@ -1647,7 +1646,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[i * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1658,7 +1657,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[j * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1672,7 +1671,7 @@
                        ii_weights1d[j * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1686,7 +1685,7 @@
                        ii_weights1d[i * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1697,7 +1696,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[(i < j ? i : j) * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1710,7 +1709,7 @@
               (ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
               1;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1721,14 +1720,14 @@
     default:
       for (i = 0; i < bh; ++i) {
         for (j = 0; j < bw; ++j) {
-          comppred[i * compstride + j] = VPX_BLEND_AVG(
+          comppred[i * compstride + j] = AOM_BLEND_AVG(
               interpred[i * interstride + j], intrapred[i * intrastride + j]);
         }
       }
       break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Break down rectangular intra prediction for joint spatio-temporal prediction
 // into two square intra predictions.
@@ -1745,47 +1744,47 @@
   TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
 
   if (bwl == bhl) {
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
 
   } else if (bwl < bhl) {
     uint8_t *src_2 = ref + pxbw * ref_stride;
     uint8_t *dst_2 = dst + pxbw * dst_stride;
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
       uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
       memcpy(src_216 - ref_stride, dst_216 - dst_stride,
              sizeof(*src_216) * pxbw);
     } else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     {
       memcpy(src_2 - ref_stride, dst_2 - dst_stride, sizeof(*src_2) * pxbw);
     }
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
-                             dst_2, dst_stride, 0, 1 << bwl, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+                            dst_2, dst_stride, 0, 1 << bwl, plane);
   } else {  // bwl > bhl
     int i;
     uint8_t *src_2 = ref + pxbh;
     uint8_t *dst_2 = dst + pxbh;
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
       uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
       for (i = 0; i < pxbh; ++i)
         src_216[i * ref_stride - 1] = dst_216[i * dst_stride - 1];
     } else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     {
       for (i = 0; i < pxbh; ++i)
         src_2[i * ref_stride - 1] = dst_2[i * dst_stride - 1];
     }
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
-                             dst_2, dst_stride, 1 << bhl, 0, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+                            dst_2, dst_stride, 1 << bhl, 0, plane);
   }
 }
 
@@ -1795,20 +1794,20 @@
   D117_PRED, D153_PRED, D207_PRED, D63_PRED, TM_PRED
 };
 
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
-                                                BLOCK_SIZE bsize, int plane,
-                                                uint8_t *dst, int dst_stride) {
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+                                               BLOCK_SIZE bsize, int plane,
+                                               uint8_t *dst, int dst_stride) {
   build_intra_predictors_for_interintra(
       xd, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride, dst,
       dst_stride, interintra_to_intra_mode[xd->mi[0]->mbmi.interintra_mode],
       bsize, plane);
 }
 
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
-                             const uint8_t *inter_pred, int inter_stride,
-                             const uint8_t *intra_pred, int intra_stride) {
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+                            const uint8_t *inter_pred, int inter_stride,
+                            const uint8_t *intra_pred, int intra_stride) {
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     combine_interintra_highbd(
         xd->mi[0]->mbmi.interintra_mode, xd->mi[0]->mbmi.use_wedge_interintra,
@@ -1818,7 +1817,7 @@
         inter_stride, intra_pred, intra_stride, xd->bd);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   combine_interintra(xd->mi[0]->mbmi.interintra_mode,
                      xd->mi[0]->mbmi.use_wedge_interintra,
                      xd->mi[0]->mbmi.interintra_wedge_index,
@@ -1827,63 +1826,63 @@
                      inter_pred, inter_stride, intra_pred, intra_stride);
 }
 
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, intrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(
+    av1_build_intra_predictors_for_interintra(
         xd, bsize, 0, CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, 0, ypred, ystride,
-                            CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, 0, ypred, ystride,
+                           CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   {
     DECLARE_ALIGNED(16, uint8_t, intrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
-                                               MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
-                            MAX_SB_SIZE);
+    av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
+                                              MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
+                           MAX_SB_SIZE);
   }
 }
 
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
-                                          int ustride, int plane,
-                                          BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+                                         int ustride, int plane,
+                                         BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, uintrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(
+    av1_build_intra_predictors_for_interintra(
         xd, bsize, plane, CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, plane, upred, ustride,
-                            CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, plane, upred, ustride,
+                           CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   {
     DECLARE_ALIGNED(16, uint8_t, uintrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(xd, bsize, plane,
-                                               uintrapredictor, MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
-                            MAX_SB_SIZE);
+    av1_build_intra_predictors_for_interintra(xd, bsize, plane, uintrapredictor,
+                                              MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
+                           MAX_SB_SIZE);
   }
 }
 
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize) {
-  vp10_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
-  vp10_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize) {
+  av1_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
+  av1_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
 }
 
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
-                                      uint8_t *upred, uint8_t *vpred,
-                                      int ystride, int ustride, int vstride,
-                                      BLOCK_SIZE bsize) {
-  vp10_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
-  vp10_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
-                                        bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+                                     uint8_t *upred, uint8_t *vpred,
+                                     int ystride, int ustride, int vstride,
+                                     BLOCK_SIZE bsize) {
+  av1_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
+  av1_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
+                                       bsize);
 }
 
 // Builds the inter-predictor for the single ref case
@@ -1899,7 +1898,7 @@
 
   const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
   struct buf_2d *const pre_buf = &pd->pre[ref];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t *const dst =
       (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? CONVERT_TO_BYTEPTR(ext_dst)
                                                    : ext_dst) +
@@ -1922,11 +1921,11 @@
   uint8_t *pre;
   MV32 scaled_mv;
   int xs, ys, subpel_x, subpel_y;
-  const int is_scaled = vp10_is_scaled(sf);
+  const int is_scaled = av1_is_scaled(sf);
 
   if (is_scaled) {
     pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-    scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+    scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
     xs = sf->x_step_q4;
     ys = sf->y_step_q4;
   } else {
@@ -1941,12 +1940,12 @@
   pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
          (scaled_mv.col >> SUBPEL_BITS);
 
-  vp10_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
-                            subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
-                            ys, xd);
+  av1_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
+                           subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
+                           ys, xd);
 }
 
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
     MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
     int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]) {
   int plane;
@@ -1987,7 +1986,7 @@
 
   if (is_compound && is_interinter_wedge_used(mbmi->sb_type) &&
       mbmi->use_wedge_interinter) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       build_masked_compound_wedge_highbd(
           dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
@@ -1995,28 +1994,30 @@
           mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
           mbmi->sb_type, h, w, xd->bd);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       build_masked_compound_wedge(
           dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
           ext_dst_stride1, mbmi->interinter_wedge_index,
           mbmi->interinter_wedge_sign, mbmi->sb_type, h, w);
   } else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-      vpx_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
+      aom_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
                                dst, dst_buf->stride, NULL, 0, NULL, 0, w, h,
                                xd->bd);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      vpx_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      aom_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
                         0, NULL, 0, w, h);
   }
 }
 
-void vp10_build_wedge_inter_predictor_from_buf(
-    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
-    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
-    int ext_dst_stride1[3]) {
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+                                              int plane_from, int plane_to,
+                                              uint8_t *ext_dst0[3],
+                                              int ext_dst_stride0[3],
+                                              uint8_t *ext_dst1[3],
+                                              int ext_dst_stride1[3]) {
   int plane;
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize =
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 092926d..4182d9f 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RECONINTER_H_
-#define VP10_COMMON_RECONINTER_H_
+#ifndef AV1_COMMON_RECONINTER_H_
+#define AV1_COMMON_RECONINTER_H_
 
 #include "av1/common/filter.h"
 #include "av1/common/onyxc_int.h"
-#include "av1/common/vp10_convolve.h"
-#include "aom/vpx_integer.h"
+#include "av1/common/av1_convolve.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -33,27 +33,27 @@
                                    int xs, int ys) {
 #if CONFIG_DUAL_FILTER
   InterpFilterParams interp_filter_params_x =
-      vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+      av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
   InterpFilterParams interp_filter_params_y =
-      vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+      av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
 #else
   InterpFilterParams interp_filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
 
 #if CONFIG_DUAL_FILTER
   if (interp_filter_params_x.taps == SUBPEL_TAPS &&
       interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
 #else
   if (interp_filter_params.taps == SUBPEL_TAPS) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
 #endif
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (IsInterpolatingFilter(interp_filter)) {
@@ -72,12 +72,12 @@
     // ref_idx > 0 means this is the second reference frame
     // first reference frame's prediction result is already in dst
     // therefore we need to average the first and second results
-    vp10_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
-                  subpel_x, xs, subpel_y, ys, ref_idx);
+    av1_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                 subpel_x, xs, subpel_y, ys, ref_idx);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_inter_predictor(const uint8_t *src, int src_stride,
                                           uint8_t *dst, int dst_stride,
                                           const int subpel_x,
@@ -92,27 +92,27 @@
                                           int xs, int ys, int bd) {
 #if CONFIG_DUAL_FILTER
   InterpFilterParams interp_filter_params_x =
-      vp10_get_interp_filter_params(interp_filter[1 + 2 * ref]);
+      av1_get_interp_filter_params(interp_filter[1 + 2 * ref]);
   InterpFilterParams interp_filter_params_y =
-      vp10_get_interp_filter_params(interp_filter[0 + 2 * ref]);
+      av1_get_interp_filter_params(interp_filter[0 + 2 * ref]);
 #else
   InterpFilterParams interp_filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
 
 #if CONFIG_DUAL_FILTER
   if (interp_filter_params_x.taps == SUBPEL_TAPS &&
       interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
 #else
   if (interp_filter_params.taps == SUBPEL_TAPS) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
 #endif  // CONFIG_DUAL_FILTER
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (IsInterpolatingFilter(interp_filter)) {
@@ -134,11 +134,11 @@
     // first reference frame's prediction result is already in dst
     // therefore we need to average the first and second results
     int avg = ref > 0;
-    vp10_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
-                         subpel_x, xs, subpel_y, ys, avg, bd);
+    av1_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                        subpel_x, xs, subpel_y, ys, avg, bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EXT_INTER
 // Set to one to use larger codebooks
@@ -223,7 +223,7 @@
 #endif  // CONFIG_SUPERTX && CONFIG_EXT_INTER
                             int mi_x, int mi_y);
 
-static INLINE void vp10_make_inter_predictor(
+static INLINE void av1_make_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const int subpel_x, const int subpel_y, const struct scale_factors *sf,
     int w, int h, int ref,
@@ -234,32 +234,32 @@
 #endif
     int xs, int ys, const MACROBLOCKD *xd) {
   (void)xd;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                            sf, w, h, ref, interp_filter, xs, ys, xd->bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf, w,
                     h, ref, interp_filter, xs, ys);
 }
 
 #if CONFIG_EXT_INTER
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const int subpel_x, const int subpel_y,
-                                      const struct scale_factors *sf, int w,
-                                      int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+                                     uint8_t *dst, int dst_stride,
+                                     const int subpel_x, const int subpel_y,
+                                     const struct scale_factors *sf, int w,
+                                     int h,
 #if CONFIG_DUAL_FILTER
-                                      const INTERP_FILTER *interp_filter,
+                                     const INTERP_FILTER *interp_filter,
 #else
-                                      const INTERP_FILTER interp_filter,
+                                     const INTERP_FILTER interp_filter,
 #endif
-                                      int xs, int ys,
+                                     int xs, int ys,
 #if CONFIG_SUPERTX
-                                      int wedge_offset_x, int wedge_offset_y,
+                                     int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                      const MACROBLOCKD *xd);
+                                     const MACROBLOCKD *xd);
 #endif  // CONFIG_EXT_INTER
 
 static INLINE int round_mv_comp_q4(int value) {
@@ -297,9 +297,9 @@
   // If the MV points so far into the UMV border that no visible pixels
   // are used for reconstruction, the subpel part of the MV can be
   // discarded and the MV limited to 16 pixels with equivalent results.
-  const int spel_left = (VPX_INTERP_EXTEND + bw) << SUBPEL_BITS;
+  const int spel_left = (AOM_INTERP_EXTEND + bw) << SUBPEL_BITS;
   const int spel_right = spel_left - SUBPEL_SHIFTS;
-  const int spel_top = (VPX_INTERP_EXTEND + bh) << SUBPEL_BITS;
+  const int spel_top = (AOM_INTERP_EXTEND + bh) << SUBPEL_BITS;
   const int spel_bottom = spel_top - SUBPEL_SHIFTS;
   MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)),
                     src_mv->col * (1 << (1 - ss_x)) };
@@ -328,57 +328,56 @@
   return res;
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
-                                       int ir, int ic, int mi_row, int mi_col);
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+                                      int ic, int mi_row, int mi_col);
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize, int plane);
-
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                      BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize);
 
-#if CONFIG_SUPERTX
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
-#if CONFIG_EXT_INTER
-                                                  int mi_row_ori,
-                                                  int mi_col_ori,
-#endif  // CONFIG_EXT_INTER
-                                                  int mi_row, int mi_col,
-                                                  BLOCK_SIZE bsize, int block);
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize, int plane);
 
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                     BLOCK_SIZE bsize);
+
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize);
+
+#if CONFIG_SUPERTX
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                           int mi_row_ori, int mi_col_ori,
+                                                 int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                           int mi_row, int mi_col,
-                                           BLOCK_SIZE bsize);
+                                                 int mi_row, int mi_col,
+                                                 BLOCK_SIZE bsize, int block);
+
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+#if CONFIG_EXT_INTER
+                                          int mi_row_ori, int mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                                          int mi_row, int mi_col,
+                                          BLOCK_SIZE bsize);
 struct macroblockd_plane;
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
     int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
     BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
     int plane);
 #endif  // CONFIG_SUPERTX
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                                uint8_t *dst, int dst_stride, const MV *mv_q3,
-                                const struct scale_factors *sf, int w, int h,
-                                int do_avg,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+                               int dst_stride, const MV *mv_q3,
+                               const struct scale_factors *sf, int w, int h,
+                               int do_avg,
 #if CONFIG_DUAL_FILTER
-                                const INTERP_FILTER *interp_filter,
+                               const INTERP_FILTER *interp_filter,
 #else
-                                const INTERP_FILTER interp_filter,
+                               const INTERP_FILTER interp_filter,
 #endif
-                                enum mv_precision precision, int x, int y);
+                               enum mv_precision precision, int x, int y);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
 #if CONFIG_DUAL_FILTER
@@ -410,13 +409,13 @@
   dst->stride = stride;
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col);
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row,
+                          int mi_col);
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *sf);
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *sf);
 
 #if CONFIG_DUAL_FILTER
 // Detect if the block have sub-pixel level motion vectors
@@ -463,7 +462,7 @@
 #endif
 
 #if CONFIG_EXT_INTERP
-static INLINE int vp10_is_interp_needed(const MACROBLOCKD *const xd) {
+static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -483,8 +482,8 @@
 #endif
 
   // For scaled references, interpolation filter is indicated all the time.
-  if (vp10_is_scaled(&xd->block_refs[0]->sf)) return 1;
-  if (is_compound && vp10_is_scaled(&xd->block_refs[1]->sf)) return 1;
+  if (av1_is_scaled(&xd->block_refs[0]->sf)) return 1;
+  if (is_compound && av1_is_scaled(&xd->block_refs[1]->sf)) return 1;
 
   if (bsize < BLOCK_8X8) {
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -516,81 +515,83 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if CONFIG_OBMC
-const uint8_t *vp10_get_obmc_mask(int length);
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                      int mi_row, int mi_col,
-                                      uint8_t *above[MAX_MB_PLANE],
-                                      int above_stride[MAX_MB_PLANE],
-                                      uint8_t *left[MAX_MB_PLANE],
-                                      int left_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                          int mi_row, int mi_col,
-                                          uint8_t *tmp_buf[MAX_MB_PLANE],
-                                          int tmp_width[MAX_MB_PLANE],
-                                          int tmp_height[MAX_MB_PLANE],
-                                          int tmp_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
+const uint8_t *av1_get_obmc_mask(int length);
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int mi_row, int mi_col,
+                                     uint8_t *above[MAX_MB_PLANE],
+                                     int above_stride[MAX_MB_PLANE],
+                                     uint8_t *left[MAX_MB_PLANE],
+                                     int left_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          uint8_t *tmp_buf[MAX_MB_PLANE],
                                          int tmp_width[MAX_MB_PLANE],
                                          int tmp_height[MAX_MB_PLANE],
                                          int tmp_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        int mi_row, int mi_col,
+                                        uint8_t *tmp_buf[MAX_MB_PLANE],
+                                        int tmp_width[MAX_MB_PLANE],
+                                        int tmp_height[MAX_MB_PLANE],
+                                        int tmp_stride[MAX_MB_PLANE]);
 #endif  // CONFIG_OBMC
 
 #if CONFIG_EXT_INTER
 #define MASK_MASTER_SIZE (2 * MAX_SB_SIZE)
 #define MASK_MASTER_STRIDE (2 * MAX_SB_SIZE)
 
-void vp10_init_wedge_masks();
+void av1_init_wedge_masks();
 
-static INLINE const uint8_t *vp10_get_contiguous_soft_mask(int wedge_index,
-                                                           int wedge_sign,
-                                                           BLOCK_SIZE sb_type) {
+static INLINE const uint8_t *av1_get_contiguous_soft_mask(int wedge_index,
+                                                          int wedge_sign,
+                                                          BLOCK_SIZE sb_type) {
   return wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
 }
 
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
-                                  BLOCK_SIZE sb_type, int wedge_offset_x,
-                                  int wedge_offset_y);
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+                                 BLOCK_SIZE sb_type, int wedge_offset_x,
+                                 int wedge_offset_y);
 
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
-                                      uint8_t *upred, uint8_t *vpred,
-                                      int ystride, int ustride, int vstride,
-                                      BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
-                                          int ustride, int plane,
-                                          BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+                                     uint8_t *upred, uint8_t *vpred,
+                                     int ystride, int ustride, int vstride,
+                                     BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+                                         int ustride, int plane,
+                                         BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize);
 
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
-                                                BLOCK_SIZE bsize, int plane,
-                                                uint8_t *intra_pred,
-                                                int intra_stride);
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
-                             const uint8_t *inter_pred, int inter_stride,
-                             const uint8_t *intra_pred, int intra_stride);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize);
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+                                               BLOCK_SIZE bsize, int plane,
+                                               uint8_t *intra_pred,
+                                               int intra_stride);
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+                            const uint8_t *inter_pred, int inter_stride,
+                            const uint8_t *intra_pred, int intra_stride);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize);
 
 // Encoder only
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
     MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
     int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]);
-void vp10_build_wedge_inter_predictor_from_buf(
-    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
-    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
-    int ext_dst_stride1[3]);
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+                                              int plane_from, int plane_to,
+                                              uint8_t *ext_dst0[3],
+                                              int ext_dst_stride0[3],
+                                              uint8_t *ext_dst1[3],
+                                              int ext_dst_stride1[3]);
 #endif  // CONFIG_EXT_INTER
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTER_H_
+#endif  // AV1_COMMON_RECONINTER_H_
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 801f61e..3c08ac4 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -10,17 +10,17 @@
 
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/system_state.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
-#include "aom_dsp/vpx_dsp_common.h"
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#include "aom_mem/vpx_mem.h"
+#if CONFIG_AOM_HIGHBITDEPTH
+#include "aom_dsp/aom_dsp_common.h"
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
-#include "aom_ports/vpx_once.h"
+#include "aom_ports/aom_once.h"
 #if CONFIG_EXT_INTRA
 #include "av1/common/intra_filters.h"
 #endif
@@ -222,14 +222,14 @@
 #endif  // CONFIG_EXT_PARTITION
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
-static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                          int right_available,
+static int av1_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
+                         int right_available,
 #if CONFIG_EXT_PARTITION_TYPES
-                          PARTITION_TYPE partition,
+                         PARTITION_TYPE partition,
 #endif
-                          TX_SIZE txsz, int y, int x, int ss_x) {
+                         TX_SIZE txsz, int y, int x, int ss_x) {
   const int wl = mi_width_log2_lookup[bsize];
-  const int w = VPXMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
+  const int w = AOMMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
   const int step = 1 << txsz;
 
   if (!right_available) {
@@ -270,9 +270,9 @@
   }
 }
 
-static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                           int bottom_available, TX_SIZE txsz, int y, int x,
-                           int ss_y) {
+static int av1_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
+                          int bottom_available, TX_SIZE txsz, int y, int x,
+                          int ss_y) {
   if (!bottom_available || x != 0) {
     return 0;
   } else {
@@ -309,22 +309,22 @@
 static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
 static intra_pred_fn dc_pred[2][2][TX_SIZES];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
                                    const uint16_t *above, const uint16_t *left,
                                    int bd);
 static intra_high_pred_fn pred_high[INTRA_MODES][4];
 static intra_high_pred_fn dc_pred_high[2][2][4];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void vp10_init_intra_predictors_internal(void) {
+static void av1_init_intra_predictors_internal(void) {
 #define INIT_NO_4X4(p, type)                  \
-  p[TX_8X8] = vpx_##type##_predictor_8x8;     \
-  p[TX_16X16] = vpx_##type##_predictor_16x16; \
-  p[TX_32X32] = vpx_##type##_predictor_32x32
+  p[TX_8X8] = aom_##type##_predictor_8x8;     \
+  p[TX_16X16] = aom_##type##_predictor_16x16; \
+  p[TX_32X32] = aom_##type##_predictor_32x32
 
 #define INIT_ALL_SIZES(p, type)           \
-  p[TX_4X4] = vpx_##type##_predictor_4x4; \
+  p[TX_4X4] = aom_##type##_predictor_4x4; \
   INIT_NO_4X4(p, type)
 
   INIT_ALL_SIZES(pred[V_PRED], v);
@@ -342,7 +342,7 @@
   INIT_ALL_SIZES(dc_pred[1][0], dc_left);
   INIT_ALL_SIZES(dc_pred[1][1], dc);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
   INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
   INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207e);
@@ -357,7 +357,7 @@
   INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
   INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
   INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #undef intra_pred_allsizes
 }
@@ -388,13 +388,13 @@
     val = ROUND_POWER_OF_TWO(val, 8);
   } else {
     filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
-    filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+    filter = av1_intra_filter_kernels[filter_type][filter_idx];
 
     if (filter_idx < (1 << SUBPEL_BITS)) {
       val = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k) {
         idx = base + 1 - (SUBPEL_TAPS / 2) + k;
-        idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+        idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
         val += ref[idx] * filter[k];
       }
       val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -439,7 +439,7 @@
         base += 1;
         shift = 0;
       }
-      len = VPXMIN(bs, 2 * bs - 1 - base);
+      len = AOMMIN(bs, 2 * bs - 1 - base);
       if (len <= 0) {
         int i;
         for (i = r; i < bs; ++i) {
@@ -460,8 +460,8 @@
         }
       } else {
         if (!flags[shift]) {
-          const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
-          vpx_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
+          const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+          aom_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
                               filter, 16, NULL, 16, 2 * bs,
                               2 * bs < 16 ? 2 : 1);
           flags[shift] = 1;
@@ -570,7 +570,7 @@
         base += 1;
         shift = 0;
       }
-      len = VPXMIN(bs, 2 * bs - 1 - base);
+      len = AOMMIN(bs, 2 * bs - 1 - base);
 
       if (len <= 0) {
         for (r = 0; r < bs; ++r) {
@@ -590,8 +590,8 @@
         }
       } else {
         if (!flags[shift]) {
-          const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
-          vpx_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
+          const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+          aom_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
                              4 * SUBPEL_SHIFTS, NULL, 16, filter, 16,
                              2 * bs < 16 ? 4 : 4, 2 * bs);
           flags[shift] = 1;
@@ -730,53 +730,53 @@
   }
 }
 
-void vp10_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                const uint8_t *above, const uint8_t *left) {
+void av1_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                               const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED);
 }
 
-void vp10_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                               const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                              const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED);
 }
 
-void vp10_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                               const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                              const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED);
 }
 
-void vp10_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                 const uint8_t *above, const uint8_t *left) {
+void av1_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED);
 }
 
-void vp10_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED);
 }
 
-void vp10_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED);
 }
 
-void vp10_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED);
 }
 
-void vp10_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED);
 }
 
-void vp10_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                 const uint8_t *above, const uint8_t *left) {
+void av1_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED);
 }
 
-void vp10_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                const uint8_t *above, const uint8_t *left) {
+void av1_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                               const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED);
 }
 
@@ -784,33 +784,33 @@
                                     int bs, const uint8_t *above,
                                     const uint8_t *left) {
   switch (mode) {
-    case DC_PRED: vp10_dc_filter_predictor(dst, stride, bs, above, left); break;
-    case V_PRED: vp10_v_filter_predictor(dst, stride, bs, above, left); break;
-    case H_PRED: vp10_h_filter_predictor(dst, stride, bs, above, left); break;
+    case DC_PRED: av1_dc_filter_predictor(dst, stride, bs, above, left); break;
+    case V_PRED: av1_v_filter_predictor(dst, stride, bs, above, left); break;
+    case H_PRED: av1_h_filter_predictor(dst, stride, bs, above, left); break;
     case D45_PRED:
-      vp10_d45_filter_predictor(dst, stride, bs, above, left);
+      av1_d45_filter_predictor(dst, stride, bs, above, left);
       break;
     case D135_PRED:
-      vp10_d135_filter_predictor(dst, stride, bs, above, left);
+      av1_d135_filter_predictor(dst, stride, bs, above, left);
       break;
     case D117_PRED:
-      vp10_d117_filter_predictor(dst, stride, bs, above, left);
+      av1_d117_filter_predictor(dst, stride, bs, above, left);
       break;
     case D153_PRED:
-      vp10_d153_filter_predictor(dst, stride, bs, above, left);
+      av1_d153_filter_predictor(dst, stride, bs, above, left);
       break;
     case D207_PRED:
-      vp10_d207_filter_predictor(dst, stride, bs, above, left);
+      av1_d207_filter_predictor(dst, stride, bs, above, left);
       break;
     case D63_PRED:
-      vp10_d63_filter_predictor(dst, stride, bs, above, left);
+      av1_d63_filter_predictor(dst, stride, bs, above, left);
       break;
-    case TM_PRED: vp10_tm_filter_predictor(dst, stride, bs, above, left); break;
+    case TM_PRED: av1_tm_filter_predictor(dst, stride, bs, above, left); break;
     default: assert(0);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int highbd_intra_subpel_interp(int base, int shift, const uint16_t *ref,
                                       int ref_start_idx, int ref_end_idx,
                                       INTRA_FILTER filter_type) {
@@ -822,13 +822,13 @@
     val = ROUND_POWER_OF_TWO(val, 8);
   } else {
     filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
-    filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+    filter = av1_intra_filter_kernels[filter_type][filter_idx];
 
     if (filter_idx < (1 << SUBPEL_BITS)) {
       val = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k) {
         idx = base + 1 - (SUBPEL_TAPS / 2) + k;
-        idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+        idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
         val += ref[idx] * filter[k];
       }
       val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -956,7 +956,7 @@
   (void)above;
   (void)bd;
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, left[r], bs);
+    aom_memset16(dst, left[r], bs);
     dst += stride;
   }
 }
@@ -1025,70 +1025,70 @@
   }
 }
 
-void vp10_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                       const uint16_t *above,
-                                       const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
+                                      const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED,
                                       bd);
 }
 
-void vp10_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                      const uint16_t *above,
-                                      const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                     const uint16_t *above,
+                                     const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED, bd);
 }
 
-void vp10_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                      const uint16_t *above,
-                                      const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                     const uint16_t *above,
+                                     const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED, bd);
 }
 
-void vp10_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                        const uint16_t *above,
-                                        const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
+                                       const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED,
                                       bd);
 }
 
-void vp10_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED,
                                       bd);
 }
 
-void vp10_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED,
                                       bd);
 }
 
-void vp10_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED,
                                       bd);
 }
 
-void vp10_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED,
                                       bd);
 }
 
-void vp10_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                        const uint16_t *above,
-                                        const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
+                                       const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED,
                                       bd);
 }
 
-void vp10_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                       const uint16_t *above,
-                                       const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
+                                      const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED,
                                       bd);
 }
@@ -1099,42 +1099,42 @@
                                            const uint16_t *left, int bd) {
   switch (mode) {
     case DC_PRED:
-      vp10_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case V_PRED:
-      vp10_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case H_PRED:
-      vp10_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D45_PRED:
-      vp10_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D135_PRED:
-      vp10_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D117_PRED:
-      vp10_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D153_PRED:
-      vp10_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D207_PRED:
-      vp10_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D63_PRED:
-      vp10_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case TM_PRED:
-      vp10_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     default: assert(0);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_INTRA
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_intra_predictors_high(
     const MACROBLOCKD *xd, const uint8_t *ref8, int ref_stride, uint8_t *dst8,
     int dst_stride, PREDICTION_MODE mode, TX_SIZE tx_size, int n_top_px,
@@ -1194,7 +1194,7 @@
     int i;
     const int val = (n_left_px == 0) ? base + 1 : base - 1;
     for (i = 0; i < bs; ++i) {
-      vpx_memset16(dst, val, bs);
+      aom_memset16(dst, val, bs);
       dst += dst_stride;
     }
     return;
@@ -1224,9 +1224,9 @@
           left_col[i] = ref[i * ref_stride - 1];
       }
       if (i < (bs << need_bottom))
-        vpx_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
+        aom_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
     } else {
-      vpx_memset16(left_col, base + 1, bs << need_bottom);
+      aom_memset16(left_col, base + 1, bs << need_bottom);
     }
   }
 
@@ -1254,9 +1254,9 @@
         i += n_topright_px;
       }
       if (i < (bs << need_right))
-        vpx_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
+        aom_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
     } else {
-      vpx_memset16(above_row, base - 1, bs << need_right);
+      aom_memset16(above_row, base - 1, bs << need_right);
     }
   }
 
@@ -1285,7 +1285,7 @@
   if (mode != DC_PRED && mode != TM_PRED &&
       xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
     INTRA_FILTER filter = INTRA_FILTER_LINEAR;
-    if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+    if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
       filter = xd->mi[0]->mbmi.intra_filter;
     highbd_dr_predictor(dst, dst_stride, bs, const_above_row, left_col, p_angle,
                         xd->bd, filter);
@@ -1302,7 +1302,7 @@
                              xd->bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
                                    int ref_stride, uint8_t *dst, int dst_stride,
@@ -1453,7 +1453,7 @@
   if (mode != DC_PRED && mode != TM_PRED &&
       xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
     INTRA_FILTER filter = INTRA_FILTER_LINEAR;
-    if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+    if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
       filter = xd->mi[0]->mbmi.intra_filter;
     dr_predictor(dst, dst_stride, tx_size, const_above_row, left_col, p_angle,
                  filter);
@@ -1470,11 +1470,11 @@
   }
 }
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                              TX_SIZE tx_size, PREDICTION_MODE mode,
-                              const uint8_t *ref, int ref_stride, uint8_t *dst,
-                              int dst_stride, int col_off, int row_off,
-                              int plane) {
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+                             TX_SIZE tx_size, PREDICTION_MODE mode,
+                             const uint8_t *ref, int ref_stride, uint8_t *dst,
+                             int dst_stride, int col_off, int row_off,
+                             int plane) {
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   const int txw = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -1483,8 +1483,8 @@
   const int have_left = col_off || xd->left_available;
   const int x = col_off * 4;
   const int y = row_off * 4;
-  const int bw = pd->subsampling_x ? 1 << bwl_in : VPXMAX(2, 1 << bwl_in);
-  const int bh = pd->subsampling_y ? 1 << bhl_in : VPXMAX(2, 1 << bhl_in);
+  const int bw = pd->subsampling_x ? 1 << bwl_in : AOMMAX(2, 1 << bwl_in);
+  const int bh = pd->subsampling_y ? 1 << bhl_in : AOMMAX(2, 1 << bhl_in);
   const int mi_row = -xd->mb_to_top_edge >> (3 + MI_SIZE_LOG2);
   const int mi_col = -xd->mb_to_left_edge >> (3 + MI_SIZE_LOG2);
   const int wpx = 4 * bw;
@@ -1506,31 +1506,30 @@
   const PARTITION_TYPE partition = xd->mi[0]->mbmi.partition;
 #endif
   const int have_right =
-      vp10_has_right(bsize, mi_row, mi_col, right_available,
+      av1_has_right(bsize, mi_row, mi_col, right_available,
 #if CONFIG_EXT_PARTITION_TYPES
-                     partition,
+                    partition,
 #endif
-                     tx_size, row_off, col_off, pd->subsampling_x);
-  const int have_bottom =
-      vp10_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size, row_off, col_off,
-                      pd->subsampling_y);
+                    tx_size, row_off, col_off, pd->subsampling_x);
+  const int have_bottom = av1_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size,
+                                         row_off, col_off, pd->subsampling_y);
 
   if (xd->mi[0]->mbmi.palette_mode_info.palette_size[plane != 0] > 0) {
     const int bs = 4 * num_4x4_blocks_wide_txsize_lookup[tx_size];
     const int stride = 4 * (1 << bwl_in);
     int r, c;
     uint8_t *map = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
                         plane * PALETTE_MAX_SIZE;
 #else
     uint8_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
                        plane * PALETTE_MAX_SIZE;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     map = xd->plane[plane != 0].color_index_map;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
       for (r = 0; r < bs; ++r)
@@ -1546,29 +1545,29 @@
     for (r = 0; r < bs; ++r)
       for (c = 0; c < bs; ++c)
         dst[r * dst_stride + c] = palette[map[(r + y) * stride + c + x]];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return;
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(
         xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
-        have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
-        have_top && have_right ? VPXMIN(txwpx, xr) : 0,
-        have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
-        have_bottom && have_left ? VPXMIN(txhpx, yd) : 0, plane);
+        have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+        have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+        have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+        have_bottom && have_left ? AOMMIN(txhpx, yd) : 0, plane);
     return;
   }
 #endif
   build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
-                         have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
-                         have_top && have_right ? VPXMIN(txwpx, xr) : 0,
-                         have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
-                         have_bottom && have_left ? VPXMIN(txhpx, yd) : 0,
+                         have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+                         have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+                         have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+                         have_bottom && have_left ? AOMMIN(txhpx, yd) : 0,
                          plane);
 }
 
-void vp10_init_intra_predictors(void) {
-  once(vp10_init_intra_predictors_internal);
+void av1_init_intra_predictors(void) {
+  once(av1_init_intra_predictors_internal);
 }
diff --git a/av1/common/reconintra.h b/av1/common/reconintra.h
index d20b5a4..3adde50 100644
--- a/av1/common/reconintra.h
+++ b/av1/common/reconintra.h
@@ -8,27 +8,27 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RECONINTRA_H_
-#define VP10_COMMON_RECONINTRA_H_
+#ifndef AV1_COMMON_RECONINTRA_H_
+#define AV1_COMMON_RECONINTRA_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "av1/common/blockd.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_init_intra_predictors(void);
+void av1_init_intra_predictors(void);
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                              TX_SIZE tx_size, PREDICTION_MODE mode,
-                              const uint8_t *ref, int ref_stride, uint8_t *dst,
-                              int dst_stride, int aoff, int loff, int plane);
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+                             TX_SIZE tx_size, PREDICTION_MODE mode,
+                             const uint8_t *ref, int ref_stride, uint8_t *dst,
+                             int dst_stride, int aoff, int loff, int plane);
 #if CONFIG_EXT_INTRA
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
 #endif  // CONFIG_EXT_INTRA
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTRA_H_
+#endif  // AV1_COMMON_RECONINTRA_H_
diff --git a/av1/common/restoration.c b/av1/common/restoration.c
index 4d4c9fc..fad5dd6 100644
--- a/av1/common/restoration.c
+++ b/av1/common/restoration.c
@@ -10,12 +10,12 @@
 
 #include <math.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #define BILATERAL_PARAM_PRECISION 16
@@ -55,15 +55,15 @@
 typedef void (*restore_func_type)(uint8_t *data8, int width, int height,
                                   int stride, RestorationInternal *rst,
                                   uint8_t *tmpdata8, int tmpstride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*restore_func_highbd_type)(uint8_t *data8, int width, int height,
                                          int stride, RestorationInternal *rst,
                                          uint8_t *tmpdata8, int tmpstride,
                                          int bit_depth);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE BilateralParamsType vp10_bilateral_level_to_params(int index,
-                                                                 int kf) {
+static INLINE BilateralParamsType av1_bilateral_level_to_params(int index,
+                                                                int kf) {
   return kf ? bilateral_level_to_params_arr_kf[index]
             : bilateral_level_to_params_arr[index];
 }
@@ -77,31 +77,31 @@
   { 64, 64 }, { 128, 128 }, { 256, 256 }
 };
 
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
-                                    int *tile_width, int *tile_height,
-                                    int *nhtiles, int *nvtiles) {
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+                                   int *tile_width, int *tile_height,
+                                   int *nhtiles, int *nvtiles) {
   *tile_width = (tilesize < 0)
                     ? width
-                    : VPXMIN(restoration_tile_sizes[tilesize].width, width);
+                    : AOMMIN(restoration_tile_sizes[tilesize].width, width);
   *tile_height = (tilesize < 0)
                      ? height
-                     : VPXMIN(restoration_tile_sizes[tilesize].height, height);
+                     : AOMMIN(restoration_tile_sizes[tilesize].height, height);
   *nhtiles = (width + (*tile_width >> 1)) / *tile_width;
   *nvtiles = (height + (*tile_height >> 1)) / *tile_height;
 }
 
-int vp10_get_restoration_ntiles(int tilesize, int width, int height) {
+int av1_get_restoration_ntiles(int tilesize, int width, int height) {
   int nhtiles, nvtiles;
   int tile_width, tile_height;
-  vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
-                                 &tile_height, &nhtiles, &nvtiles);
+  av1_get_restoration_tile_size(tilesize, width, height, &tile_width,
+                                &tile_height, &nhtiles, &nvtiles);
   return (nhtiles * nvtiles);
 }
 
-void vp10_loop_restoration_precal() {
+void av1_loop_restoration_precal() {
   int i;
   for (i = 0; i < BILATERAL_LEVELS_KF; i++) {
-    const BilateralParamsType param = vp10_bilateral_level_to_params(i, 1);
+    const BilateralParamsType param = av1_bilateral_level_to_params(i, 1);
     const int sigma_x = param.sigma_x;
     const int sigma_y = param.sigma_y;
     const int sigma_r = param.sigma_r;
@@ -129,7 +129,7 @@
     }
   }
   for (i = 0; i < BILATERAL_LEVELS; i++) {
-    const BilateralParamsType param = vp10_bilateral_level_to_params(i, 0);
+    const BilateralParamsType param = av1_bilateral_level_to_params(i, 0);
     const int sigma_x = param.sigma_x;
     const int sigma_y = param.sigma_y;
     const int sigma_r = param.sigma_r;
@@ -159,13 +159,13 @@
   }
 }
 
-int vp10_bilateral_level_bits(const VP10_COMMON *const cm) {
+int av1_bilateral_level_bits(const AV1_COMMON *const cm) {
   return cm->frame_type == KEY_FRAME ? BILATERAL_LEVEL_BITS_KF
                                      : BILATERAL_LEVEL_BITS;
 }
 
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
-                                int kf, int width, int height) {
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+                               int kf, int width, int height) {
   int i, tile_idx;
   rst->restoration_type = rsi->restoration_type;
   rst->subsampling_x = 0;
@@ -173,10 +173,10 @@
   if (rsi->restoration_type == RESTORE_BILATERAL) {
     rst->tilesize_index = BILATERAL_TILESIZE;
     rst->ntiles =
-        vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
-    vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
-                                   &rst->tile_width, &rst->tile_height,
-                                   &rst->nhtiles, &rst->nvtiles);
+        av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+    av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+                                  &rst->tile_width, &rst->tile_height,
+                                  &rst->nhtiles, &rst->nvtiles);
     rst->bilateral_level = rsi->bilateral_level;
     rst->wr_lut = (uint8_t **)malloc(sizeof(*rst->wr_lut) * rst->ntiles);
     assert(rst->wr_lut != NULL);
@@ -195,10 +195,10 @@
   } else if (rsi->restoration_type == RESTORE_WIENER) {
     rst->tilesize_index = WIENER_TILESIZE;
     rst->ntiles =
-        vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
-    vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
-                                   &rst->tile_width, &rst->tile_height,
-                                   &rst->nhtiles, &rst->nvtiles);
+        av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+    av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+                                  &rst->tile_width, &rst->tile_height,
+                                  &rst->nhtiles, &rst->nvtiles);
     rst->wiener_level = rsi->wiener_level;
     rst->vfilter =
         (int(*)[RESTORATION_WIN])malloc(sizeof(*rst->vfilter) * rst->ntiles);
@@ -373,7 +373,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void loop_bilateral_filter_highbd(uint8_t *data8, int width, int height,
                                          int stride, RestorationInternal *rst,
                                          uint8_t *tmpdata8, int tmpstride,
@@ -530,10 +530,10 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                                int start_mi_row, int end_mi_row, int y_only) {
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                               int start_mi_row, int end_mi_row, int y_only) {
   const int ywidth = frame->y_crop_width;
   const int ystride = frame->y_stride;
   const int uvwidth = frame->uv_crop_width;
@@ -546,35 +546,35 @@
       cm->rst_internal.restoration_type == RESTORE_BILATERAL
           ? loop_bilateral_filter
           : loop_wiener_filter;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   restore_func_highbd_type restore_func_highbd =
       cm->rst_internal.restoration_type == RESTORE_BILATERAL
           ? loop_bilateral_filter_highbd
           : loop_wiener_filter_highbd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   YV12_BUFFER_CONFIG tmp_buf;
   memset(&tmp_buf, 0, sizeof(YV12_BUFFER_CONFIG));
 
-  yend = VPXMIN(yend, cm->height);
-  uvend = VPXMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
+  yend = AOMMIN(yend, cm->height);
+  uvend = AOMMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
 
-  if (vpx_realloc_frame_buffer(
+  if (aom_realloc_frame_buffer(
           &tmp_buf, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+          AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate tmp restoration buffer");
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth)
     restore_func_highbd(frame->y_buffer + ystart * ystride, ywidth,
                         yend - ystart, ystride, &cm->rst_internal,
                         tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
                         tmp_buf.y_stride, cm->bit_depth);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     restore_func(frame->y_buffer + ystart * ystride, ywidth, yend - ystart,
                  ystride, &cm->rst_internal,
                  tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
@@ -582,7 +582,7 @@
   if (!y_only) {
     cm->rst_internal.subsampling_x = cm->subsampling_x;
     cm->rst_internal.subsampling_y = cm->subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       restore_func_highbd(frame->u_buffer + uvstart * uvstride, uvwidth,
                           uvend - uvstart, uvstride, &cm->rst_internal,
@@ -593,7 +593,7 @@
                           tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
                           tmp_buf.uv_stride, cm->bit_depth);
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       restore_func(frame->u_buffer + uvstart * uvstride, uvwidth,
                    uvend - uvstart, uvstride, &cm->rst_internal,
                    tmp_buf.u_buffer + uvstart * tmp_buf.uv_stride,
@@ -602,11 +602,11 @@
                    uvend - uvstart, uvstride, &cm->rst_internal,
                    tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
                    tmp_buf.uv_stride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
-  vpx_free_frame_buffer(&tmp_buf);
+  aom_free_frame_buffer(&tmp_buf);
   if (cm->rst_internal.restoration_type == RESTORE_BILATERAL) {
     free(cm->rst_internal.wr_lut);
     cm->rst_internal.wr_lut = NULL;
@@ -621,9 +621,9 @@
   }
 }
 
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                                 RestorationInfo *rsi, int y_only,
-                                 int partial_frame) {
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                                RestorationInfo *rsi, int y_only,
+                                int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
   if (rsi->restoration_type != RESTORE_NONE) {
     start_mi_row = 0;
@@ -631,12 +631,12 @@
     if (partial_frame && cm->mi_rows > 8) {
       start_mi_row = cm->mi_rows >> 1;
       start_mi_row &= 0xfffffff8;
-      mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+      mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
     }
     end_mi_row = start_mi_row + mi_rows_to_filter;
-    vp10_loop_restoration_init(&cm->rst_internal, rsi,
-                               cm->frame_type == KEY_FRAME, cm->width,
-                               cm->height);
-    vp10_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
+    av1_loop_restoration_init(&cm->rst_internal, rsi,
+                              cm->frame_type == KEY_FRAME, cm->width,
+                              cm->height);
+    av1_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
   }
 }
diff --git a/av1/common/restoration.h b/av1/common/restoration.h
index c1e937a..6c53a77 100644
--- a/av1/common/restoration.h
+++ b/av1/common/restoration.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RESTORATION_H_
-#define VP10_COMMON_RESTORATION_H_
+#ifndef AV1_COMMON_RESTORATION_H_
+#define AV1_COMMON_RESTORATION_H_
 
 #include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/blockd.h"
 
@@ -88,22 +88,21 @@
   int (*vfilter)[RESTORATION_WIN], (*hfilter)[RESTORATION_WIN];
 } RestorationInternal;
 
-int vp10_bilateral_level_bits(const struct VP10Common *const cm);
-int vp10_get_restoration_ntiles(int tilesize, int width, int height);
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
-                                    int *tile_width, int *tile_height,
-                                    int *nhtiles, int *nvtiles);
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
-                                int kf, int width, int height);
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame,
-                                 struct VP10Common *cm, RestorationInfo *rsi,
-                                 int y_only, int partial_frame);
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame,
-                                struct VP10Common *cm, int start_mi_row,
-                                int end_mi_row, int y_only);
-void vp10_loop_restoration_precal();
+int av1_bilateral_level_bits(const struct AV1Common *const cm);
+int av1_get_restoration_ntiles(int tilesize, int width, int height);
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+                                   int *tile_width, int *tile_height,
+                                   int *nhtiles, int *nvtiles);
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+                               int kf, int width, int height);
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                                RestorationInfo *rsi, int y_only,
+                                int partial_frame);
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                               int start_mi_row, int end_mi_row, int y_only);
+void av1_loop_restoration_precal();
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RESTORATION_H_
+#endif  // AV1_COMMON_RESTORATION_H_
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 6bd3b74..908a2db 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/filter.h"
 #include "av1/common/scale.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 static INLINE int scaled_x(int val, const struct scale_factors *sf) {
   return (int)((int64_t)val * sf->x_scale_fp >> REF_SCALE_SHIFT);
@@ -34,7 +34,7 @@
   return (other_size << REF_SCALE_SHIFT) / this_size;
 }
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
   const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
@@ -42,13 +42,13 @@
   return res;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h,
-                                        int use_highbd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h,
+                                       int use_highbd) {
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h) {
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h) {
 #endif
   if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
     sf->x_scale_fp = REF_INVALID_SCALE;
@@ -61,7 +61,7 @@
   sf->x_step_q4 = scaled_x(16, sf);
   sf->y_step_q4 = scaled_y(16, sf);
 
-  if (vp10_is_scaled(sf)) {
+  if (av1_is_scaled(sf)) {
     sf->scale_value_x = scaled_x;
     sf->scale_value_y = scaled_y;
   } else {
@@ -76,108 +76,108 @@
 // best quality, but it may be worth trying an additional mode that does
 // do the filtering on full-pel.
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-  sf->predict_ni[0][0][0] = vpx_convolve8_c;
-  sf->predict_ni[0][0][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[0][1][0] = vpx_convolve8_c;
-  sf->predict_ni[0][1][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[1][0][0] = vpx_convolve8_c;
-  sf->predict_ni[1][0][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[1][1][0] = vpx_convolve8;
-  sf->predict_ni[1][1][1] = vpx_convolve8_avg;
+  sf->predict_ni[0][0][0] = aom_convolve8_c;
+  sf->predict_ni[0][0][1] = aom_convolve8_avg_c;
+  sf->predict_ni[0][1][0] = aom_convolve8_c;
+  sf->predict_ni[0][1][1] = aom_convolve8_avg_c;
+  sf->predict_ni[1][0][0] = aom_convolve8_c;
+  sf->predict_ni[1][0][1] = aom_convolve8_avg_c;
+  sf->predict_ni[1][1][0] = aom_convolve8;
+  sf->predict_ni[1][1][1] = aom_convolve8_avg;
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
   if (sf->x_step_q4 == 16) {
     if (sf->y_step_q4 == 16) {
       // No scaling in either direction.
-      sf->predict[0][0][0] = vpx_convolve_copy;
-      sf->predict[0][0][1] = vpx_convolve_avg;
-      sf->predict[0][1][0] = vpx_convolve8_vert;
-      sf->predict[0][1][1] = vpx_convolve8_avg_vert;
-      sf->predict[1][0][0] = vpx_convolve8_horiz;
-      sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+      sf->predict[0][0][0] = aom_convolve_copy;
+      sf->predict[0][0][1] = aom_convolve_avg;
+      sf->predict[0][1][0] = aom_convolve8_vert;
+      sf->predict[0][1][1] = aom_convolve8_avg_vert;
+      sf->predict[1][0][0] = aom_convolve8_horiz;
+      sf->predict[1][0][1] = aom_convolve8_avg_horiz;
     } else {
       // No scaling in x direction. Must always scale in the y direction.
-      sf->predict[0][0][0] = vpx_convolve8_vert;
-      sf->predict[0][0][1] = vpx_convolve8_avg_vert;
-      sf->predict[0][1][0] = vpx_convolve8_vert;
-      sf->predict[0][1][1] = vpx_convolve8_avg_vert;
-      sf->predict[1][0][0] = vpx_convolve8;
-      sf->predict[1][0][1] = vpx_convolve8_avg;
+      sf->predict[0][0][0] = aom_convolve8_vert;
+      sf->predict[0][0][1] = aom_convolve8_avg_vert;
+      sf->predict[0][1][0] = aom_convolve8_vert;
+      sf->predict[0][1][1] = aom_convolve8_avg_vert;
+      sf->predict[1][0][0] = aom_convolve8;
+      sf->predict[1][0][1] = aom_convolve8_avg;
     }
   } else {
     if (sf->y_step_q4 == 16) {
       // No scaling in the y direction. Must always scale in the x direction.
-      sf->predict[0][0][0] = vpx_convolve8_horiz;
-      sf->predict[0][0][1] = vpx_convolve8_avg_horiz;
-      sf->predict[0][1][0] = vpx_convolve8;
-      sf->predict[0][1][1] = vpx_convolve8_avg;
-      sf->predict[1][0][0] = vpx_convolve8_horiz;
-      sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+      sf->predict[0][0][0] = aom_convolve8_horiz;
+      sf->predict[0][0][1] = aom_convolve8_avg_horiz;
+      sf->predict[0][1][0] = aom_convolve8;
+      sf->predict[0][1][1] = aom_convolve8_avg;
+      sf->predict[1][0][0] = aom_convolve8_horiz;
+      sf->predict[1][0][1] = aom_convolve8_avg_horiz;
     } else {
       // Must always scale in both directions.
-      sf->predict[0][0][0] = vpx_convolve8;
-      sf->predict[0][0][1] = vpx_convolve8_avg;
-      sf->predict[0][1][0] = vpx_convolve8;
-      sf->predict[0][1][1] = vpx_convolve8_avg;
-      sf->predict[1][0][0] = vpx_convolve8;
-      sf->predict[1][0][1] = vpx_convolve8_avg;
+      sf->predict[0][0][0] = aom_convolve8;
+      sf->predict[0][0][1] = aom_convolve8_avg;
+      sf->predict[0][1][0] = aom_convolve8;
+      sf->predict[0][1][1] = aom_convolve8_avg;
+      sf->predict[1][0][0] = aom_convolve8;
+      sf->predict[1][0][1] = aom_convolve8_avg;
     }
   }
   // 2D subpel motion always gets filtered in both directions
-  sf->predict[1][1][0] = vpx_convolve8;
-  sf->predict[1][1][1] = vpx_convolve8_avg;
+  sf->predict[1][1][0] = aom_convolve8;
+  sf->predict[1][1][1] = aom_convolve8_avg;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_highbd) {
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-    sf->highbd_predict_ni[0][0][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[0][0][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[0][1][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[0][1][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[1][0][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[1][0][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[1][1][0] = vpx_highbd_convolve8;
-    sf->highbd_predict_ni[1][1][1] = vpx_highbd_convolve8_avg;
+    sf->highbd_predict_ni[0][0][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[0][0][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[0][1][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[0][1][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[1][0][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[1][0][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[1][1][0] = aom_highbd_convolve8;
+    sf->highbd_predict_ni[1][1][1] = aom_highbd_convolve8_avg;
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (sf->x_step_q4 == 16) {
       if (sf->y_step_q4 == 16) {
         // No scaling in either direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve_copy;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve_avg;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve_copy;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve_avg;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
       } else {
         // No scaling in x direction. Must always scale in the y direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
       }
     } else {
       if (sf->y_step_q4 == 16) {
         // No scaling in the y direction. Must always scale in the x direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_horiz;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
       } else {
         // Must always scale in both directions.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
       }
     }
     // 2D subpel motion always gets filtered in both directions.
-    sf->highbd_predict[1][1][0] = vpx_highbd_convolve8;
-    sf->highbd_predict[1][1][1] = vpx_highbd_convolve8_avg;
+    sf->highbd_predict[1][1][0] = aom_highbd_convolve8;
+    sf->highbd_predict[1][1][1] = aom_highbd_convolve8_avg;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
diff --git a/av1/common/scale.h b/av1/common/scale.h
index bb02601..0b49b68 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SCALE_H_
-#define VP10_COMMON_SCALE_H_
+#ifndef AV1_COMMON_SCALE_H_
+#define AV1_COMMON_SCALE_H_
 
 #include "av1/common/mv.h"
-#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/aom_convolve.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -32,37 +32,37 @@
   int (*scale_value_y)(int val, const struct scale_factors *sf);
 
   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_convolve_fn_t highbd_predict[2][2][2];  // horiz, vert, avg
-#endif                                           // CONFIG_VP9_HIGHBITDEPTH
+#endif                                           // CONFIG_AOM_HIGHBITDEPTH
 
 // Functions for non-interpolating filters (those that filter zero offsets)
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
   convolve_fn_t predict_ni[2][2][2];  // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_convolve_fn_t highbd_predict_ni[2][2][2];  // horiz, vert, avg
-#endif                                              // CONFIG_VP9_HIGHBITDEPTH
+#endif                                              // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
 };
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h,
-                                        int use_high);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h,
+                                       int use_high);
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
+static INLINE int av1_is_valid_scale(const struct scale_factors *sf) {
   return sf->x_scale_fp != REF_INVALID_SCALE &&
          sf->y_scale_fp != REF_INVALID_SCALE;
 }
 
-static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
-  return vp10_is_valid_scale(sf) &&
+static INLINE int av1_is_scaled(const struct scale_factors *sf) {
+  return av1_is_valid_scale(sf) &&
          (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
 }
 
@@ -76,4 +76,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCALE_H_
+#endif  // AV1_COMMON_SCALE_H_
diff --git a/av1/common/scan.c b/av1/common/scan.c
index dbc36eb..8fc4ca2 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -2817,69 +2817,69 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
   0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x4[16]) = {
   0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x4[16]) = {
   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_4x4[16]) = {
   0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_4x4[16]) = {
   0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15,
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x8[32]) = {
   0,  1,  4,  9,  2,  3,  6,  11, 5,  7,  8,  13, 10, 12, 14, 17,
   15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x8[32]) = {
   0, 8,  16, 24, 1, 9,  17, 25, 2, 10, 18, 26, 3, 11, 19, 27,
   4, 12, 20, 28, 5, 13, 21, 29, 6, 14, 22, 30, 7, 15, 23, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x8[32]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x4[32]) = {
   0, 1, 4, 9,  15, 19, 24, 28, 2,  3,  6,  11, 16, 21, 25, 29,
   5, 7, 8, 13, 18, 22, 26, 30, 10, 12, 14, 17, 20, 23, 27, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x4[32]) = {
   0, 4, 8,  12, 16, 20, 24, 28, 1, 5, 9,  13, 17, 21, 25, 29,
   2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x4[32]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x8[64]) = {
   0, 8,  16, 24, 32, 40, 48, 56, 1, 9,  17, 25, 33, 41, 49, 57,
   2, 10, 18, 26, 34, 42, 50, 58, 3, 11, 19, 27, 35, 43, 51, 59,
   4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61,
   6, 14, 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 55, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x8[64]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
   32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
@@ -2887,21 +2887,21 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_8x8[64]) = {
   0,  3,  8,  15, 22, 32, 40, 47, 1,  5,  11, 18, 26, 34, 44, 51,
   2,  7,  13, 20, 28, 38, 46, 54, 4,  10, 16, 24, 31, 41, 50, 56,
   6,  12, 21, 27, 35, 43, 52, 58, 9,  17, 25, 33, 39, 48, 55, 60,
   14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_8x8[64]) = {
   0,  1,  2,  5,  8,  12, 19, 24, 3,  4,  7,  10, 15, 20, 30, 39,
   6,  9,  13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
   18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
   32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x8[64]) = {
   0,  2,  5,  9,  14, 22, 31, 37, 1,  4,  8,  13, 19, 26, 38, 44,
   3,  6,  10, 17, 24, 30, 42, 49, 7,  11, 15, 21, 29, 36, 47, 53,
   12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
@@ -2909,7 +2909,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x16[128]) = {
   0,  1,  3,   6,   10,  15,  21,  28,  2,  4,   7,   11,  16,  22,  29,  36,
   5,  8,  12,  17,  23,  30,  37,  44,  9,  13,  18,  24,  31,  38,  45,  52,
   14, 19, 25,  32,  39,  46,  53,  60,  20, 26,  33,  40,  47,  54,  61,  68,
@@ -2920,7 +2920,7 @@
   91, 98, 105, 111, 116, 120, 123, 125, 99, 106, 112, 117, 121, 124, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x8[128]) = {
   0,  1,  3,  6,  10, 15, 21, 28, 36, 44,  52,  60,  68,  76,  84,  92,
   2,  4,  7,  11, 16, 22, 29, 37, 45, 53,  61,  69,  77,  85,  93,  100,
   5,  8,  12, 17, 23, 30, 38, 46, 54, 62,  70,  78,  86,  94,  101, 107,
@@ -2931,7 +2931,7 @@
   35, 43, 51, 59, 67, 75, 83, 91, 99, 106, 112, 117, 121, 124, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x16[128]) = {
   0,  16, 32, 48, 64, 80, 96,  112, 1,  17, 33, 49, 65, 81, 97,  113,
   2,  18, 34, 50, 66, 82, 98,  114, 3,  19, 35, 51, 67, 83, 99,  115,
   4,  20, 36, 52, 68, 84, 100, 116, 5,  21, 37, 53, 69, 85, 101, 117,
@@ -2942,7 +2942,7 @@
   14, 30, 46, 62, 78, 94, 110, 126, 15, 31, 47, 63, 79, 95, 111, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x8[128]) = {
   0, 8,  16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,  104, 112, 120,
   1, 9,  17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97,  105, 113, 121,
   2, 10, 18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98,  106, 114, 122,
@@ -2953,7 +2953,7 @@
   7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 87, 95, 103, 111, 119, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x16[128]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -2965,7 +2965,7 @@
   120, 121, 122, 123, 124, 125, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x8[128]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -2977,7 +2977,7 @@
   120, 121, 122, 123, 124, 125, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x32[512]) = {
   0,   1,   3,   6,   10,  15,  21,  28,  36,  45,  55,  66,  78,  91,  105,
   120, 2,   4,   7,   11,  16,  22,  29,  37,  46,  56,  67,  79,  92,  106,
   121, 136, 5,   8,   12,  17,  23,  30,  38,  47,  57,  68,  80,  93,  107,
@@ -3015,7 +3015,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x16[512]) = {
   0,   1,   3,   6,   10,  15,  21,  28,  36,  45,  55,  66,  78,  91,  105,
   120, 136, 152, 168, 184, 200, 216, 232, 248, 264, 280, 296, 312, 328, 344,
   360, 376, 2,   4,   7,   11,  16,  22,  29,  37,  46,  56,  67,  79,  92,
@@ -3053,7 +3053,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x32[512]) = {
   0,  32, 64, 96,  128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480,
   1,  33, 65, 97,  129, 161, 193, 225, 257, 289, 321, 353, 385, 417, 449, 481,
   2,  34, 66, 98,  130, 162, 194, 226, 258, 290, 322, 354, 386, 418, 450, 482,
@@ -3088,7 +3088,7 @@
   31, 63, 95, 127, 159, 191, 223, 255, 287, 319, 351, 383, 415, 447, 479, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x16[512]) = {
   0,   16,  32,  48,  64,  80,  96,  112, 128, 144, 160, 176, 192, 208, 224,
   240, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464,
   480, 496, 1,   17,  33,  49,  65,  81,  97,  113, 129, 145, 161, 177, 193,
@@ -3126,7 +3126,7 @@
   495, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x32[512]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3164,7 +3164,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x16[512]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3205,7 +3205,7 @@
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x16[256]) = {
   0,  16, 32, 48, 64, 80, 96,  112, 128, 144, 160, 176, 192, 208, 224, 240,
   1,  17, 33, 49, 65, 81, 97,  113, 129, 145, 161, 177, 193, 209, 225, 241,
   2,  18, 34, 50, 66, 82, 98,  114, 130, 146, 162, 178, 194, 210, 226, 242,
@@ -3224,7 +3224,7 @@
   15, 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x16[256]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3246,7 +3246,7 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_16x16[256]) = {
   0,  4,  11,  20,  31,  43,  59,  75,  85,  109, 130, 150, 165, 181, 195, 198,
   1,  6,  14,  23,  34,  47,  64,  81,  95,  114, 135, 153, 171, 188, 201, 212,
   2,  8,  16,  25,  38,  52,  67,  83,  101, 116, 136, 157, 172, 190, 205, 216,
@@ -3265,7 +3265,7 @@
   65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_16x16[256]) = {
   0,   1,   2,   4,   6,   9,   12,  17,  22,  29,  36,  43,  54,  64,  76,
   86,  3,   5,   7,   11,  15,  19,  25,  32,  38,  48,  59,  68,  84,  99,
   115, 130, 8,   10,  13,  18,  23,  27,  33,  42,  51,  60,  72,  88,  103,
@@ -3286,7 +3286,7 @@
   255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x16[256]) = {
   0,   2,   5,   9,   17,  24,  36,  44,  55,  72,  88,  104, 128, 143, 166,
   179, 1,   4,   8,   13,  20,  30,  40,  54,  66,  79,  96,  113, 141, 154,
   178, 196, 3,   7,   11,  18,  25,  33,  46,  57,  71,  86,  101, 119, 148,
@@ -3308,7 +3308,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x32[1024]) = {
   0,   32,   64,  96,   128, 160,  192, 224,  256, 288,  320, 352,  384, 416,
   448, 480,  512, 544,  576, 608,  640, 672,  704, 736,  768, 800,  832, 864,
   896, 928,  960, 992,  1,   33,   65,  97,   129, 161,  193, 225,  257, 289,
@@ -3385,7 +3385,7 @@
   991, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x32[1024]) = {
   0,    1,    2,    3,    4,    5,    6,    7,    8,    9,    10,   11,   12,
   13,   14,   15,   16,   17,   18,   19,   20,   21,   22,   23,   24,   25,
   26,   27,   28,   29,   30,   31,   32,   33,   34,   35,   36,   37,   38,
@@ -3468,7 +3468,7 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x32[1024]) = {
   0,    2,    5,    10,   17,   25,   38,   47,   62,   83,   101,  121,  145,
   170,  193,  204,  210,  219,  229,  233,  245,  257,  275,  299,  342,  356,
   377,  405,  455,  471,  495,  527,  1,    4,    8,    15,   22,   30,   45,
@@ -3551,7 +3551,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_v2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_v2_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  512,  518,  527,  539,  551,  566,  584,  602,  621,  644,
   668,  695,  721,  748,  780,  811,  2,    3,    6,    11,   17,   26,   35,
@@ -3633,7 +3633,7 @@
   978,  987,  995,  1002, 1008, 1013, 1017, 1020, 1022, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_h2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_h2_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  214,  233,  254,  273,  292,  309,  328,  345,  362,  378,
   397,  415,  431,  447,  464,  481,  2,    3,    6,    11,   17,   26,   35,
@@ -3715,7 +3715,7 @@
   978,  987,  995,  1002, 1008, 1013, 1017, 1020, 1022, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_qtr_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_qtr_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  256,  268,  286,  310,  334,  364,  400,  435,  471,  510,
   553,  598,  640,  683,  732,  780,  2,    3,    6,    11,   17,   26,   35,
@@ -3798,371 +3798,369 @@
 };
 #endif  // CONFIG_EXT_TX
 
-const scan_order vp10_default_scan_orders[TX_SIZES] = {
-  { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-  { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-  { default_scan_16x16, vp10_default_iscan_16x16,
-    default_scan_16x16_neighbors },
-  { default_scan_32x32, vp10_default_iscan_32x32,
-    default_scan_32x32_neighbors },
+const scan_order av1_default_scan_orders[TX_SIZES] = {
+  { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+  { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+  { default_scan_16x16, av1_default_iscan_16x16, default_scan_16x16_neighbors },
+  { default_scan_32x32, av1_default_iscan_32x32, default_scan_32x32_neighbors },
 };
 
 #if CONFIG_EXT_TX
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
   {
       // TX_4X4
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
   },
   {
       // TX_8X8
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
   },
   {
       // TX_16X16
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
   },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
   }
 };
 
-const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
+const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
   {
       // TX_4X4
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
   },
   {
       // TX_8X8
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
   },
   {
       // TX_16X16
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
   },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
   },
   {
       // TX_4X8
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
   },
   {
       // TX_8X4
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
   },
   {
       // TX_8X16
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
   },
   {
       // TX_16X8
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
   },
   {
       // TX_16X32
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
   },
   {
       // TX_32X16
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
   }
 };
 
 #else   // CONFIG_EXT_TX
 
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
   { // TX_4X4
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-    { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-    { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+    { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+    { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors } },
   { // TX_8X8
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-    { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-    { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+    { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+    { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors } },
   { // TX_16X16
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors },
-    { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-    { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+    { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors } },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
   }
 };
diff --git a/av1/common/scan.h b/av1/common/scan.h
index d2d9f35..cba92e7 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SCAN_H_
-#define VP10_COMMON_SCAN_H_
+#ifndef AV1_COMMON_SCAN_H_
+#define AV1_COMMON_SCAN_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/enums.h"
@@ -29,8 +29,8 @@
   const int16_t *neighbors;
 } scan_order;
 
-extern const scan_order vp10_default_scan_orders[TX_SIZES];
-extern const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES];
+extern const scan_order av1_default_scan_orders[TX_SIZES];
+extern const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES];
 
 static INLINE int get_coef_context(const int16_t *neighbors,
                                    const uint8_t *token_cache, int c) {
@@ -41,26 +41,26 @@
 
 static INLINE const scan_order *get_intra_scan(TX_SIZE tx_size,
                                                TX_TYPE tx_type) {
-  return &vp10_intra_scan_orders[tx_size][tx_type];
+  return &av1_intra_scan_orders[tx_size][tx_type];
 }
 
 #if CONFIG_EXT_TX
-extern const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
+extern const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
 
 static INLINE const scan_order *get_inter_scan(TX_SIZE tx_size,
                                                TX_TYPE tx_type) {
-  return &vp10_inter_scan_orders[tx_size][tx_type];
+  return &av1_inter_scan_orders[tx_size][tx_type];
 }
 #endif  // CONFIG_EXT_TX
 
 static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type,
                                          int is_inter) {
 #if CONFIG_EXT_TX
-  return is_inter ? &vp10_inter_scan_orders[tx_size][tx_type]
-                  : &vp10_intra_scan_orders[tx_size][tx_type];
+  return is_inter ? &av1_inter_scan_orders[tx_size][tx_type]
+                  : &av1_intra_scan_orders[tx_size][tx_type];
 #else
   (void)is_inter;
-  return &vp10_intra_scan_orders[tx_size][tx_type];
+  return &av1_intra_scan_orders[tx_size][tx_type];
 #endif  // CONFIG_EXT_TX
 }
 
@@ -68,4 +68,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCAN_H_
+#endif  // AV1_COMMON_SCAN_H_
diff --git a/av1/common/seg_common.c b/av1/common/seg_common.c
index f131c7b..9a5b8c8 100644
--- a/av1/common/seg_common.c
+++ b/av1/common/seg_common.c
@@ -25,26 +25,26 @@
 // the coding mechanism is still subject to change so these provide a
 // convenient single point of change.
 
-void vp10_clearall_segfeatures(struct segmentation *seg) {
-  vp10_zero(seg->feature_data);
-  vp10_zero(seg->feature_mask);
+void av1_clearall_segfeatures(struct segmentation *seg) {
+  av1_zero(seg->feature_data);
+  av1_zero(seg->feature_mask);
 }
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id) {
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+                           SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] |= 1 << feature_id;
 }
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_max[feature_id];
 }
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_signed[feature_id];
 }
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
-                      SEG_LVL_FEATURES feature_id, int seg_data) {
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+                     SEG_LVL_FEATURES feature_id, int seg_data) {
   assert(seg_data <= seg_feature_data_max[feature_id]);
   if (seg_data < 0) {
     assert(seg_feature_data_signed[feature_id]);
@@ -54,7 +54,7 @@
   seg->feature_data[segment_id][feature_id] = seg_data;
 }
 
-const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
+const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
   2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
 };
 
diff --git a/av1/common/seg_common.h b/av1/common/seg_common.h
index 7a8fa8f..f863ad8 100644
--- a/av1/common/seg_common.h
+++ b/av1/common/seg_common.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SEG_COMMON_H_
-#define VP10_COMMON_SEG_COMMON_H_
+#ifndef AV1_COMMON_SEG_COMMON_H_
+#define AV1_COMMON_SEG_COMMON_H_
 
 #include "aom_dsp/prob.h"
 
@@ -46,8 +46,8 @@
 };
 
 struct segmentation_probs {
-  vpx_prob tree_probs[SEG_TREE_PROBS];
-  vpx_prob pred_probs[PREDICTION_PROBS];
+  aom_prob tree_probs[SEG_TREE_PROBS];
+  aom_prob pred_probs[PREDICTION_PROBS];
 };
 
 static INLINE int segfeature_active(const struct segmentation *seg,
@@ -56,27 +56,27 @@
   return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
 }
 
-void vp10_clearall_segfeatures(struct segmentation *seg);
+void av1_clearall_segfeatures(struct segmentation *seg);
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id);
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+                           SEG_LVL_FEATURES feature_id);
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
-                      SEG_LVL_FEATURES feature_id, int seg_data);
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+                     SEG_LVL_FEATURES feature_id, int seg_data);
 
 static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
                               SEG_LVL_FEATURES feature_id) {
   return seg->feature_data[segment_id][feature_id];
 }
 
-extern const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
+extern const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SEG_COMMON_H_
+#endif  // AV1_COMMON_SEG_COMMON_H_
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index ba91a46..13150e0 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "av1/common/entropymode.h"
 #include "av1/common/thread_common.h"
 #include "av1/common/reconinter.h"
@@ -33,7 +33,7 @@
 }
 #endif  // CONFIG_MULTITHREAD
 
-static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
+static INLINE void sync_read(AV1LfSync *const lf_sync, int r, int c) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
 
@@ -53,7 +53,7 @@
 #endif  // CONFIG_MULTITHREAD
 }
 
-static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
+static INLINE void sync_write(AV1LfSync *const lf_sync, int r, int c,
                               const int sb_cols) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
@@ -86,9 +86,9 @@
 
 // Implement row loopfiltering for each thread.
 static INLINE void thread_loop_filter_rows(
-    const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+    const YV12_BUFFER_CONFIG *const frame_buffer, AV1_COMMON *const cm,
     struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
-    int y_only, VP10LfSync *const lf_sync) {
+    int y_only, AV1LfSync *const lf_sync) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   const int sb_cols = mi_cols_aligned_to_sb(cm) >> cm->mib_size_log2;
   int mi_row, mi_col;
@@ -123,28 +123,28 @@
 
       sync_read(lf_sync, r, c);
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
 #if CONFIG_EXT_PARTITION_TYPES
       for (plane = 0; plane < num_planes; ++plane)
-        vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
-                                       mi_col);
+        av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+                                      mi_col);
 #else
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                           mi_row, mi_col);
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+                                          mi_row, mi_col);
             break;
         }
       }
@@ -155,7 +155,7 @@
 }
 
 // Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP10LfSync *const lf_sync,
+static int loop_filter_row_worker(AV1LfSync *const lf_sync,
                                   LFWorkerData *const lf_data) {
   thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                           lf_data->start, lf_data->stop, lf_data->y_only,
@@ -163,18 +163,18 @@
   return 1;
 }
 
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                                 struct macroblockd_plane planes[MAX_MB_PLANE],
                                 int start, int stop, int y_only,
-                                VPxWorker *workers, int nworkers,
-                                VP10LfSync *lf_sync) {
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+                                AVxWorker *workers, int nworkers,
+                                AV1LfSync *lf_sync) {
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   // Number of superblock rows and cols
   const int sb_rows = mi_rows_aligned_to_sb(cm) >> cm->mib_size_log2;
   // Decoder may allocate more threads than number of tiles based on user's
   // input.
   const int tile_cols = cm->tile_cols;
-  const int num_workers = VPXMIN(nworkers, tile_cols);
+  const int num_workers = AOMMIN(nworkers, tile_cols);
   int i;
 
 #if CONFIG_EXT_PARTITION
@@ -186,8 +186,8 @@
 
   if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
       num_workers > lf_sync->num_workers) {
-    vp10_loop_filter_dealloc(lf_sync);
-    vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+    av1_loop_filter_dealloc(lf_sync);
+    av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
   }
 
   // Initialize cur_sb_col to -1 for all SB rows.
@@ -202,15 +202,15 @@
   // because of contention. If the multithreading code changes in the future
   // then the number of workers used by the loopfilter should be revisited.
   for (i = 0; i < num_workers; ++i) {
-    VPxWorker *const worker = &workers[i];
+    AVxWorker *const worker = &workers[i];
     LFWorkerData *const lf_data = &lf_sync->lfdata[i];
 
-    worker->hook = (VPxWorkerHook)loop_filter_row_worker;
+    worker->hook = (AVxWorkerHook)loop_filter_row_worker;
     worker->data1 = lf_sync;
     worker->data2 = lf_data;
 
     // Loopfilter data
-    vp10_loop_filter_data_reset(lf_data, frame, cm, planes);
+    av1_loop_filter_data_reset(lf_data, frame, cm, planes);
     lf_data->start = start + i * cm->mib_size;
     lf_data->stop = stop;
     lf_data->y_only = y_only;
@@ -229,11 +229,11 @@
   }
 }
 
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                               struct macroblockd_plane planes[MAX_MB_PLANE],
-                               int frame_filter_level, int y_only,
-                               int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync) {
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                              struct macroblockd_plane planes[MAX_MB_PLANE],
+                              int frame_filter_level, int y_only,
+                              int partial_frame, AVxWorker *workers,
+                              int num_workers, AV1LfSync *lf_sync) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
 
   if (!frame_filter_level) return;
@@ -243,10 +243,10 @@
   if (partial_frame && cm->mi_rows > 8) {
     start_mi_row = cm->mi_rows >> 1;
     start_mi_row &= 0xfffffff8;
-    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+    mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
 
   loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
                       workers, num_workers, lf_sync);
@@ -267,15 +267,15 @@
 }
 
 // Allocate memory for lf row synchronization
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
-                            int width, int num_workers) {
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
+                           int width, int num_workers) {
   lf_sync->rows = rows;
 #if CONFIG_MULTITHREAD
   {
     int i;
 
     CHECK_MEM_ERROR(cm, lf_sync->mutex_,
-                    vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
+                    aom_malloc(sizeof(*lf_sync->mutex_) * rows));
     if (lf_sync->mutex_) {
       for (i = 0; i < rows; ++i) {
         pthread_mutex_init(&lf_sync->mutex_[i], NULL);
@@ -283,7 +283,7 @@
     }
 
     CHECK_MEM_ERROR(cm, lf_sync->cond_,
-                    vpx_malloc(sizeof(*lf_sync->cond_) * rows));
+                    aom_malloc(sizeof(*lf_sync->cond_) * rows));
     if (lf_sync->cond_) {
       for (i = 0; i < rows; ++i) {
         pthread_cond_init(&lf_sync->cond_[i], NULL);
@@ -293,18 +293,18 @@
 #endif  // CONFIG_MULTITHREAD
 
   CHECK_MEM_ERROR(cm, lf_sync->lfdata,
-                  vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
+                  aom_malloc(num_workers * sizeof(*lf_sync->lfdata)));
   lf_sync->num_workers = num_workers;
 
   CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
-                  vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
+                  aom_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
 
   // Set up nsync.
   lf_sync->sync_range = get_sync_range(width);
 }
 
 // Deallocate lf synchronization related mutex and data
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync) {
   if (lf_sync != NULL) {
 #if CONFIG_MULTITHREAD
     int i;
@@ -313,26 +313,26 @@
       for (i = 0; i < lf_sync->rows; ++i) {
         pthread_mutex_destroy(&lf_sync->mutex_[i]);
       }
-      vpx_free(lf_sync->mutex_);
+      aom_free(lf_sync->mutex_);
     }
     if (lf_sync->cond_ != NULL) {
       for (i = 0; i < lf_sync->rows; ++i) {
         pthread_cond_destroy(&lf_sync->cond_[i]);
       }
-      vpx_free(lf_sync->cond_);
+      aom_free(lf_sync->cond_);
     }
 #endif  // CONFIG_MULTITHREAD
-    vpx_free(lf_sync->lfdata);
-    vpx_free(lf_sync->cur_sb_col);
+    aom_free(lf_sync->lfdata);
+    aom_free(lf_sync->cur_sb_col);
     // clear the structure as the source of this call may be a resize in which
     // case this call will be followed by an _alloc() which may fail.
-    vp10_zero(*lf_sync);
+    av1_zero(*lf_sync);
   }
 }
 
 // Accumulate frame counts. FRAME_COUNTS consist solely of 'unsigned int'
 // members, so we treat it as an array, and sum over the whole length.
-void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+void av1_accumulate_frame_counts(AV1_COMMON *cm, FRAME_COUNTS *counts) {
   unsigned int *const acc = (unsigned int *)&cm->counts;
   const unsigned int *const cnt = (unsigned int *)counts;
 
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index 3df9557..29085cb 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -8,21 +8,21 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_
-#define VP10_COMMON_LOOPFILTER_THREAD_H_
-#include "./vpx_config.h"
+#ifndef AV1_COMMON_LOOPFILTER_THREAD_H_
+#define AV1_COMMON_LOOPFILTER_THREAD_H_
+#include "./aom_config.h"
 #include "av1/common/loopfilter.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct FRAME_COUNTS;
 
 // Loopfilter row synchronization
-typedef struct VP10LfSyncData {
+typedef struct AV1LfSyncData {
 #if CONFIG_MULTITHREAD
   pthread_mutex_t *mutex_;
   pthread_cond_t *cond_;
@@ -37,27 +37,27 @@
   // Row-based parallel loopfilter data
   LFWorkerData *lfdata;
   int num_workers;
-} VP10LfSync;
+} AV1LfSync;
 
 // Allocate memory for loopfilter row synchronization.
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
-                            int rows, int width, int num_workers);
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, struct AV1Common *cm, int rows,
+                           int width, int num_workers);
 
 // Deallocate loopfilter synchronization related mutex and data.
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync);
 
 // Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
-                               struct macroblockd_plane planes[MAX_MB_PLANE],
-                               int frame_filter_level, int y_only,
-                               int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync);
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                              struct macroblockd_plane planes[MAX_MB_PLANE],
+                              int frame_filter_level, int y_only,
+                              int partial_frame, AVxWorker *workers,
+                              int num_workers, AV1LfSync *lf_sync);
 
-void vp10_accumulate_frame_counts(struct VP10Common *cm,
-                                  struct FRAME_COUNTS *counts);
+void av1_accumulate_frame_counts(struct AV1Common *cm,
+                                 struct FRAME_COUNTS *counts);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_THREAD_H_
+#endif  // AV1_COMMON_LOOPFILTER_THREAD_H_
diff --git a/av1/common/tile_common.c b/av1/common/tile_common.c
index e79734e..220cad9 100644
--- a/av1/common/tile_common.c
+++ b/av1/common/tile_common.c
@@ -10,21 +10,21 @@
 
 #include "av1/common/tile_common.h"
 #include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) {
+void av1_tile_set_row(TileInfo *tile, const AV1_COMMON *cm, int row) {
   tile->mi_row_start = row * cm->tile_height;
-  tile->mi_row_end = VPXMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
+  tile->mi_row_end = AOMMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
 }
 
-void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) {
+void av1_tile_set_col(TileInfo *tile, const AV1_COMMON *cm, int col) {
   tile->mi_col_start = col * cm->tile_width;
-  tile->mi_col_end = VPXMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
+  tile->mi_col_end = AOMMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
 }
 
-void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
-  vp10_tile_set_row(tile, cm, row);
-  vp10_tile_set_col(tile, cm, col);
+void av1_tile_init(TileInfo *tile, const AV1_COMMON *cm, int row, int col) {
+  av1_tile_set_row(tile, cm, row);
+  av1_tile_set_col(tile, cm, col);
 }
 
 #if !CONFIG_EXT_TILE
@@ -49,8 +49,8 @@
   return max_log2 - 1;
 }
 
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
-                          int *max_log2_tile_cols) {
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+                         int *max_log2_tile_cols) {
   const int max_sb_cols =
       ALIGN_POWER_OF_TWO(mi_cols, MAX_MIB_SIZE_LOG2) >> MAX_MIB_SIZE_LOG2;
   *min_log2_tile_cols = get_min_log2_tile_cols(max_sb_cols);
diff --git a/av1/common/tile_common.h b/av1/common/tile_common.h
index a502173..68d434a 100644
--- a/av1/common/tile_common.h
+++ b/av1/common/tile_common.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_TILE_COMMON_H_
-#define VP10_COMMON_TILE_COMMON_H_
+#ifndef AV1_COMMON_TILE_COMMON_H_
+#define AV1_COMMON_TILE_COMMON_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
 typedef struct TileInfo {
   int mi_row_start, mi_row_end;
@@ -24,17 +24,17 @@
 
 // initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
 // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
-                    int col);
+void av1_tile_init(TileInfo *tile, const struct AV1Common *cm, int row,
+                   int col);
 
-void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
-void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
+void av1_tile_set_row(TileInfo *tile, const struct AV1Common *cm, int row);
+void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
 
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
-                          int *max_log2_tile_cols);
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+                         int *max_log2_tile_cols);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_TILE_COMMON_H_
+#endif  // AV1_COMMON_TILE_COMMON_H_
diff --git a/av1/common/vp10_convolve.h b/av1/common/vp10_convolve.h
deleted file mode 100644
index 9343402..0000000
--- a/av1/common/vp10_convolve.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef VP10_COMMON_VP10_CONVOLVE_H_
-#define VP10_COMMON_VP10_CONVOLVE_H_
-#include "av1/common/filter.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                   int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
-                   const INTERP_FILTER *interp_filter,
-#else
-                   const INTERP_FILTER interp_filter,
-#endif
-                   const int subpel_x, int xstep, const int subpel_y, int ystep,
-                   int avg);
-
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                          int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
-                          const INTERP_FILTER *interp_filter,
-#else
-                          const INTERP_FILTER interp_filter,
-#endif
-                          const int subpel_x, int xstep, const int subpel_y,
-                          int ystep, int avg, int bd);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // VP10_COMMON_VP10_CONVOLVE_H_
diff --git a/av1/common/vp10_fwd_txfm1d.h b/av1/common/vp10_fwd_txfm1d.h
deleted file mode 100644
index ab9d2ee..0000000
--- a/av1/common/vp10_fwd_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_FWD_TXFM1D_H_
-#define VP10_FWD_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // VP10_FWD_TXFM1D_H_
diff --git a/av1/common/vp10_inv_txfm1d.h b/av1/common/vp10_inv_txfm1d.h
deleted file mode 100644
index 21b80bf..0000000
--- a/av1/common/vp10_inv_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_INV_TXFM1D_H_
-#define VP10_INV_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_idct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // VP10_INV_TXFM1D_H_
diff --git a/av1/common/vp10_rtcd_defs.pl b/av1/common/vp10_rtcd_defs.pl
deleted file mode 100644
index 4a16723..0000000
--- a/av1/common/vp10_rtcd_defs.pl
+++ /dev/null
@@ -1,912 +0,0 @@
-sub vp10_common_forward_decls() {
-print <<EOF
-/*
- * VP10
- */
-
-#include "aom/vpx_integer.h"
-#include "av1/common/common.h"
-#include "av1/common/enums.h"
-#include "av1/common/quant_common.h"
-#include "av1/common/filter.h"
-#include "av1/common/vp10_txfm.h"
-
-struct macroblockd;
-
-/* Encoder forward decls */
-struct macroblock;
-struct vpx_variance_vtable;
-struct search_site_config;
-struct mv;
-union int_mv;
-struct yv12_buffer_config;
-EOF
-}
-forward_decls qw/vp10_common_forward_decls/;
-
-# functions that are 64 bit only.
-$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
-if ($opts{arch} eq "x86_64") {
-  $mmx_x86_64 = 'mmx';
-  $sse2_x86_64 = 'sse2';
-  $ssse3_x86_64 = 'ssse3';
-  $avx_x86_64 = 'avx';
-  $avx2_x86_64 = 'avx2';
-}
-
-#
-# 10/12-tap convolution filters
-#
-add_proto qw/void vp10_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_horiz ssse3/;
-
-add_proto qw/void vp10_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_vert ssse3/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vp10_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
-  specialize qw/vp10_highbd_convolve_horiz sse4_1/;
-  add_proto qw/void vp10_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
-  specialize qw/vp10_highbd_convolve_vert sse4_1/;
-}
-
-#
-# dct
-#
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
-
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4/;
-
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8/;
-
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
-
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16/;
-
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
-
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
-  } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add sse2/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
-
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
-
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
-  }
-} else {
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
-  } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2 neon dspr2/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2 neon dspr2/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add sse2 dspr2/;
-
-    if (vpx_config("CONFIG_EXT_TX") ne "yes") {
-      specialize qw/vp10_iht4x4_16_add msa/;
-      specialize qw/vp10_iht8x8_64_add msa/;
-      specialize qw/vp10_iht16x16_256_add msa/;
-    }
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
-  }
-}
-
-if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
-  add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_nuq/;
-
-  add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_fp_nuq/;
-
-  add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_32x32_nuq/;
-
-  add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_32x32_fp_nuq/;
-}
-
-# EXT_INTRA predictor functions
-if (vpx_config("CONFIG_EXT_INTRA") eq "yes") {
-  add_proto qw/void vp10_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_dc_filter_predictor sse4_1/;
-  add_proto qw/void vp10_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_v_filter_predictor sse4_1/;
-  add_proto qw/void vp10_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_h_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d45_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d135_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d117_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d153_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d207_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d63_filter_predictor sse4_1/;
-  add_proto qw/void vp10_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_tm_filter_predictor sse4_1/;
-  # High bitdepth functions
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void vp10_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_dc_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_v_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_h_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d45_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d135_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d117_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d153_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d207_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d63_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_tm_filter_predictor sse4_1/;
-  }
-}
-
-# High bitdepth functions
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  #
-  # Sub Pixel Filters
-  #
-  add_proto qw/void vp10_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_copy/;
-
-  add_proto qw/void vp10_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_avg/;
-
-  add_proto qw/void vp10_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_vert/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-
-  #
-  # dct
-  #
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht4x4_16_add/;
-
-  add_proto qw/void vp10_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht4x8_32_add/;
-
-  add_proto qw/void vp10_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x4_32_add/;
-
-  add_proto qw/void vp10_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x16_128_add/;
-
-  add_proto qw/void vp10_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x8_128_add/;
-
-  add_proto qw/void vp10_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x32_512_add/;
-
-  add_proto qw/void vp10_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht32x16_512_add/;
-
-  add_proto qw/void vp10_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x8_64_add/;
-
-  add_proto qw/void vp10_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x16_256_add/;
-}
-
-#
-# Encoder functions below this point.
-#
-if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-
-# ENCODEMB INVOKE
-
-if (vpx_config("CONFIG_AOM_QM") eq "yes") {
-  if (vpx_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
-    # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-    specialize qw/vp10_fdct8x8_quant/;
-  } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
-
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-  }
-} else {
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp/;
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/;
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant/;
-  } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error sse2 avx2 msa/;
-
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon sse2/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp neon sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/, "$ssse3_x86_64";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant sse2 ssse3 neon/;
-  }
-
-}
-
-# fdct functions
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2/;
-
-  add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x8/;
-
-  add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x4/;
-
-  add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x16/;
-
-  add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x8/;
-
-  add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x32/;
-
-  add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x16/;
-
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2/;
-
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2/;
-
-  add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x32/;
-
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4/;
-} else {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2/;
-
-  add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x8/;
-
-  add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x4/;
-
-  add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x16/;
-
-  add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x8/;
-
-  add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x32/;
-
-  add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x16/;
-
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2/;
-
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2/;
-
-  if (vpx_config("CONFIG_EXT_TX") ne "yes") {
-    specialize qw/vp10_fht4x4 msa/;
-    specialize qw/vp10_fht8x8 msa/;
-    specialize qw/vp10_fht16x16 msa/;
-  }
-
-  add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x32/;
-
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4/;
-}
-
-add_proto qw/void vp10_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
-  specialize qw/vp10_fwd_idtx/;
-
-# Inverse transform
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_1_add/;
-
-  add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_16_add/;
-
-  add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_1_add/;
-
-  add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_64_add/;
-
-  add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_12_add/;
-
-  add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_1_add/;
-
-  add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_256_add/;
-
-  add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_10_add/;
-
-  add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1024_add/;
-
-  add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_34_add/;
-
-  add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1_add/;
-
-  add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_1_add/;
-
-  add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_16_add/;
-
-  add_proto qw/void vp10_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct4x4_1_add/;
-
-  add_proto qw/void vp10_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct8x8_1_add/;
-
-  add_proto qw/void vp10_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct16x16_1_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1024_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_34_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1_add/;
-
-  add_proto qw/void vp10_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_1_add/;
-
-  add_proto qw/void vp10_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_16_add/;
-
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add/;
-
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add/;
-
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add/;
-
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add/;
-
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add/;
-  } else {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add sse2/;
-  }  # CONFIG_EMULATE_HARDWARE
-} else {
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add/;
-
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add/;
-
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add/;
-
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add/;
-
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add/;
-
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add/;
-
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add/;
-
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add/;
-
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add/;
-
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add/;
-
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
-  } else {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add sse2/;
-
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add sse2/;
-
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
-  }  # CONFIG_EMULATE_HARDWARE
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  #fwd txfm
-  add_proto qw/void vp10_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_4x4 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_8x8 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_16x16 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_32x32 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_64x64 sse4_1/;
-
-  #inv txfm
-  add_proto qw/void vp10_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_4x4 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_8x8 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_16x16 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_32x32/;
-  add_proto qw/void vp10_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_64x64/;
-}
-
-#
-# Motion search
-#
-add_proto qw/int vp10_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
-specialize qw/vp10_full_search_sad sse3 sse4_1/;
-$vp10_full_search_sad_sse3=vp10_full_search_sadx3;
-$vp10_full_search_sad_sse4_1=vp10_full_search_sadx8;
-
-add_proto qw/int vp10_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_diamond_search_sad/;
-
-add_proto qw/int vp10_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_full_range_search/;
-
-add_proto qw/void vp10_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-specialize qw/vp10_temporal_filter_apply sse2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-
-  # ENCODEMB INVOKE
-  if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
-    add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_nuq/;
-
-    add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_fp_nuq/;
-
-    add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_32x32_nuq/;
-
-    add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_32x32_fp_nuq/;
-  }
-
-  add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
-  specialize qw/vp10_highbd_block_error sse2/;
-
-  if (vpx_config("CONFIG_AOM_QM") eq "yes") {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
-    add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-  } else {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
-    specialize qw/vp10_highbd_quantize_fp sse4_1/;
-
-    add_proto qw/void vp10_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
-    specialize qw/vp10_highbd_quantize_b/;
-  }
-
-  # fdct functions
-  add_proto qw/void vp10_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht4x4 sse4_1/;
-
-  add_proto qw/void vp10_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht4x8/;
-
-  add_proto qw/void vp10_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x4/;
-
-  add_proto qw/void vp10_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x16/;
-
-  add_proto qw/void vp10_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x8/;
-
-  add_proto qw/void vp10_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x32/;
-
-  add_proto qw/void vp10_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht32x16/;
-
-  add_proto qw/void vp10_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x8/;
-
-  add_proto qw/void vp10_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x16/;
-
-  add_proto qw/void vp10_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht32x32/;
-
-  add_proto qw/void vp10_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_highbd_fwht4x4/;
-
-  add_proto qw/void vp10_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-  specialize qw/vp10_highbd_temporal_filter_apply/;
-
-}
-# End vp10_high encoder functions
-
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-  add_proto qw/uint64_t vp10_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
-  specialize qw/vp10_wedge_sse_from_residuals sse2/;
-  add_proto qw/int vp10_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
-  specialize qw/vp10_wedge_sign_from_residuals sse2/;
-  add_proto qw/void vp10_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
-  specialize qw/vp10_wedge_compute_delta_squares sse2/;
-}
-
-}
-# end encoder functions
-1;
diff --git a/av1/common/warped_motion.c b/av1/common/warped_motion.c
index 5f76453..c742c36 100644
--- a/av1/common/warped_motion.c
+++ b/av1/common/warped_motion.c
@@ -353,7 +353,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_get_subcolumn(int taps, uint16_t *ref, int32_t *col,
                                         int stride, int x, int y_start) {
   int i;
@@ -522,7 +522,7 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static double warp_erroradv(WarpedMotionParams *wm, uint8_t *ref, int width,
                             int height, int stride, uint8_t *dst, int p_col,
@@ -574,48 +574,48 @@
   }
 }
 
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                          int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                          uint8_t *ref, int width, int height, int stride,
-                          uint8_t *dst, int p_col, int p_row, int p_width,
-                          int p_height, int p_stride, int subsampling_x,
-                          int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                         int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                         uint8_t *ref, int width, int height, int stride,
+                         uint8_t *dst, int p_col, int p_row, int p_width,
+                         int p_height, int p_stride, int subsampling_x,
+                         int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_hbd)
     return highbd_warp_erroradv(
         wm, ref, width, height, stride, dst, p_col, p_row, p_width, p_height,
         p_stride, subsampling_x, subsampling_y, x_scale, y_scale, bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return warp_erroradv(wm, ref, width, height, stride, dst, p_col, p_row,
                          p_width, p_height, p_stride, subsampling_x,
                          subsampling_y, x_scale, y_scale);
 }
 
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                     int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                     uint8_t *ref, int width, int height, int stride,
-                     uint8_t *pred, int p_col, int p_row, int p_width,
-                     int p_height, int p_stride, int subsampling_x,
-                     int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                    int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                    uint8_t *ref, int width, int height, int stride,
+                    uint8_t *pred, int p_col, int p_row, int p_width,
+                    int p_height, int p_stride, int subsampling_x,
+                    int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_hbd)
     highbd_warp_plane(wm, ref, width, height, stride, pred, p_col, p_row,
                       p_width, p_height, p_stride, subsampling_x, subsampling_y,
                       x_scale, y_scale, bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     warp_plane(wm, ref, width, height, stride, pred, p_col, p_row, p_width,
                p_height, p_stride, subsampling_x, subsampling_y, x_scale,
                y_scale);
 }
 
-void vp10_integerize_model(const double *model, TransformationType wmtype,
-                           WarpedMotionParams *wm) {
+void av1_integerize_model(const double *model, TransformationType wmtype,
+                          WarpedMotionParams *wm) {
   wm->wmtype = wmtype;
   switch (wmtype) {
     case HOMOGRAPHY:
diff --git a/av1/common/warped_motion.h b/av1/common/warped_motion.h
index a9c57f9..965b296 100644
--- a/av1/common/warped_motion.h
+++ b/av1/common/warped_motion.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_WARPED_MOTION_H
-#define VP10_COMMON_WARPED_MOTION_H
+#ifndef AV1_COMMON_WARPED_MOTION_H
+#define AV1_COMMON_WARPED_MOTION_H
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -17,9 +17,9 @@
 #include <math.h>
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 // Bits of precision used for the model
 #define WARPEDMODEL_PREC_BITS 8
@@ -72,25 +72,25 @@
   int wmmat[8];  // For homography wmmat[9] is assumed to be 1
 } WarpedMotionParams;
 
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                          int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                          uint8_t *ref, int width, int height, int stride,
-                          uint8_t *dst, int p_col, int p_row, int p_width,
-                          int p_height, int p_stride, int subsampling_x,
-                          int subsampling_y, int x_scale, int y_scale);
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                         int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                         uint8_t *ref, int width, int height, int stride,
+                         uint8_t *dst, int p_col, int p_row, int p_width,
+                         int p_height, int p_stride, int subsampling_x,
+                         int subsampling_y, int x_scale, int y_scale);
 
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                     int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                     uint8_t *ref, int width, int height, int stride,
-                     uint8_t *pred, int p_col, int p_row, int p_width,
-                     int p_height, int p_stride, int subsampling_x,
-                     int subsampling_y, int x_scale, int y_scale);
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                    int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                    uint8_t *ref, int width, int height, int stride,
+                    uint8_t *pred, int p_col, int p_row, int p_width,
+                    int p_height, int p_stride, int subsampling_x,
+                    int subsampling_y, int x_scale, int y_scale);
 
 // Integerize model into the WarpedMotionParams structure
-void vp10_integerize_model(const double *model, TransformationType wmtype,
-                           WarpedMotionParams *wm);
-#endif  // VP10_COMMON_WARPED_MOTION_H
+void av1_integerize_model(const double *model, TransformationType wmtype,
+                          WarpedMotionParams *wm);
+#endif  // AV1_COMMON_WARPED_MOTION_H
diff --git a/av1/common/x86/vp10_convolve_filters_ssse3.c b/av1/common/x86/av1_convolve_filters_ssse3.c
similarity index 99%
rename from av1/common/x86/vp10_convolve_filters_ssse3.c
rename to av1/common/x86/av1_convolve_filters_ssse3.c
index b842589..7a40b9c 100644
--- a/av1/common/x86/vp10_convolve_filters_ssse3.c
+++ b/av1/common/x86/av1_convolve_filters_ssse3.c
@@ -7,7 +7,7 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/filter.h"
 
 #if CONFIG_EXT_INTERP
diff --git a/av1/common/x86/vp10_convolve_ssse3.c b/av1/common/x86/av1_convolve_ssse3.c
similarity index 95%
rename from av1/common/x86/vp10_convolve_ssse3.c
rename to av1/common/x86/av1_convolve_ssse3.c
index e891d74..0c6bb99 100644
--- a/av1/common/x86/vp10_convolve_ssse3.c
+++ b/av1/common/x86/av1_convolve_ssse3.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 #include <tmmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/filter.h"
 
 #define WIDTH_BOUND (16)
@@ -610,10 +610,10 @@
 // (1) 10/12-taps filters
 // (2) x_step_q4 = 16 then filter is fixed at the call
 
-void vp10_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
-                               int dst_stride, int w, int h,
-                               const InterpFilterParams filter_params,
-                               const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+                              int dst_stride, int w, int h,
+                              const InterpFilterParams filter_params,
+                              const int subpel_x_q4, int x_step_q4, int avg) {
   DECLARE_ALIGNED(16, uint16_t, temp[8 * 8]);
   __m128i verf[6];
   __m128i horf[2];
@@ -630,18 +630,18 @@
   (void)x_step_q4;
 
   if (0 == subpel_x_q4 || 16 != x_step_q4) {
-    vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                          subpel_x_q4, x_step_q4, avg);
+    av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                         subpel_x_q4, x_step_q4, avg);
     return;
   }
 
-  hCoeffs = vp10_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
+  hCoeffs = av1_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
   vCoeffs =
-      vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+      av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
 
   if (!hCoeffs || !vCoeffs) {
-    vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                          subpel_x_q4, x_step_q4, avg);
+    av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                         subpel_x_q4, x_step_q4, avg);
     return;
   }
 
@@ -825,10 +825,10 @@
   } while (rowIndex < h);
 }
 
-void vp10_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
-                              int dst_stride, int w, int h,
-                              const InterpFilterParams filter_params,
-                              const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+                             int dst_stride, int w, int h,
+                             const InterpFilterParams filter_params,
+                             const int subpel_y_q4, int y_step_q4, int avg) {
   __m128i verf[6];
   SubpelFilterCoeffs vCoeffs;
   const uint8_t *src_ptr;
@@ -839,17 +839,17 @@
   const int tapsNum = filter_params.taps;
 
   if (0 == subpel_y_q4 || 16 != y_step_q4) {
-    vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                         subpel_y_q4, y_step_q4, avg);
+    av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                        subpel_y_q4, y_step_q4, avg);
     return;
   }
 
   vCoeffs =
-      vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+      av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
 
   if (!vCoeffs) {
-    vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                         subpel_y_q4, y_step_q4, avg);
+    av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                        subpel_y_q4, y_step_q4, avg);
     return;
   }
 
diff --git a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
similarity index 99%
rename from av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
rename to av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
index e7d63fe..ecd3d4b 100644
--- a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
@@ -10,8 +10,8 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
@@ -22,31 +22,31 @@
 #define ADD_EPI16 _mm_adds_epi16
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
-void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rows_c
 #else
-void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
diff --git a/av1/common/x86/vp10_fwd_txfm1d_sse4.c b/av1/common/x86/av1_fwd_txfm1d_sse4.c
similarity index 98%
rename from av1/common/x86/vp10_fwd_txfm1d_sse4.c
rename to av1/common/x86/av1_fwd_txfm1d_sse4.c
index 902c9b2..f0bcef9 100644
--- a/av1/common/x86/vp10_fwd_txfm1d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm1d_sse4.c
@@ -1,7 +1,7 @@
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
 
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
-                           const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 4;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -53,8 +53,8 @@
   }
 }
 
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
-                           const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 8;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -152,8 +152,8 @@
   }
 }
 
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 16;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -349,8 +349,8 @@
   }
 }
 
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 32;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -764,8 +764,8 @@
   }
 }
 
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 4;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -835,8 +835,8 @@
   }
 }
 
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 8;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -960,8 +960,8 @@
   }
 }
 
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
+                            const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 16;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -1199,8 +1199,8 @@
   }
 }
 
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
+                            const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 32;
   const int num_per_128 = 4;
   const int32_t *cospi;
diff --git a/av1/common/x86/vp10_fwd_txfm2d_sse4.c b/av1/common/x86/av1_fwd_txfm2d_sse4.c
similarity index 72%
rename from av1/common/x86/vp10_fwd_txfm2d_sse4.c
rename to av1/common/x86/av1_fwd_txfm2d_sse4.c
index a59a0c8..07c283e 100644
--- a/av1/common/x86/vp10_fwd_txfm2d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm2d_sse4.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
 
 static INLINE void int16_array_with_stride_to_int32_array_without_stride(
     const int16_t *input, int stride, int32_t *output, int txfm1d_size) {
@@ -28,14 +28,14 @@
 
 static INLINE TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_fdct4_new_sse4_1; break;
-    case TXFM_TYPE_DCT8: return vp10_fdct8_new_sse4_1; break;
-    case TXFM_TYPE_DCT16: return vp10_fdct16_new_sse4_1; break;
-    case TXFM_TYPE_DCT32: return vp10_fdct32_new_sse4_1; break;
-    case TXFM_TYPE_ADST4: return vp10_fadst4_new_sse4_1; break;
-    case TXFM_TYPE_ADST8: return vp10_fadst8_new_sse4_1; break;
-    case TXFM_TYPE_ADST16: return vp10_fadst16_new_sse4_1; break;
-    case TXFM_TYPE_ADST32: return vp10_fadst32_new_sse4_1; break;
+    case TXFM_TYPE_DCT4: return av1_fdct4_new_sse4_1; break;
+    case TXFM_TYPE_DCT8: return av1_fdct8_new_sse4_1; break;
+    case TXFM_TYPE_DCT16: return av1_fdct16_new_sse4_1; break;
+    case TXFM_TYPE_DCT32: return av1_fdct32_new_sse4_1; break;
+    case TXFM_TYPE_ADST4: return av1_fadst4_new_sse4_1; break;
+    case TXFM_TYPE_ADST8: return av1_fadst8_new_sse4_1; break;
+    case TXFM_TYPE_ADST16: return av1_fadst16_new_sse4_1; break;
+    case TXFM_TYPE_ADST32: return av1_fadst32_new_sse4_1; break;
     default: assert(0);
   }
   return NULL;
@@ -69,18 +69,18 @@
   transpose_32(txfm_size, buf_128, out_128);
 }
 
-void vp10_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
-                                  int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
+                                 int stride, int tx_type, int bd) {
   int32_t txfm_buf[1024];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
   (void)bd;
   fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
-                                  int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
+                                 int stride, int tx_type, int bd) {
   int32_t txfm_buf[4096];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
   (void)bd;
   fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
 }
diff --git a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
similarity index 96%
rename from av1/common/x86/vp10_fwd_txfm_impl_sse2.h
rename to av1/common/x86/av1_fwd_txfm_impl_sse2.h
index 9bb8abc..ecaa97c 100644
--- a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
@@ -10,7 +10,7 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
@@ -98,7 +98,7 @@
                        _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
   test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
   if (test) {
-    vpx_highbd_fdct4x4_c(input, output, stride);
+    aom_highbd_fdct4x4_c(input, output, stride);
     return;
   }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -169,7 +169,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&x0, &x1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -191,7 +191,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&t0, &t1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -230,7 +230,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&x0, &x1);
       if (overflow) {
-        vpx_highbd_fdct4x4_c(input, output, stride);
+        aom_highbd_fdct4x4_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -313,7 +313,7 @@
       overflow =
           check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
     }
@@ -328,7 +328,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -371,7 +371,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -401,7 +401,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&r0, &r1);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -414,7 +414,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -457,7 +457,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
           if (overflow) {
-            vpx_highbd_fdct8x8_c(input, output, stride);
+            aom_highbd_fdct8x8_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -720,7 +720,7 @@
         overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
                                            &input4, &input5, &input6, &input7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -740,7 +740,7 @@
             check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                     &step1_4, &step1_5, &step1_6, &step1_7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -760,7 +760,7 @@
         overflow =
             check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -774,7 +774,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -796,7 +796,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -817,7 +817,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x2(&r0, &r1);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -830,7 +830,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -853,7 +853,7 @@
               overflow =
                   check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
               if (overflow) {
-                vpx_highbd_fdct16x16_c(input, output, stride);
+                aom_highbd_fdct16x16_c(input, output, stride);
                 return;
               }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -881,7 +881,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -901,7 +901,7 @@
               check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
                                       &step3_4, &step3_5, &step3_6, &step3_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -924,7 +924,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -944,7 +944,7 @@
               check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                       &step1_4, &step1_5, &step1_6, &step1_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -966,7 +966,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -987,7 +987,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
diff --git a/av1/common/x86/vp10_fwd_txfm_sse2.c b/av1/common/x86/av1_fwd_txfm_sse2.c
similarity index 84%
rename from av1/common/x86/vp10_fwd_txfm_sse2.c
rename to av1/common/x86/av1_fwd_txfm_sse2.c
index 05ec539..3a95071 100644
--- a/av1/common/x86/vp10_fwd_txfm_sse2.c
+++ b/av1/common/x86/av1_fwd_txfm_sse2.c
@@ -10,12 +10,12 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 
-void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
   store_output(&in0, output);
 }
 
-void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
   __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
   __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,8 +84,8 @@
   store_output(&in1, output);
 }
 
-void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
-                           int stride) {
+void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+                          int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
@@ -153,8 +153,8 @@
   store_output(&in1, output);
 }
 
-void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
-                           int stride) {
+void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+                          int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
@@ -226,47 +226,47 @@
 }
 
 #define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vp10_fdct4x4_sse2
-#define FDCT8x8_2D vp10_fdct8x8_sse2
-#define FDCT16x16_2D vp10_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h"
+#define FDCT4x4_2D av1_fdct4x4_sse2
+#define FDCT8x8_2D av1_fdct8x8_sse2
+#define FDCT16x16_2D av1_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_fdct32x32_sse2
+#define FDCT32x32_2D av1_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h"  // NOLINT
+#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
+#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
+#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"  // NOLINT
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
similarity index 99%
rename from av1/common/x86/vp10_highbd_convolve_filters_sse4.c
rename to av1/common/x86/av1_highbd_convolve_filters_sse4.c
index 7f3630c..e2337fd 100644
--- a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
@@ -7,10 +7,10 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/filter.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_EXT_INTERP
 DECLARE_ALIGNED(16, const int16_t,
                 sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -137,7 +137,7 @@
 };
 #endif
 #endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_EXT_INTERP
 DECLARE_ALIGNED(16, const int16_t,
                 sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -264,7 +264,7 @@
 };
 #endif
 #endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if USE_TEMPORALFILTER_12TAP
 DECLARE_ALIGNED(
     16, const int16_t,
diff --git a/av1/common/x86/vp10_highbd_convolve_sse4.c b/av1/common/x86/av1_highbd_convolve_sse4.c
similarity index 91%
rename from av1/common/x86/vp10_highbd_convolve_sse4.c
rename to av1/common/x86/av1_highbd_convolve_sse4.c
index ea78400..705c963 100644
--- a/av1/common/x86/vp10_highbd_convolve_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_sse4.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 #include <smmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/filter.h"
 
 typedef void (*TransposeSave)(const int width, int pixelsNum, uint32_t *src,
@@ -212,12 +212,12 @@
   _mm_storeu_si128((__m128i *)buf, u[0]);
 }
 
-void vp10_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
-                                       uint16_t *dst, int dst_stride, int w,
-                                       int h,
-                                       const InterpFilterParams filter_params,
-                                       const int subpel_x_q4, int x_step_q4,
-                                       int avg, int bd) {
+void av1_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
+                                      uint16_t *dst, int dst_stride, int w,
+                                      int h,
+                                      const InterpFilterParams filter_params,
+                                      const int subpel_x_q4, int x_step_q4,
+                                      int avg, int bd) {
   DECLARE_ALIGNED(16, uint32_t, temp[4 * 4]);
   __m128i verf[6];
   HbdSubpelFilterCoeffs vCoeffs;
@@ -228,18 +228,16 @@
   (void)x_step_q4;
 
   if (0 == subpel_x_q4 || 16 != x_step_q4) {
-    vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
-                                 filter_params, subpel_x_q4, x_step_q4, avg,
-                                 bd);
+    av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+                                filter_params, subpel_x_q4, x_step_q4, avg, bd);
     return;
   }
 
   vCoeffs =
-      vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+      av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
   if (!vCoeffs) {
-    vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
-                                 filter_params, subpel_x_q4, x_step_q4, avg,
-                                 bd);
+    av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+                                filter_params, subpel_x_q4, x_step_q4, avg, bd);
     return;
   }
 
@@ -423,27 +421,27 @@
   } while (rowIndex < h);
 }
 
-void vp10_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
-                                      uint16_t *dst, int dst_stride, int w,
-                                      int h,
-                                      const InterpFilterParams filter_params,
-                                      const int subpel_y_q4, int y_step_q4,
-                                      int avg, int bd) {
+void av1_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
+                                     uint16_t *dst, int dst_stride, int w,
+                                     int h,
+                                     const InterpFilterParams filter_params,
+                                     const int subpel_y_q4, int y_step_q4,
+                                     int avg, int bd) {
   __m128i verf[6];
   HbdSubpelFilterCoeffs vCoeffs;
   const int tapsNum = filter_params.taps;
 
   if (0 == subpel_y_q4 || 16 != y_step_q4) {
-    vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
-                                filter_params, subpel_y_q4, y_step_q4, avg, bd);
+    av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+                               filter_params, subpel_y_q4, y_step_q4, avg, bd);
     return;
   }
 
   vCoeffs =
-      vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+      av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
   if (!vCoeffs) {
-    vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
-                                filter_params, subpel_y_q4, y_step_q4, avg, bd);
+    av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+                               filter_params, subpel_y_q4, y_step_q4, avg, bd);
     return;
   }
 
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
similarity index 98%
rename from av1/common/x86/vp10_inv_txfm_sse2.c
rename to av1/common/x86/av1_inv_txfm_sse2.c
index b09933e..74a0d90 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "av1/common/x86/vp10_inv_txfm_sse2.h"
+#include "./av1_rtcd.h"
+#include "av1/common/x86/av1_inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
 #define RECON_AND_STORE4X4(dest, in_x)                    \
@@ -21,7 +21,7 @@
     *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
   }
 
-void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
   const __m128i cst = _mm_setr_epi16(
@@ -151,7 +151,7 @@
   }
 }
 
-void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -176,7 +176,7 @@
   res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
 }
 
-void vp10_idct4_sse2(__m128i *in) {
+void av1_idct4_sse2(__m128i *in) {
   const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -212,7 +212,7 @@
   in[1] = _mm_shuffle_epi32(in[1], 0x4E);
 }
 
-void vp10_iadst4_sse2(__m128i *in) {
+void av1_iadst4_sse2(__m128i *in) {
   const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
   const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
   const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -446,7 +446,7 @@
     out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
 
-void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -477,11 +477,11 @@
 
   // 2-D
   for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+    // 8x8 Transpose is copied from av1_fdct8x8_sse2()
     TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                   in4, in5, in6, in7);
 
-    // 4-stage 1D vp10_idct8x8
+    // 4-stage 1D av1_idct8x8
     IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
           in6, in7);
   }
@@ -515,7 +515,7 @@
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
 
-void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -536,7 +536,7 @@
   RECON_AND_STORE(dest + 7 * stride, dc_value);
 }
 
-void vp10_idct8_sse2(__m128i *in) {
+void av1_idct8_sse2(__m128i *in) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -552,16 +552,16 @@
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
-  // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+  // 8x8 Transpose is copied from av1_fdct8x8_sse2()
   TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
                 in1, in2, in3, in4, in5, in6, in7);
 
-  // 4-stage 1D vp10_idct8x8
+  // 4-stage 1D av1_idct8x8
   IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
         in[4], in[5], in[6], in[7]);
 }
 
-void vp10_iadst8_sse2(__m128i *in) {
+void av1_iadst8_sse2(__m128i *in) {
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -789,7 +789,7 @@
   in[7] = _mm_sub_epi16(k__const_0, s1);
 }
 
-void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1158,8 +1158,8 @@
                            stp2_12)                                            \
   }
 
-void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
-                                 int stride) {
+void av1_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -1200,7 +1200,7 @@
 
   curr1 = l;
   for (i = 0; i < 2; i++) {
-    // 1-D vp10_idct
+    // 1-D av1_idct
 
     // Load input data.
     in[0] = _mm_load_si128((const __m128i *)input);
@@ -1248,7 +1248,7 @@
   }
   for (i = 0; i < 2; i++) {
     int j;
-    // 1-D vp10_idct
+    // 1-D av1_idct
     array_transpose_8x8(l + i * 8, in);
     array_transpose_8x8(r + i * 8, in + 8);
 
@@ -1283,8 +1283,7 @@
   }
 }
 
-void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a, i;
@@ -1316,7 +1315,7 @@
   }
 }
 
-static void vp10_iadst16_8col(__m128i *in) {
+static void av1_iadst16_8col(__m128i *in) {
   // perform 16x16 1-D ADST for 8 columns
   __m128i s[16], x[16], u[32], v[32];
   const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@@ -1786,7 +1785,7 @@
   in[15] = _mm_sub_epi16(kZero, s[1]);
 }
 
-static void vp10_idct16_8col(__m128i *in) {
+static void av1_idct16_8col(__m128i *in) {
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -2130,20 +2129,20 @@
   in[15] = _mm_sub_epi16(s[0], s[15]);
 }
 
-void vp10_idct16_sse2(__m128i *in0, __m128i *in1) {
+void av1_idct16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_idct16_8col(in0);
-  vp10_idct16_8col(in1);
+  av1_idct16_8col(in0);
+  av1_idct16_8col(in1);
 }
 
-void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
+void av1_iadst16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_iadst16_8col(in0);
-  vp10_iadst16_8col(in1);
+  av1_iadst16_8col(in0);
+  av1_iadst16_8col(in1);
 }
 
-void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
-                                int stride) {
+void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+                               int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -3016,12 +3015,12 @@
   }
 
 // Only upper-left 8x8 has non-zero coeff
-void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
-                                int stride) {
+void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+                               int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
@@ -3173,13 +3172,13 @@
   }
 }
 
-void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
-                                  int stride) {
+void av1_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
@@ -3241,7 +3240,7 @@
 
   for (i = 0; i < 4; i++) {
     i32 = (i << 5);
-    // First 1-D vp10_idct
+    // First 1-D av1_idct
     // Load input data.
     LOAD_DQCOEFF(in[0], input);
     LOAD_DQCOEFF(in[8], input);
@@ -3391,7 +3390,7 @@
     col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
   }
   for (i = 0; i < 4; i++) {
-    // Second 1-D vp10_idct
+    // Second 1-D av1_idct
     j = i << 3;
 
     // Transpose 32x8 block to 8x32 block
@@ -3447,8 +3446,7 @@
   }
 }
 
-void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a, i;
@@ -3468,7 +3466,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
   const __m128i zero = _mm_set1_epi16(0);
@@ -3482,8 +3480,8 @@
   return retval;
 }
 
-void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -3516,7 +3514,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Check the min & max values
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3545,14 +3543,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct4_c(input, outptr, bd);
+      av1_highbd_idct4_c(input, outptr, bd);
       input += 4;
       outptr += 4;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Final round and shift
     inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3588,7 +3586,7 @@
     // Columns
     for (i = 0; i < 4; ++i) {
       for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-      vp10_highbd_idct4_c(temp_in, temp_out, bd);
+      av1_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3597,8 +3595,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3632,7 +3630,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3662,14 +3660,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 8; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3688,7 +3686,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3697,8 +3695,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3733,7 +3731,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3765,14 +3763,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3791,7 +3789,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3800,8 +3798,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                        int stride, int bd) {
+void av1_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                       int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3838,7 +3836,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3873,14 +3871,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 16; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3904,7 +3902,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3913,8 +3911,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                       int stride, int bd) {
+void av1_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                      int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3953,7 +3951,7 @@
 
   if (!test) {
     // Do the row transform (N.B. This transposes inptr)
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3991,14 +3989,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -4022,7 +4020,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4030,4 +4028,4 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.h b/av1/common/x86/av1_inv_txfm_sse2.h
similarity index 97%
rename from av1/common/x86/vp10_inv_txfm_sse2.h
rename to av1/common/x86/av1_inv_txfm_sse2.h
index 0839ab9..a4cbbcf 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.h
+++ b/av1/common/x86/av1_inv_txfm_sse2.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
-#define VPX_DSP_X86_INV_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
+#define AOM_DSP_X86_INV_TXFM_SSE2_H_
 
 #include <emmintrin.h>  // SSE2
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "av1/common/av1_inv_txfm.h"
 
 // perform 8x8 transpose
 static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
@@ -181,4 +181,4 @@
 void iadst8_sse2(__m128i *in);
 void iadst16_sse2(__m128i *in0, __m128i *in1);
 
-#endif  // VPX_DSP_X86_INV_TXFM_SSE2_H_
+#endif  // AOM_DSP_X86_INV_TXFM_SSE2_H_
diff --git a/av1/common/x86/vp10_txfm1d_sse4.h b/av1/common/x86/av1_txfm1d_sse4.h
similarity index 68%
rename from av1/common/x86/vp10_txfm1d_sse4.h
rename to av1/common/x86/av1_txfm1d_sse4.h
index f05a54c..af7afb7 100644
--- a/av1/common/x86/vp10_txfm1d_sse4.h
+++ b/av1/common/x86/av1_txfm1d_sse4.h
@@ -1,52 +1,52 @@
-#ifndef VP10_TXMF1D_SSE2_H_
-#define VP10_TXMF1D_SSE2_H_
+#ifndef AV1_TXMF1D_SSE2_H_
+#define AV1_TXMF1D_SSE2_H_
 
 #include <smmintrin.h>
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_txfm.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
 
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct16_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct32_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
 
-void vp10_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
 
 static INLINE void transpose_32_4x4(int stride, const __m128i *input,
                                     __m128i *output) {
@@ -141,4 +141,4 @@
 }
 #endif
 
-#endif  // VP10_TXMF1D_SSE2_H_
+#endif  // AV1_TXMF1D_SSE2_H_
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index f3686eb..eada3af 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -11,9 +11,9 @@
 #include <assert.h>
 #include <smmintrin.h> /* SSE4.1 */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 #include "av1/common/x86/highbd_txfm_utility_sse4.h"
 
 static INLINE void load_buffer_4x4(const int32_t *coeff, __m128i *in) {
@@ -229,8 +229,8 @@
   _mm_storel_epi64((__m128i *)(output + 3 * stride), v3);
 }
 
-void vp10_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
-                                    int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
+                                   int stride, int tx_type, int bd) {
   __m128i in[4];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -695,8 +695,8 @@
   _mm_store_si128((__m128i *)(output + 7 * stride), u7);
 }
 
-void vp10_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
-                                    int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
+                                   int stride, int tx_type, int bd) {
   __m128i in[16], out[16];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -1295,8 +1295,8 @@
   round_shift_8x8(&in[48], shift);
 }
 
-void vp10_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
-                                      int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
+                                     int stride, int tx_type, int bd) {
   __m128i in[64], out[64];
   const TXFM_2D_CFG *cfg = NULL;
 
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 70bf9bf..e9f0ce8 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_dsp/x86/inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 #include "aom_ports/mem.h"
@@ -56,8 +56,8 @@
   } while (0)
 #endif
 
-void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   __m128i in[2];
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
@@ -147,8 +147,8 @@
   }
 }
 
-void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   __m128i in[8];
   const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -240,8 +240,8 @@
   RECON_AND_STORE(dest + 7 * stride, in[7]);
 }
 
-void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
-                                int stride, int tx_type) {
+void av1_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+                               int stride, int tx_type) {
   __m128i in[32];
   __m128i *in0 = &in[0];
   __m128i *in1 = &in[16];
diff --git a/av1/common/x86/reconintra_sse4.c b/av1/common/x86/reconintra_sse4.c
index cac34a6..ab1fa93 100644
--- a/av1/common/x86/reconintra_sse4.c
+++ b/av1/common/x86/reconintra_sse4.c
@@ -9,7 +9,7 @@
  */
 #include <smmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_ports/mem.h"
 #include "av1/common/enums.h"
 #include "av1/common/intra_filters.h"
@@ -498,86 +498,84 @@
   GeneratePrediction(above, left, bs, prm, meanValue, dst, stride);
 }
 
-void vp10_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
+void av1_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                    const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, DC_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                    const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                   const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, V_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                    const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                   const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, H_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                      const uint8_t *above,
-                                      const uint8_t *left) {
+void av1_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                     const uint8_t *above,
+                                     const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D45_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D135_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D117_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D153_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D207_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                      const uint8_t *above,
-                                      const uint8_t *left) {
+void av1_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                     const uint8_t *above,
+                                     const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D63_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
+void av1_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                    const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, TM_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
 // ============== High Bit Depth ==============
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int HighbdGetMeanValue4x4(const uint16_t *above,
                                         const uint16_t *left, const int bd,
                                         __m128i *params) {
@@ -809,83 +807,83 @@
   HighbdGeneratePrediction(above, left, bs, bd, prm, meanValue, dst, stride);
 }
 
-void vp10_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                            int bs, const uint16_t *above,
-                                            const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                           int bs, const uint16_t *above,
+                                           const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, DC_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                           int bs, const uint16_t *above,
-                                           const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                          int bs, const uint16_t *above,
+                                          const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, V_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                           int bs, const uint16_t *above,
-                                           const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                          int bs, const uint16_t *above,
+                                          const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, H_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                             int bs, const uint16_t *above,
-                                             const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                            int bs, const uint16_t *above,
+                                            const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D45_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D135_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D117_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D153_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D207_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                             int bs, const uint16_t *above,
-                                             const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                            int bs, const uint16_t *above,
+                                            const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D63_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                            int bs, const uint16_t *above,
-                                            const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                           int bs, const uint16_t *above,
+                                           const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, TM_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/decoder/bitreader.h b/av1/decoder/bitreader.h
index 75d6aa4..aaf1bb8 100644
--- a/av1/decoder/bitreader.h
+++ b/av1/decoder/bitreader.h
@@ -11,28 +11,28 @@
 /* The purpose of this header is to provide compile time pluggable bit reader
  * implementations with a common interface. */
 
-#ifndef VPX10_DECODER_BITREADER_H_
-#define VPX10_DECODER_BITREADER_H_
+#ifndef AOM10_DECODER_BITREADER_H_
+#define AOM10_DECODER_BITREADER_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #if CONFIG_ANS
 #include "av1/common/ans.h"
-#include "aom/vp8dx.h"  // for vp10_decrypt_cb
-#define vp10_reader struct AnsDecoder
-#define vp10_reader_has_error ans_reader_has_error
-#define vp10_read uabs_read
-#define vp10_read_bit uabs_read_bit
-#define vp10_read_literal uabs_read_literal
-#define vp10_read_tree uabs_read_tree
+#include "aom/aomdx.h"  // for av1_decrypt_cb
+#define aom_reader struct AnsDecoder
+#define aom_reader_has_error ans_reader_has_error
+#define aom_read uabs_read
+#define aom_read_bit uabs_read_bit
+#define aom_read_literal uabs_read_literal
+#define aom_read_tree uabs_read_tree
 #else
 #include "aom_dsp/bitreader.h"
-#define vp10_reader vpx_reader
-#define vp10_reader_has_error vpx_reader_has_error
-#define vp10_read vpx_read
-#define vp10_read_bit vpx_read_bit
-#define vp10_read_literal vpx_read_literal
-#define vp10_read_tree vpx_read_tree
+#define aom_reader aom_reader
+#define aom_reader_has_error aom_reader_has_error
+#define aom_read aom_read
+#define aom_read_bit aom_read_bit
+#define aom_read_literal aom_read_literal
+#define aom_read_tree aom_read_tree
 #endif
 
-#endif  // VPX10_DECODER_BITREADER_H_
+#endif  // AOM10_DECODER_BITREADER_H_
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 0f90c20..1f1f358 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -11,20 +11,19 @@
 #include <assert.h>
 #include <stdlib.h>  // qsort()
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
 
 #include "aom_dsp/bitreader_buffer.h"
 #include "av1/decoder/bitreader.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/mem_ops.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
 
 #include "av1/common/alloccommon.h"
 #if CONFIG_CLPF
@@ -51,9 +50,9 @@
 #include "av1/decoder/decoder.h"
 #include "av1/decoder/dsubexp.h"
 
-#define MAX_VPX_HEADER_SIZE 80
+#define MAX_AV1_HEADER_SIZE 80
 
-static int is_compound_reference_allowed(const VP10_COMMON *cm) {
+static int is_compound_reference_allowed(const AV1_COMMON *cm) {
   int i;
   if (frame_is_intra_only(cm)) return 0;
   for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
@@ -62,7 +61,7 @@
   return 0;
 }
 
-static void setup_compound_reference_mode(VP10_COMMON *cm) {
+static void setup_compound_reference_mode(AV1_COMMON *cm) {
 #if CONFIG_EXT_REFS
   cm->comp_fwd_ref[0] = LAST_FRAME;
   cm->comp_fwd_ref[1] = LAST2_FRAME;
@@ -94,51 +93,51 @@
   return len != 0 && len <= (size_t)(end - start);
 }
 
-static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
-  const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
+static int decode_unsigned_max(struct aom_read_bit_buffer *rb, int max) {
+  const int data = aom_rb_read_literal(rb, get_unsigned_bits(max));
   return data > max ? max : data;
 }
 
-static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) {
-  return vpx_rb_read_bit(rb) ? TX_MODE_SELECT : vpx_rb_read_literal(rb, 2);
+static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb) {
+  return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
 }
 
-static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j;
   for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
     for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
-      vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+      av1_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
 }
 
-static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i;
 #if CONFIG_REF_MV
   for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
-    vp10_diff_update_prob(r, &fc->newmv_prob[i]);
+    av1_diff_update_prob(r, &fc->newmv_prob[i]);
   for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
-    vp10_diff_update_prob(r, &fc->zeromv_prob[i]);
+    av1_diff_update_prob(r, &fc->zeromv_prob[i]);
   for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
-    vp10_diff_update_prob(r, &fc->refmv_prob[i]);
+    av1_diff_update_prob(r, &fc->refmv_prob[i]);
   for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
-    vp10_diff_update_prob(r, &fc->drl_prob[i]);
+    av1_diff_update_prob(r, &fc->drl_prob[i]);
 #if CONFIG_EXT_INTER
-  vp10_diff_update_prob(r, &fc->new2mv_prob);
+  av1_diff_update_prob(r, &fc->new2mv_prob);
 #endif  // CONFIG_EXT_INTER
 #else
   int j;
   for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
     for (j = 0; j < INTER_MODES - 1; ++j)
-      vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+      av1_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
 #endif
 }
 
 #if CONFIG_EXT_INTER
-static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j;
-  if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+  if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
     for (j = 0; j < INTER_MODE_CONTEXTS; ++j) {
       for (i = 0; i < INTER_COMPOUND_MODES - 1; ++i) {
-        vp10_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
+        av1_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
       }
     }
   }
@@ -146,28 +145,28 @@
 #endif  // CONFIG_EXT_INTER
 
 static REFERENCE_MODE read_frame_reference_mode(
-    const VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+    const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   if (is_compound_reference_allowed(cm)) {
-    return vpx_rb_read_bit(rb)
+    return aom_rb_read_bit(rb)
                ? REFERENCE_MODE_SELECT
-               : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
+               : (aom_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
   } else {
     return SINGLE_REFERENCE;
   }
 }
 
-static void read_frame_reference_mode_probs(VP10_COMMON *cm, vp10_reader *r) {
+static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
   FRAME_CONTEXT *const fc = cm->fc;
   int i, j;
 
   if (cm->reference_mode == REFERENCE_MODE_SELECT)
     for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
-      vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);
+      av1_diff_update_prob(r, &fc->comp_inter_prob[i]);
 
   if (cm->reference_mode != COMPOUND_REFERENCE) {
     for (i = 0; i < REF_CONTEXTS; ++i) {
       for (j = 0; j < (SINGLE_REFS - 1); ++j) {
-        vp10_diff_update_prob(r, &fc->single_ref_prob[i][j]);
+        av1_diff_update_prob(r, &fc->single_ref_prob[i][j]);
       }
     }
   }
@@ -176,29 +175,29 @@
     for (i = 0; i < REF_CONTEXTS; ++i) {
 #if CONFIG_EXT_REFS
       for (j = 0; j < (FWD_REFS - 1); ++j)
-        vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+        av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
       for (j = 0; j < (BWD_REFS - 1); ++j)
-        vp10_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
+        av1_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
 #else
       for (j = 0; j < (COMP_REFS - 1); ++j)
-        vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+        av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
 #endif  // CONFIG_EXT_REFS
     }
   }
 }
 
-static void update_mv_probs(vpx_prob *p, int n, vp10_reader *r) {
+static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
   int i;
-  for (i = 0; i < n; ++i) vp10_diff_update_prob(r, &p[i]);
+  for (i = 0; i < n; ++i) av1_diff_update_prob(r, &p[i]);
 }
 
-static void read_mv_probs(nmv_context *ctx, int allow_hp, vp10_reader *r) {
+static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
   int i, j;
 
   update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
 
 #if CONFIG_REF_MV
-  vp10_diff_update_prob(r, &ctx->zero_rmv);
+  av1_diff_update_prob(r, &ctx->zero_rmv);
 #endif
 
   for (i = 0; i < 2; ++i) {
@@ -238,16 +237,16 @@
     inv_txfm_param.eob = eob;
     inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       inv_txfm_param.bd = xd->bd;
       highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     if (eob == 1) {
       dqcoeff[0] = 0;
@@ -272,7 +271,7 @@
 #if CONFIG_ANS
                                                 struct AnsDecoder *const r,
 #else
-                                                vp10_reader *r,
+                                                aom_reader *r,
 #endif  // CONFIG_ANS
                                                 MB_MODE_INFO *const mbmi,
                                                 int plane, int row, int col,
@@ -287,22 +286,21 @@
   if (mbmi->sb_type < BLOCK_8X8)
     if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
 
-  vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
-                           pd->dst.stride, dst, pd->dst.stride, col, row,
-                           plane);
+  av1_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+                          pd->dst.stride, dst, pd->dst.stride, col, row, plane);
 
   if (!mbmi->skip) {
     TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
     const scan_order *sc = get_scan(tx_size, tx_type, 0);
-    const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
-                                             tx_type, r, mbmi->segment_id);
+    const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+                                            tx_type, r, mbmi->segment_id);
     inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
                             eob);
   }
 }
 
 #if CONFIG_VAR_TX
-static void decode_reconstruct_tx(MACROBLOCKD *const xd, vp10_reader *r,
+static void decode_reconstruct_tx(MACROBLOCKD *const xd, aom_reader *r,
                                   MB_MODE_INFO *const mbmi, int plane,
                                   BLOCK_SIZE plane_bsize, int block,
                                   int blk_row, int blk_col, TX_SIZE tx_size,
@@ -330,8 +328,8 @@
     TX_TYPE tx_type = get_tx_type(plane_type, xd, block, plane_tx_size);
     const scan_order *sc = get_scan(plane_tx_size, tx_type, 1);
     const int eob =
-        vp10_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
-                                 tx_type, r, mbmi->segment_id);
+        av1_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
+                                tx_type, r, mbmi->segment_id);
     inverse_transform_block(
         xd, plane, tx_type, plane_tx_size,
         &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
@@ -363,7 +361,7 @@
 #if CONFIG_ANS
                                    struct AnsDecoder *const r,
 #else
-                                   vp10_reader *r,
+                                   aom_reader *r,
 #endif
                                    int segment_id, int plane, int row, int col,
                                    TX_SIZE tx_size) {
@@ -372,8 +370,8 @@
   int block_idx = (row << 1) + col;
   TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
   const scan_order *sc = get_scan(tx_size, tx_type, 1);
-  const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
-                                           tx_type, r, segment_id);
+  const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+                                          tx_type, r, segment_id);
 
   inverse_transform_block(xd, plane, tx_type, tx_size,
                           &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
@@ -385,8 +383,8 @@
 static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl,
                                          int n4_hl) {
   // get minimum log2 num4x4s dimension
-  const int x = VPXMIN(n4_wl, n4_hl);
-  return VPXMIN(txsize_sqr_map[mbmi->tx_size], x);
+  const int x = AOMMIN(n4_wl, n4_hl);
+  return AOMMIN(txsize_sqr_map[mbmi->tx_size], x);
 }
 
 static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
@@ -409,7 +407,7 @@
   }
 }
 
-static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static MB_MODE_INFO *set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int bw, int bh, int x_mis, int y_mis, int bwl,
                                  int bhl) {
@@ -439,12 +437,12 @@
   // as they are always compared to values that are in 1/8th pel units
   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
 
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
   return &xd->mi[0]->mbmi;
 }
 
 #if CONFIG_SUPERTX
-static MB_MODE_INFO *set_offsets_extend(VP10_COMMON *const cm,
+static MB_MODE_INFO *set_offsets_extend(AV1_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         const TileInfo *const tile,
                                         BLOCK_SIZE bsize_pred, int mi_row_pred,
@@ -471,10 +469,9 @@
   return &xd->mi[0]->mbmi;
 }
 
-static MB_MODE_INFO *set_mb_offsets(VP10_COMMON *const cm,
-                                    MACROBLOCKD *const xd, BLOCK_SIZE bsize,
-                                    int mi_row, int mi_col, int bw, int bh,
-                                    int x_mis, int y_mis) {
+static MB_MODE_INFO *set_mb_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                                    BLOCK_SIZE bsize, int mi_row, int mi_col,
+                                    int bw, int bh, int x_mis, int y_mis) {
   const int offset = mi_row * cm->mi_stride + mi_col;
   const TileInfo *const tile = &xd->tile;
   int x, y;
@@ -489,7 +486,7 @@
   return &xd->mi[0]->mbmi;
 }
 
-static void set_offsets_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_offsets_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  const TileInfo *const tile, BLOCK_SIZE bsize,
                                  int mi_row, int mi_col) {
   const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -505,16 +502,16 @@
 
   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
 
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
 }
 
-static void set_param_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                BLOCK_SIZE bsize, int mi_row, int mi_col,
                                int txfm, int skip) {
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
-  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
-  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+  const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+  const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
   const int offset = mi_row * cm->mi_stride + mi_col;
   int x, y;
 
@@ -534,21 +531,21 @@
 #endif
 }
 
-static void set_ref(VP10_COMMON *const cm, MACROBLOCKD *const xd, int idx,
+static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx,
                     int mi_row, int mi_col) {
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
   xd->block_refs[idx] = ref_buffer;
-  if (!vp10_is_valid_scale(&ref_buffer->sf))
-    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+  if (!av1_is_valid_scale(&ref_buffer->sf))
+    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Invalid scale factors");
-  vp10_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
-                        &ref_buffer->sf);
+  av1_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
+                       &ref_buffer->sf);
   xd->corrupted |= ref_buffer->buf->corrupted;
 }
 
 static void dec_predict_b_extend(
-    VP10Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
+    AV1Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
     int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred,
     int mi_row_top, int mi_col_top, uint8_t *dst_buf[3], int dst_stride[3],
     BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) {
@@ -564,7 +561,7 @@
   const int mi_width_top = num_8x8_blocks_wide_lookup[bsize_top];
   const int mi_height_top = num_8x8_blocks_high_lookup[bsize_top];
   MB_MODE_INFO *mbmi;
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
 
   if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
       mi_row_pred >= mi_row_top + mi_height_top ||
@@ -596,21 +593,21 @@
                          (c >> xd->plane[2].subsampling_x);
 
   if (!b_sub8x8)
-    vp10_build_inter_predictors_sb_extend(xd,
+    av1_build_inter_predictors_sb_extend(xd,
 #if CONFIG_EXT_INTER
-                                          mi_row_ori, mi_col_ori,
+                                         mi_row_ori, mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                          mi_row_pred, mi_col_pred, bsize_pred);
+                                         mi_row_pred, mi_col_pred, bsize_pred);
   else
-    vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+    av1_build_inter_predictors_sb_sub8x8_extend(xd,
 #if CONFIG_EXT_INTER
-                                                 mi_row_ori, mi_col_ori,
+                                                mi_row_ori, mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                                 mi_row_pred, mi_col_pred,
-                                                 bsize_pred, block);
+                                                mi_row_pred, mi_col_pred,
+                                                bsize_pred, block);
 }
 
-static void dec_extend_dir(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                            const TileInfo *const tile, int block,
                            BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
                            int mi_col, int mi_row_top, int mi_col_top,
@@ -678,7 +675,7 @@
   }
 }
 
-static void dec_extend_all(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_all(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                            const TileInfo *const tile, int block,
                            BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
                            int mi_col, int mi_row_top, int mi_col_top,
@@ -701,13 +698,12 @@
                  mi_row_top, mi_col_top, dst_buf, dst_stride, 7);
 }
 
-static void dec_predict_sb_complex(VP10Decoder *const pbi,
-                                   MACROBLOCKD *const xd,
+static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                                    const TileInfo *const tile, int mi_row,
                                    int mi_col, int mi_row_top, int mi_col_top,
                                    BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
                                    uint8_t *dst_buf[3], int dst_stride[3]) {
-  const VP10_COMMON *const cm = &pbi->common;
+  const AV1_COMMON *const cm = &pbi->common;
   const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
   const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
   const BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -725,7 +721,7 @@
   int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
   int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     int len = sizeof(uint16_t);
     dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -748,7 +744,7 @@
     dst_buf3[0] = tmp_buf3;
     dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
     dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
 #endif
 
@@ -793,7 +789,7 @@
         // weighted average to smooth the boundary
         xd->plane[0].dst.buf = dst_buf[0];
         xd->plane[0].dst.stride = dst_stride[0];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             0);
@@ -827,7 +823,7 @@
           for (i = 0; i < MAX_MB_PLANE; i++) {
             xd->plane[i].dst.buf = dst_buf[i];
             xd->plane[i].dst.stride = dst_stride[i];
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_HORZ, i);
@@ -856,7 +852,7 @@
         // Smooth
         xd->plane[0].dst.buf = dst_buf[0];
         xd->plane[0].dst.stride = dst_stride[0];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             0);
@@ -890,7 +886,7 @@
           for (i = 0; i < MAX_MB_PLANE; i++) {
             xd->plane[i].dst.buf = dst_buf[i];
             xd->plane[i].dst.stride = dst_stride[i];
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_VERT, i);
@@ -943,22 +939,22 @@
         if (bsize == BLOCK_8X8 && i != 0)
           continue;  // Skip <4x4 chroma smoothing
         if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
-          vp10_build_masked_inter_predictor_complex(
+          av1_build_masked_inter_predictor_complex(
               xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
               PARTITION_VERT, i);
           if (mi_row + hbs < cm->mi_rows) {
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_VERT, i);
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_HORZ, i);
           }
         } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
-          vp10_build_masked_inter_predictor_complex(
+          av1_build_masked_inter_predictor_complex(
               xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
               PARTITION_HORZ, i);
@@ -993,13 +989,13 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
@@ -1034,13 +1030,13 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
@@ -1073,7 +1069,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
             PARTITION_VERT, i);
@@ -1081,7 +1077,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
@@ -1114,7 +1110,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
             PARTITION_HORZ, i);
@@ -1122,7 +1118,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
@@ -1133,14 +1129,13 @@
   }
 }
 
-static void set_segment_id_supertx(const VP10_COMMON *const cm,
-                                   const int mi_row, const int mi_col,
-                                   const BLOCK_SIZE bsize) {
+static void set_segment_id_supertx(const AV1_COMMON *const cm, const int mi_row,
+                                   const int mi_col, const BLOCK_SIZE bsize) {
   const struct segmentation *seg = &cm->seg;
   const int miw =
-      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+      AOMMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
   const int mih =
-      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+      AOMMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
   const int mi_offset = mi_row * cm->mi_stride + mi_col;
   MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
   int r, c;
@@ -1153,7 +1148,7 @@
     for (r = 0; r < mih; r++)
       for (c = 0; c < miw; c++)
         seg_id_supertx =
-            VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+            AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
     assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
   }
 
@@ -1164,21 +1159,21 @@
 }
 #endif  // CONFIG_SUPERTX
 
-static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
 #if CONFIG_SUPERTX
                          int supertx_enabled,
 #endif  // CONFIG_SUPERTX
-                         int mi_row, int mi_col, vp10_reader *r,
+                         int mi_row, int mi_col, aom_reader *r,
 #if CONFIG_EXT_PARTITION_TYPES
                          PARTITION_TYPE partition,
 #endif  // CONFIG_EXT_PARTITION_TYPES
                          BLOCK_SIZE bsize, int bwl, int bhl) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const int less8x8 = bsize < BLOCK_8X8;
   const int bw = 1 << (bwl - 1);
   const int bh = 1 << (bhl - 1);
-  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
-  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+  const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+  const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
 
 #if CONFIG_SUPERTX
   MB_MODE_INFO *mbmi;
@@ -1191,22 +1186,21 @@
 #if CONFIG_EXT_PARTITION_TYPES
   xd->mi[0]->mbmi.partition = partition;
 #endif
-  vp10_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis,
-                      y_mis);
+  av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis);
 #else
   MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
                                    y_mis, bwl, bhl);
 #if CONFIG_EXT_PARTITION_TYPES
   xd->mi[0]->mbmi.partition = partition;
 #endif
-  vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+  av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
 #endif  // CONFIG_SUPERTX
 
   if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
     const BLOCK_SIZE uv_subsize =
         ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
     if (uv_subsize == BLOCK_INVALID)
-      vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                          "Invalid block size.");
   }
 
@@ -1214,7 +1208,7 @@
   mbmi->segment_id_supertx = MAX_SEGMENTS;
 
   if (supertx_enabled) {
-    xd->corrupted |= vp10_reader_has_error(r);
+    xd->corrupted |= aom_reader_has_error(r);
     return;
   }
 #endif  // CONFIG_SUPERTX
@@ -1226,7 +1220,7 @@
     int plane;
     for (plane = 0; plane <= 1; ++plane) {
       if (mbmi->palette_mode_info.palette_size[plane])
-        vp10_decode_palette_tokens(xd, plane, r);
+        av1_decode_palette_tokens(xd, plane, r);
     }
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
       const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -1254,17 +1248,16 @@
     }
   } else {
     // Prediction
-    vp10_build_inter_predictors_sb(xd, mi_row, mi_col,
-                                   VPXMAX(bsize, BLOCK_8X8));
+    av1_build_inter_predictors_sb(xd, mi_row, mi_col, AOMMAX(bsize, BLOCK_8X8));
 #if CONFIG_OBMC
     if (mbmi->motion_variation == OBMC_CAUSAL) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
       DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
 #else
       DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
       DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
       int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
       int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
@@ -1274,7 +1267,7 @@
       int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
 
       assert(mbmi->sb_type >= BLOCK_8X8);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         int len = sizeof(uint16_t);
         dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -1284,25 +1277,23 @@
         dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
         dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
       } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         dst_buf1[0] = tmp_buf1;
         dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
         dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
         dst_buf2[0] = tmp_buf2;
         dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
         dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
-                                           dst_width1, dst_height1,
-                                           dst_stride1);
-      vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
-                                          dst_width2, dst_height2, dst_stride2);
-      vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
-                            mi_col);
-      vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
-                                       dst_stride1, dst_buf2, dst_stride2);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+                                          dst_width1, dst_height1, dst_stride1);
+      av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+                                         dst_width2, dst_height2, dst_stride2);
+      av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+      av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                      dst_stride1, dst_buf2, dst_stride2);
     }
 #endif  // CONFIG_OBMC
 
@@ -1319,7 +1310,7 @@
 #if CONFIG_VAR_TX
         // TODO(jingning): This can be simplified for decoder performance.
         const BLOCK_SIZE plane_bsize =
-            get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+            get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
         const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
         int bw = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
         int bh = num_4x4_blocks_high_txsize_lookup[max_tx_size];
@@ -1385,7 +1376,7 @@
     }
   }
 
-  xd->corrupted |= vp10_reader_has_error(r);
+  xd->corrupted |= aom_reader_has_error(r);
 }
 
 static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
@@ -1416,31 +1407,31 @@
 }
 #endif  // !CONFIG_EXT_PARTITION_TYPES
 
-static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                     int mi_row, int mi_col, vp10_reader *r,
+static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int mi_row, int mi_col, aom_reader *r,
                                      int has_rows, int has_cols,
 #if CONFIG_EXT_PARTITION_TYPES
                                      BLOCK_SIZE bsize,
 #endif
                                      int bsl) {
   const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
-  const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+  const aom_prob *const probs = cm->fc->partition_prob[ctx];
   FRAME_COUNTS *counts = xd->counts;
   PARTITION_TYPE p;
 
   if (has_rows && has_cols)
 #if CONFIG_EXT_PARTITION_TYPES
     if (bsize <= BLOCK_8X8)
-      p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+      p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
     else
-      p = (PARTITION_TYPE)vp10_read_tree(r, vp10_ext_partition_tree, probs);
+      p = (PARTITION_TYPE)aom_read_tree(r, av1_ext_partition_tree, probs);
 #else
-    p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+    p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
 #endif  // CONFIG_EXT_PARTITION_TYPES
   else if (!has_rows && has_cols)
-    p = vp10_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
+    p = aom_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
   else if (has_rows && !has_cols)
-    p = vp10_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
+    p = aom_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
   else
     p = PARTITION_SPLIT;
 
@@ -1450,13 +1441,13 @@
 }
 
 #if CONFIG_SUPERTX
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
-                     vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+                     aom_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
-    const int ctx = vp10_get_skip_context(xd);
-    const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+    const int ctx = av1_get_skip_context(xd);
+    const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->skip[ctx][skip];
     return skip;
@@ -1465,13 +1456,13 @@
 #endif  // CONFIG_SUPERTX
 
 // TODO(slavarnway): eliminate bsize and subsize in future commits
-static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
 #if CONFIG_SUPERTX
                              int supertx_enabled,
 #endif
-                             int mi_row, int mi_col, vp10_reader *r,
+                             int mi_row, int mi_col, aom_reader *r,
                              BLOCK_SIZE bsize, int n4x4_l2) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const int n8x8_l2 = n4x4_l2 - 1;
   const int num_8x8_wh = 1 << n8x8_l2;
   const int hbs = num_8x8_wh >> 1;
@@ -1503,7 +1494,7 @@
       bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) {
     const int supertx_context = partition_supertx_context_lookup[partition];
     supertx_enabled =
-        vp10_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
+        aom_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
     if (xd->counts)
       xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++;
 #if CONFIG_VAR_TX
@@ -1704,21 +1695,21 @@
       if (get_ext_tx_types(supertx_size, bsize, 1) > 1) {
         int eset = get_ext_tx_set(supertx_size, bsize, 1);
         if (eset > 0) {
-          txfm = vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
-                                cm->fc->inter_ext_tx_prob[eset][supertx_size]);
+          txfm = aom_read_tree(r, av1_ext_tx_inter_tree[eset],
+                               cm->fc->inter_ext_tx_prob[eset][supertx_size]);
           if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
         }
       }
 #else
       if (supertx_size < TX_32X32) {
-        txfm = vp10_read_tree(r, vp10_ext_tx_tree,
-                              cm->fc->inter_ext_tx_prob[supertx_size]);
+        txfm = aom_read_tree(r, av1_ext_tx_tree,
+                             cm->fc->inter_ext_tx_prob[supertx_size]);
         if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
       }
 #endif  // CONFIG_EXT_TX
     }
 
-    vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+    av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
     for (i = 0; i < MAX_MB_PLANE; i++) {
       dst_buf[i] = xd->plane[i].dst.buf;
       dst_stride[i] = xd->plane[i].dst.stride;
@@ -1800,7 +1791,7 @@
   if (bsize == BLOCK_64X64) {
     if (cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
       cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
-          vpx_read_literal(r, DERING_REFINEMENT_BITS);
+          aom_read_literal(r, DERING_REFINEMENT_BITS);
     } else {
       cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
           0;
@@ -1813,26 +1804,26 @@
 #if !CONFIG_ANS
 static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
                                const size_t read_size,
-                               struct vpx_internal_error_info *error_info,
-                               vp10_reader *r, vpx_decrypt_cb decrypt_cb,
+                               struct aom_internal_error_info *error_info,
+                               aom_reader *r, aom_decrypt_cb decrypt_cb,
                                void *decrypt_state) {
   // Validate the calculated partition length. If the buffer
   // described by the partition can't be fully read, then restrict
   // it to the portion that can be (for EC mode) or throw an error.
   if (!read_is_valid(data, read_size, data_end))
-    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile length");
 
-  if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
-    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+  if (aom_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
+    aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate bool decoder %d", 1);
 }
 #else
 static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
                                 const size_t read_size,
-                                struct vpx_internal_error_info *error_info,
+                                struct aom_internal_error_info *error_info,
                                 struct AnsDecoder *const ans,
-                                vpx_decrypt_cb decrypt_cb,
+                                aom_decrypt_cb decrypt_cb,
                                 void *decrypt_state) {
   (void)decrypt_cb;
   (void)decrypt_state;
@@ -1840,104 +1831,103 @@
   // described by the partition can't be fully read, then restrict
   // it to the portion that can be (for EC mode) or throw an error.
   if (!read_is_valid(data, read_size, data_end))
-    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile length");
 
   if (read_size > INT_MAX || ans_read_init(ans, data, (int)read_size))
-    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate token decoder %d", 1);
 }
 #endif
 
-static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
-                                   vp10_reader *r) {
+static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
+                                   aom_reader *r) {
   int i, j, k, l, m;
 
-  if (vp10_read_bit(r))
+  if (aom_read_bit(r))
     for (i = 0; i < PLANE_TYPES; ++i)
       for (j = 0; j < REF_TYPES; ++j)
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
             for (m = 0; m < UNCONSTRAINED_NODES; ++m)
-              vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+              av1_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
 }
 
-static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
-                            vp10_reader *r) {
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
   const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
   TX_SIZE tx_size;
   for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
     read_coef_probs_common(fc->coef_probs[tx_size], r);
 #if CONFIG_ANS
-  vp10_coef_pareto_cdfs(fc);
+  av1_coef_pareto_cdfs(fc);
 #endif  // CONFIG_ANS
 }
 
-static void setup_segmentation(VP10_COMMON *const cm,
-                               struct vpx_read_bit_buffer *rb) {
+static void setup_segmentation(AV1_COMMON *const cm,
+                               struct aom_read_bit_buffer *rb) {
   struct segmentation *const seg = &cm->seg;
   int i, j;
 
   seg->update_map = 0;
   seg->update_data = 0;
 
-  seg->enabled = vpx_rb_read_bit(rb);
+  seg->enabled = aom_rb_read_bit(rb);
   if (!seg->enabled) return;
 
   // Segmentation map update
   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
     seg->update_map = 1;
   } else {
-    seg->update_map = vpx_rb_read_bit(rb);
+    seg->update_map = aom_rb_read_bit(rb);
   }
   if (seg->update_map) {
     if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
       seg->temporal_update = 0;
     } else {
-      seg->temporal_update = vpx_rb_read_bit(rb);
+      seg->temporal_update = aom_rb_read_bit(rb);
     }
   }
 
   // Segmentation data update
-  seg->update_data = vpx_rb_read_bit(rb);
+  seg->update_data = aom_rb_read_bit(rb);
   if (seg->update_data) {
-    seg->abs_delta = vpx_rb_read_bit(rb);
+    seg->abs_delta = aom_rb_read_bit(rb);
 
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
 
     for (i = 0; i < MAX_SEGMENTS; i++) {
       for (j = 0; j < SEG_LVL_MAX; j++) {
         int data = 0;
-        const int feature_enabled = vpx_rb_read_bit(rb);
+        const int feature_enabled = aom_rb_read_bit(rb);
         if (feature_enabled) {
-          vp10_enable_segfeature(seg, i, j);
-          data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
-          if (vp10_is_segfeature_signed(j))
-            data = vpx_rb_read_bit(rb) ? -data : data;
+          av1_enable_segfeature(seg, i, j);
+          data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
+          if (av1_is_segfeature_signed(j))
+            data = aom_rb_read_bit(rb) ? -data : data;
         }
-        vp10_set_segdata(seg, i, j, data);
+        av1_set_segdata(seg, i, j, data);
       }
     }
   }
 }
 
 #if CONFIG_LOOP_RESTORATION
-static void setup_restoration(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_restoration(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   int i;
   RestorationInfo *rsi = &cm->rst_info;
   int ntiles;
-  if (vpx_rb_read_bit(rb)) {
-    if (vpx_rb_read_bit(rb)) {
+  if (aom_rb_read_bit(rb)) {
+    if (aom_rb_read_bit(rb)) {
       rsi->restoration_type = RESTORE_BILATERAL;
-      ntiles = vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width,
-                                           cm->height);
-      rsi->bilateral_level = (int *)vpx_realloc(
+      ntiles =
+          av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+      rsi->bilateral_level = (int *)aom_realloc(
           rsi->bilateral_level, sizeof(*rsi->bilateral_level) * ntiles);
       assert(rsi->bilateral_level != NULL);
       for (i = 0; i < ntiles; ++i) {
-        if (vpx_rb_read_bit(rb)) {
+        if (aom_rb_read_bit(rb)) {
           rsi->bilateral_level[i] =
-              vpx_rb_read_literal(rb, vp10_bilateral_level_bits(cm));
+              aom_rb_read_literal(rb, av1_bilateral_level_bits(cm));
         } else {
           rsi->bilateral_level[i] = -1;
         }
@@ -1945,30 +1935,30 @@
     } else {
       rsi->restoration_type = RESTORE_WIENER;
       ntiles =
-          vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
-      rsi->wiener_level = (int *)vpx_realloc(
+          av1_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+      rsi->wiener_level = (int *)aom_realloc(
           rsi->wiener_level, sizeof(*rsi->wiener_level) * ntiles);
       assert(rsi->wiener_level != NULL);
-      rsi->vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+      rsi->vfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
           rsi->vfilter, sizeof(*rsi->vfilter) * ntiles);
       assert(rsi->vfilter != NULL);
-      rsi->hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+      rsi->hfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
           rsi->hfilter, sizeof(*rsi->hfilter) * ntiles);
       assert(rsi->hfilter != NULL);
       for (i = 0; i < ntiles; ++i) {
-        rsi->wiener_level[i] = vpx_rb_read_bit(rb);
+        rsi->wiener_level[i] = aom_rb_read_bit(rb);
         if (rsi->wiener_level[i]) {
-          rsi->vfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+          rsi->vfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
                                WIENER_FILT_TAP0_MINV;
-          rsi->vfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+          rsi->vfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
                                WIENER_FILT_TAP1_MINV;
-          rsi->vfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+          rsi->vfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
                                WIENER_FILT_TAP2_MINV;
-          rsi->hfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+          rsi->hfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
                                WIENER_FILT_TAP0_MINV;
-          rsi->hfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+          rsi->hfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
                                WIENER_FILT_TAP1_MINV;
-          rsi->hfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+          rsi->hfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
                                WIENER_FILT_TAP2_MINV;
         } else {
           rsi->vfilter[i][0] = rsi->vfilter[i][1] = rsi->vfilter[i][2] = 0;
@@ -1982,60 +1972,60 @@
 }
 #endif  // CONFIG_LOOP_RESTORATION
 
-static void setup_loopfilter(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   struct loopfilter *lf = &cm->lf;
-  lf->filter_level = vpx_rb_read_literal(rb, 6);
-  lf->sharpness_level = vpx_rb_read_literal(rb, 3);
+  lf->filter_level = aom_rb_read_literal(rb, 6);
+  lf->sharpness_level = aom_rb_read_literal(rb, 3);
 
   // Read in loop filter deltas applied at the MB level based on mode or ref
   // frame.
   lf->mode_ref_delta_update = 0;
 
-  lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
+  lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
   if (lf->mode_ref_delta_enabled) {
-    lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
+    lf->mode_ref_delta_update = aom_rb_read_bit(rb);
     if (lf->mode_ref_delta_update) {
       int i;
 
       for (i = 0; i < TOTAL_REFS_PER_FRAME; i++)
-        if (vpx_rb_read_bit(rb))
-          lf->ref_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+        if (aom_rb_read_bit(rb))
+          lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
 
       for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
-        if (vpx_rb_read_bit(rb))
-          lf->mode_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+        if (aom_rb_read_bit(rb))
+          lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
     }
   }
 }
 
 #if CONFIG_CLPF
-static void setup_clpf(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
-  cm->clpf = vpx_rb_read_literal(rb, 1);
+static void setup_clpf(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+  cm->clpf = aom_rb_read_literal(rb, 1);
 }
 #endif
 
 #if CONFIG_DERING
-static void setup_dering(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
-  cm->dering_level = vpx_rb_read_literal(rb, DERING_LEVEL_BITS);
+static void setup_dering(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+  cm->dering_level = aom_rb_read_literal(rb, DERING_LEVEL_BITS);
 }
 #endif  // CONFIG_DERING
 
-static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
-  return vpx_rb_read_bit(rb) ? vpx_rb_read_inv_signed_literal(rb, 6) : 0;
+static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
+  return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
 }
 
-static void setup_quantization(VP10_COMMON *const cm,
-                               struct vpx_read_bit_buffer *rb) {
-  cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
+static void setup_quantization(AV1_COMMON *const cm,
+                               struct aom_read_bit_buffer *rb) {
+  cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
   cm->y_dc_delta_q = read_delta_q(rb);
   cm->uv_dc_delta_q = read_delta_q(rb);
   cm->uv_ac_delta_q = read_delta_q(rb);
   cm->dequant_bit_depth = cm->bit_depth;
 #if CONFIG_AOM_QM
-  cm->using_qmatrix = vpx_rb_read_bit(rb);
+  cm->using_qmatrix = aom_rb_read_bit(rb);
   if (cm->using_qmatrix) {
-    cm->min_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
-    cm->max_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
+    cm->min_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
+    cm->max_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
   } else {
     cm->min_qmlevel = 0;
     cm->max_qmlevel = 0;
@@ -2043,7 +2033,7 @@
 #endif
 }
 
-static void setup_segmentation_dequant(VP10_COMMON *const cm) {
+static void setup_segmentation_dequant(AV1_COMMON *const cm) {
   // Build y/uv dequant values based on segmentation.
   int i = 0;
 #if CONFIG_AOM_QM
@@ -2060,14 +2050,14 @@
 #endif  //  CONFIG_NEW_QUANT
   if (cm->seg.enabled) {
     for (i = 0; i < MAX_SEGMENTS; ++i) {
-      const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
+      const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
       cm->y_dequant[i][0] =
-          vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
-      cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+          av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+      cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
       cm->uv_dequant[i][0] =
-          vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+          av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
       cm->uv_dequant[i][1] =
-          vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+          av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
 #if CONFIG_AOM_QM
       lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
                  cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2086,10 +2076,10 @@
 #if CONFIG_NEW_QUANT
       for (dq = 0; dq < QUANT_PROFILES; dq++) {
         for (b = 0; b < COEF_BANDS; ++b) {
-          vp10_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
-                                   cm->y_dequant_nuq[i][dq][b], NULL, dq);
-          vp10_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
-                                   cm->uv_dequant_nuq[i][dq][b], NULL, dq);
+          av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
+                                  cm->y_dequant_nuq[i][dq][b], NULL, dq);
+          av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
+                                  cm->uv_dequant_nuq[i][dq][b], NULL, dq);
         }
       }
 #endif  //  CONFIG_NEW_QUANT
@@ -2098,13 +2088,12 @@
     const int qindex = cm->base_qindex;
     // When segmentation is disabled, only the first value is used.  The
     // remaining are don't cares.
-    cm->y_dequant[0][0] =
-        vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
-    cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+    cm->y_dequant[0][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+    cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
     cm->uv_dequant[0][0] =
-        vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+        av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
     cm->uv_dequant[0][1] =
-        vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+        av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
 #if CONFIG_AOM_QM
     lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 &&
                cm->uv_ac_delta_q == 0;
@@ -2122,41 +2111,41 @@
 #if CONFIG_NEW_QUANT
     for (dq = 0; dq < QUANT_PROFILES; dq++) {
       for (b = 0; b < COEF_BANDS; ++b) {
-        vp10_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
-                                 cm->y_dequant_nuq[0][dq][b], NULL, dq);
-        vp10_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
-                                 cm->uv_dequant_nuq[0][dq][b], NULL, dq);
+        av1_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
+                                cm->y_dequant_nuq[0][dq][b], NULL, dq);
+        av1_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
+                                cm->uv_dequant_nuq[0][dq][b], NULL, dq);
       }
     }
 #endif  //  CONFIG_NEW_QUANT
   }
 }
 
-static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
-  return vpx_rb_read_bit(rb) ? SWITCHABLE
-                             : vpx_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
+static INTERP_FILTER read_interp_filter(struct aom_read_bit_buffer *rb) {
+  return aom_rb_read_bit(rb) ? SWITCHABLE
+                             : aom_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
 }
 
-static void setup_render_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   cm->render_width = cm->width;
   cm->render_height = cm->height;
-  if (vpx_rb_read_bit(rb))
-    vp10_read_frame_size(rb, &cm->render_width, &cm->render_height);
+  if (aom_rb_read_bit(rb))
+    av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
 }
 
-static void resize_mv_buffer(VP10_COMMON *cm) {
-  vpx_free(cm->cur_frame->mvs);
+static void resize_mv_buffer(AV1_COMMON *cm) {
+  aom_free(cm->cur_frame->mvs);
   cm->cur_frame->mi_rows = cm->mi_rows;
   cm->cur_frame->mi_cols = cm->mi_cols;
   CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
-                  (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+                  (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
                                        sizeof(*cm->cur_frame->mvs)));
 }
 
-static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
+static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
 #if CONFIG_SIZE_LIMIT
   if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Dimensions of %dx%d beyond allowed size of %dx%d.",
                        width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
 #endif
@@ -2166,16 +2155,16 @@
     const int new_mi_cols =
         ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
 
-    // Allocations in vp10_alloc_context_buffers() depend on individual
+    // Allocations in av1_alloc_context_buffers() depend on individual
     // dimensions as well as the overall size.
     if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
-      if (vp10_alloc_context_buffers(cm, width, height))
-        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+      if (av1_alloc_context_buffers(cm, width, height))
+        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                            "Failed to allocate context buffers");
     } else {
-      vp10_set_mb_mi(cm, width, height);
+      av1_set_mb_mi(cm, width, height);
     }
-    vp10_init_context_buffers(cm);
+    av1_init_context_buffers(cm);
     cm->width = width;
     cm->height = height;
   }
@@ -2185,25 +2174,25 @@
   }
 }
 
-static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   int width, height;
   BufferPool *const pool = cm->buffer_pool;
-  vp10_read_frame_size(rb, &width, &height);
+  av1_read_frame_size(rb, &width, &height);
   resize_context_buffers(cm, width, height);
   setup_render_size(cm, rb);
 
   lock_buffer_pool(pool);
-  if (vpx_realloc_frame_buffer(
+  if (aom_realloc_frame_buffer(
           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
           cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+          AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
           pool->cb_priv)) {
     unlock_buffer_pool(pool);
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate frame buffer");
   }
   unlock_buffer_pool(pool);
@@ -2217,22 +2206,22 @@
   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
 }
 
-static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
+static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
                                           int ref_xss, int ref_yss,
-                                          vpx_bit_depth_t this_bit_depth,
+                                          aom_bit_depth_t this_bit_depth,
                                           int this_xss, int this_yss) {
   return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
          ref_yss == this_yss;
 }
 
-static void setup_frame_size_with_refs(VP10_COMMON *cm,
-                                       struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size_with_refs(AV1_COMMON *cm,
+                                       struct aom_read_bit_buffer *rb) {
   int width, height;
   int found = 0, i;
   int has_valid_ref_frame = 0;
   BufferPool *const pool = cm->buffer_pool;
   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
-    if (vpx_rb_read_bit(rb)) {
+    if (aom_rb_read_bit(rb)) {
       YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
       width = buf->y_crop_width;
       height = buf->y_crop_height;
@@ -2244,12 +2233,12 @@
   }
 
   if (!found) {
-    vp10_read_frame_size(rb, &width, &height);
+    av1_read_frame_size(rb, &width, &height);
     setup_render_size(cm, rb);
   }
 
   if (width <= 0 || height <= 0)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Invalid frame size");
 
   // Check to make sure at least one of frames that this frame references
@@ -2261,7 +2250,7 @@
                              ref_frame->buf->y_crop_height, width, height);
   }
   if (!has_valid_ref_frame)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Referenced frame has invalid size");
   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
     RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -2269,24 +2258,24 @@
                                  ref_frame->buf->subsampling_x,
                                  ref_frame->buf->subsampling_y, cm->bit_depth,
                                  cm->subsampling_x, cm->subsampling_y))
-      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                          "Referenced frame has incompatible color format");
   }
 
   resize_context_buffers(cm, width, height);
 
   lock_buffer_pool(pool);
-  if (vpx_realloc_frame_buffer(
+  if (aom_realloc_frame_buffer(
           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
           cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+          AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
           pool->cb_priv)) {
     unlock_buffer_pool(pool);
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate frame buffer");
   }
   unlock_buffer_pool(pool);
@@ -2300,27 +2289,27 @@
   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
 }
 
-static void read_tile_info(VP10Decoder *const pbi,
-                           struct vpx_read_bit_buffer *const rb) {
-  VP10_COMMON *const cm = &pbi->common;
+static void read_tile_info(AV1Decoder *const pbi,
+                           struct aom_read_bit_buffer *const rb) {
+  AV1_COMMON *const cm = &pbi->common;
 #if CONFIG_EXT_TILE
 // Read the tile width/height
 #if CONFIG_EXT_PARTITION
   if (cm->sb_size == BLOCK_128X128) {
-    cm->tile_width = vpx_rb_read_literal(rb, 5) + 1;
-    cm->tile_height = vpx_rb_read_literal(rb, 5) + 1;
+    cm->tile_width = aom_rb_read_literal(rb, 5) + 1;
+    cm->tile_height = aom_rb_read_literal(rb, 5) + 1;
   } else
 #endif  // CONFIG_EXT_PARTITION
   {
-    cm->tile_width = vpx_rb_read_literal(rb, 6) + 1;
-    cm->tile_height = vpx_rb_read_literal(rb, 6) + 1;
+    cm->tile_width = aom_rb_read_literal(rb, 6) + 1;
+    cm->tile_height = aom_rb_read_literal(rb, 6) + 1;
   }
 
   cm->tile_width <<= cm->mib_size_log2;
   cm->tile_height <<= cm->mib_size_log2;
 
-  cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
-  cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+  cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
+  cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
 
   // Get the number of tiles
   cm->tile_cols = 1;
@@ -2331,25 +2320,25 @@
 
   if (cm->tile_cols * cm->tile_rows > 1) {
     // Read the number of bytes used to store tile size
-    pbi->tile_col_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
-    pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+    pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
+    pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
   }
 #else
   int min_log2_tile_cols, max_log2_tile_cols, max_ones;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   // columns
   max_ones = max_log2_tile_cols - min_log2_tile_cols;
   cm->log2_tile_cols = min_log2_tile_cols;
-  while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
+  while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
 
   if (cm->log2_tile_cols > 6)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Invalid number of tile columns");
 
   // rows
-  cm->log2_tile_rows = vpx_rb_read_bit(rb);
-  if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
+  cm->log2_tile_rows = aom_rb_read_bit(rb);
+  if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
 
   cm->tile_cols = 1 << cm->log2_tile_cols;
   cm->tile_rows = 1 << cm->log2_tile_rows;
@@ -2365,7 +2354,7 @@
 
   // tile size magnitude
   if (cm->tile_rows > 1 || cm->tile_cols > 1) {
-    pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+    pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
   }
 #endif  // CONFIG_EXT_TILE
 }
@@ -2384,8 +2373,8 @@
 // Reads the next tile returning its size and adjusting '*data' accordingly
 // based on 'is_last'.
 static void get_tile_buffer(const uint8_t *const data_end,
-                            struct vpx_internal_error_info *error_info,
-                            const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+                            struct aom_internal_error_info *error_info,
+                            const uint8_t **data, aom_decrypt_cb decrypt_cb,
                             void *decrypt_state,
                             TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
                             int tile_size_bytes, int col, int row) {
@@ -2395,7 +2384,7 @@
   const uint8_t *copy_data = NULL;
 
   if (!read_is_valid(*data, tile_size_bytes, data_end))
-    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile length");
   if (decrypt_cb) {
     uint8_t be_data[4];
@@ -2421,7 +2410,7 @@
   *data += tile_size_bytes;
 
   if (size > (size_t)(data_end - *data))
-    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile size");
 
   if (size > 0) {
@@ -2438,9 +2427,9 @@
 }
 
 static void get_tile_buffers(
-    VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
     TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
   const int have_tiles = tile_cols * tile_rows > 1;
@@ -2459,11 +2448,11 @@
     const uint8_t *tile_col_data_end[MAX_TILE_COLS];
     const uint8_t *const data_start = data;
 
-    const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+    const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
     const int single_row = pbi->dec_tile_row >= 0;
     const int tile_rows_start = single_row ? dec_tile_row : 0;
     const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
-    const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+    const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
     const int single_col = pbi->dec_tile_col >= 0;
     const int tile_cols_start = single_col ? dec_tile_col : 0;
     const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2529,14 +2518,14 @@
 // based on 'is_last'.
 static void get_tile_buffer(const uint8_t *const data_end,
                             const int tile_size_bytes, int is_last,
-                            struct vpx_internal_error_info *error_info,
-                            const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+                            struct aom_internal_error_info *error_info,
+                            const uint8_t **data, aom_decrypt_cb decrypt_cb,
                             void *decrypt_state, TileBufferDec *const buf) {
   size_t size;
 
   if (!is_last) {
     if (!read_is_valid(*data, 4, data_end))
-      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+      aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                          "Truncated packet or corrupt tile length");
 
     if (decrypt_cb) {
@@ -2549,7 +2538,7 @@
     *data += tile_size_bytes;
 
     if (size > (size_t)(data_end - *data))
-      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+      aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
                          "Truncated packet or corrupt tile size");
   } else {
     size = data_end - *data;
@@ -2562,9 +2551,9 @@
 }
 
 static void get_tile_buffers(
-    VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
     TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   int r, c;
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
@@ -2581,20 +2570,20 @@
 }
 #endif  // CONFIG_EXT_TILE
 
-static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
                                    const uint8_t *data_end) {
-  VP10_COMMON *const cm = &pbi->common;
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+  AV1_COMMON *const cm = &pbi->common;
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
   const int n_tiles = tile_cols * tile_rows;
   TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
 #if CONFIG_EXT_TILE
-  const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
   const int single_row = pbi->dec_tile_row >= 0;
   const int tile_rows_start = single_row ? dec_tile_row : 0;
   const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
-  const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
   const int single_col = pbi->dec_tile_col >= 0;
   const int tile_cols_start = single_col ? dec_tile_col : 0;
   const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2617,10 +2606,10 @@
   if (cm->lf.filter_level && !cm->skip_loop_filter &&
       pbi->lf_worker.data1 == NULL) {
     CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
-                    vpx_memalign(32, sizeof(LFWorkerData)));
-    pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
+                    aom_memalign(32, sizeof(LFWorkerData)));
+    pbi->lf_worker.hook = (AVxWorkerHook)av1_loop_filter_worker;
     if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
-      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+      aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                          "Loop filter thread creation failed");
     }
   }
@@ -2629,8 +2618,8 @@
     LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
     // Be sure to sync as we might be resuming after a failed frame decode.
     winterface->sync(&pbi->lf_worker);
-    vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
-                                pbi->mb.plane);
+    av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+                               pbi->mb.plane);
   }
 
   assert(tile_rows <= MAX_TILE_ROWS);
@@ -2639,9 +2628,9 @@
   get_tile_buffers(pbi, data, data_end, tile_buffers);
 
   if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
-    vpx_free(pbi->tile_data);
+    aom_free(pbi->tile_data);
     CHECK_MEM_ERROR(cm, pbi->tile_data,
-                    vpx_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
+                    aom_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
     pbi->allocated_tiles = n_tiles;
   }
 
@@ -2658,8 +2647,8 @@
           cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
               ? &cm->counts
               : NULL;
-      vp10_zero(td->dqcoeff);
-      vp10_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
+      av1_zero(td->dqcoeff);
+      av1_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
 #if !CONFIG_ANS
       setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
                          &td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
@@ -2667,7 +2656,7 @@
       setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                           &td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
 #endif
-      vp10_init_macroblockd(cm, &td->xd, td->dqcoeff);
+      av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
       td->xd.plane[0].color_index_map = td->color_index_map[0];
       td->xd.plane[1].color_index_map = td->color_index_map[1];
     }
@@ -2678,21 +2667,21 @@
     int mi_row = 0;
     TileInfo tile_info;
 
-    vp10_tile_set_row(&tile_info, cm, row);
+    av1_tile_set_row(&tile_info, cm, row);
 
     for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
       const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
       TileData *const td = pbi->tile_data + tile_cols * row + col;
 
-      vp10_tile_set_col(&tile_info, cm, col);
+      av1_tile_set_col(&tile_info, cm, col);
 
-      vp10_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
+      av1_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
 
       for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
            mi_row += cm->mib_size) {
         int mi_col;
 
-        vp10_zero_left_context(&td->xd);
+        av1_zero_left_context(&td->xd);
 
         for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
              mi_col += cm->mib_size) {
@@ -2705,18 +2694,18 @@
         }
         pbi->mb.corrupted |= td->xd.corrupted;
         if (pbi->mb.corrupted)
-          vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+          aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                              "Failed to decode tile data");
 #if CONFIG_ENTROPY
         if (cm->do_subframe_update &&
             cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
           if ((mi_row + MI_SIZE) %
                       (MI_SIZE *
-                       VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+                       AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
                   0 &&
               mi_row + MI_SIZE < cm->mi_rows &&
               cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
-            vp10_partial_adapt_probs(cm, mi_row, mi_col);
+            av1_partial_adapt_probs(cm, mi_row, mi_col);
             ++cm->coef_probs_update_idx;
           }
         }
@@ -2730,7 +2719,7 @@
     // Loopfilter one tile row.
     if (cm->lf.filter_level && !cm->skip_loop_filter) {
       LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
-      const int lf_start = VPXMAX(0, tile_info.mi_row_start - cm->mib_size);
+      const int lf_start = AOMMAX(0, tile_info.mi_row_start - cm->mib_size);
       const int lf_end = tile_info.mi_row_end - cm->mib_size;
 
       // Delay the loopfilter if the first tile row is only
@@ -2753,14 +2742,14 @@
     // After loopfiltering, the last 7 row pixels in each superblock row may
     // still be changed by the longest loopfilter of the next superblock row.
     if (cm->frame_parallel_decode)
-      vp10_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
+      av1_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
 #endif  // !CONFIG_VAR_TX
   }
 
 #if CONFIG_VAR_TX
   // Loopfilter the whole frame.
-  vp10_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
-                         cm->lf.filter_level, 0, 0);
+  av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+                        cm->lf.filter_level, 0, 0);
 #else
   // Loopfilter remaining rows in the frame.
   if (cm->lf.filter_level && !cm->skip_loop_filter) {
@@ -2773,16 +2762,16 @@
 #endif  // CONFIG_VAR_TX
 #if CONFIG_CLPF
   if (cm->clpf && !cm->skip_loop_filter)
-    vp10_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+    av1_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
 #endif
 #if CONFIG_DERING
   if (cm->dering_level && !cm->skip_loop_filter) {
-    vp10_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
+    av1_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
   }
 #endif  // CONFIG_DERING
 
   if (cm->frame_parallel_decode)
-    vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
+    av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
 
 #if CONFIG_EXT_TILE
   if (n_tiles == 1) {
@@ -2790,7 +2779,7 @@
     return data_end;
 #else
     // Find the end of the single tile buffer
-    return vpx_reader_find_end(&pbi->tile_data->bit_reader);
+    return aom_reader_find_end(&pbi->tile_data->bit_reader);
 #endif  // CONFIG_ANS
   } else {
     // Return the end of the last tile buffer
@@ -2803,7 +2792,7 @@
   {
     // Get last tile data.
     TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
-    return vpx_reader_find_end(&td->bit_reader);
+    return aom_reader_find_end(&td->bit_reader);
   }
 #endif  // CONFIG_ANS
 #endif  // CONFIG_EXT_TILE
@@ -2811,8 +2800,8 @@
 
 static int tile_worker_hook(TileWorkerData *const tile_data,
                             const TileInfo *const tile) {
-  VP10Decoder *const pbi = tile_data->pbi;
-  const VP10_COMMON *const cm = &pbi->common;
+  AV1Decoder *const pbi = tile_data->pbi;
+  const AV1_COMMON *const cm = &pbi->common;
   int mi_row, mi_col;
 
   if (setjmp(tile_data->error_info.jmp)) {
@@ -2824,11 +2813,11 @@
   tile_data->error_info.setjmp = 1;
   tile_data->xd.error_info = &tile_data->error_info;
 
-  vp10_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
+  av1_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
 
   for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
        mi_row += cm->mib_size) {
-    vp10_zero_left_context(&tile_data->xd);
+    av1_zero_left_context(&tile_data->xd);
 
     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
          mi_col += cm->mib_size) {
@@ -2850,20 +2839,20 @@
   return (int)(buf2->size - buf1->size);
 }
 
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
                                       const uint8_t *data_end) {
-  VP10_COMMON *const cm = &pbi->common;
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+  AV1_COMMON *const cm = &pbi->common;
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
-  const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols);
+  const int num_workers = AOMMIN(pbi->max_threads & ~1, tile_cols);
   TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
 #if CONFIG_EXT_TILE
-  const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
   const int single_row = pbi->dec_tile_row >= 0;
   const int tile_rows_start = single_row ? dec_tile_row : 0;
   const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
-  const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
   const int single_col = pbi->dec_tile_col >= 0;
   const int tile_cols_start = single_col ? dec_tile_col : 0;
   const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2895,22 +2884,22 @@
   if (pbi->num_tile_workers == 0) {
     const int num_threads = pbi->max_threads & ~1;
     CHECK_MEM_ERROR(cm, pbi->tile_workers,
-                    vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
+                    aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
     // Ensure tile data offsets will be properly aligned. This may fail on
     // platforms without DECLARE_ALIGNED().
     assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
     CHECK_MEM_ERROR(
         cm, pbi->tile_worker_data,
-        vpx_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
+        aom_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
     CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
-                    vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
+                    aom_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
     for (i = 0; i < num_threads; ++i) {
-      VPxWorker *const worker = &pbi->tile_workers[i];
+      AVxWorker *const worker = &pbi->tile_workers[i];
       ++pbi->num_tile_workers;
 
       winterface->init(worker);
       if (i < num_threads - 1 && !winterface->reset(worker)) {
-        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+        aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                            "Tile decoder thread creation failed");
       }
     }
@@ -2918,9 +2907,9 @@
 
   // Reset tile decoding hook
   for (i = 0; i < num_workers; ++i) {
-    VPxWorker *const worker = &pbi->tile_workers[i];
+    AVxWorker *const worker = &pbi->tile_workers[i];
     winterface->sync(worker);
-    worker->hook = (VPxWorkerHook)tile_worker_hook;
+    worker->hook = (AVxWorkerHook)tile_worker_hook;
     worker->data1 = &pbi->tile_worker_data[i];
     worker->data2 = &pbi->tile_worker_info[i];
   }
@@ -2929,7 +2918,7 @@
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
     for (i = 0; i < num_workers; ++i) {
       TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
-      vp10_zero(twd->counts);
+      av1_zero(twd->counts);
     }
   }
 
@@ -2950,7 +2939,7 @@
       int group_start;
       for (group_start = tile_cols_start; group_start < tile_cols_end;
            group_start += num_workers) {
-        const int group_end = VPXMIN(group_start + num_workers, tile_cols);
+        const int group_end = AOMMIN(group_start + num_workers, tile_cols);
         const TileBufferDec largest = tile_buffers[tile_row][group_start];
         memmove(&tile_buffers[tile_row][group_start],
                 &tile_buffers[tile_row][group_start + 1],
@@ -2964,7 +2953,7 @@
       for (i = 0; i < num_workers && tile_col < tile_cols_end;
            ++i, ++tile_col) {
         TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
-        VPxWorker *const worker = &pbi->tile_workers[i];
+        AVxWorker *const worker = &pbi->tile_workers[i];
         TileWorkerData *const twd = (TileWorkerData *)worker->data1;
         TileInfo *const tile_info = (TileInfo *)worker->data2;
 
@@ -2975,9 +2964,9 @@
             cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
                 ? &twd->counts
                 : NULL;
-        vp10_zero(twd->dqcoeff);
-        vp10_tile_init(tile_info, cm, tile_row, buf->col);
-        vp10_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
+        av1_zero(twd->dqcoeff);
+        av1_tile_init(tile_info, cm, tile_row, buf->col);
+        av1_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
 #if !CONFIG_ANS
         setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
                            &twd->bit_reader, pbi->decrypt_cb,
@@ -2987,7 +2976,7 @@
                             &twd->bit_reader, pbi->decrypt_cb,
                             pbi->decrypt_state);
 #endif  // CONFIG_ANS
-        vp10_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
+        av1_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
         twd->xd.plane[0].color_index_map = twd->color_index_map[0];
         twd->xd.plane[1].color_index_map = twd->color_index_map[1];
 
@@ -3007,9 +2996,9 @@
 
       // Sync all workers
       for (; i > 0; --i) {
-        VPxWorker *const worker = &pbi->tile_workers[i - 1];
+        AVxWorker *const worker = &pbi->tile_workers[i - 1];
         // TODO(jzern): The tile may have specific error data associated with
-        // its vpx_internal_error_info which could be propagated to the main
+        // its aom_internal_error_info which could be propagated to the main
         // info in cm. Additionally once the threads have been synced and an
         // error is detected, there's no point in continuing to decode tiles.
         pbi->mb.corrupted |= !winterface->sync(worker);
@@ -3021,7 +3010,7 @@
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
     for (i = 0; i < num_workers; ++i) {
       TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
-      vp10_accumulate_frame_counts(cm, &twd->counts);
+      av1_accumulate_frame_counts(cm, &twd->counts);
     }
   }
 
@@ -3036,42 +3025,42 @@
   {
     TileWorkerData *const twd =
         (TileWorkerData *)pbi->tile_workers[final_worker].data1;
-    return vpx_reader_find_end(&twd->bit_reader);
+    return aom_reader_find_end(&twd->bit_reader);
   }
 #endif  // CONFIG_ANS
 #endif  // CONFIG_EXT_TILE
 }
 
 static void error_handler(void *data) {
-  VP10_COMMON *const cm = (VP10_COMMON *)data;
-  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+  AV1_COMMON *const cm = (AV1_COMMON *)data;
+  aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
 }
 
-static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
-                                              struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
+                                              struct aom_read_bit_buffer *rb) {
   if (cm->profile >= PROFILE_2) {
-    cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
-#if CONFIG_VP9_HIGHBITDEPTH
+    cm->bit_depth = aom_rb_read_bit(rb) ? AOM_BITS_12 : AOM_BITS_10;
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = 1;
 #endif
   } else {
-    cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VP9_HIGHBITDEPTH
+    cm->bit_depth = AOM_BITS_8;
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = 0;
 #endif
   }
-  cm->color_space = vpx_rb_read_literal(rb, 3);
-  if (cm->color_space != VPX_CS_SRGB) {
+  cm->color_space = aom_rb_read_literal(rb, 3);
+  if (cm->color_space != AOM_CS_SRGB) {
     // [16,235] (including xvycc) vs [0,255] range
-    cm->color_range = vpx_rb_read_bit(rb);
+    cm->color_range = aom_rb_read_bit(rb);
     if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
-      cm->subsampling_x = vpx_rb_read_bit(rb);
-      cm->subsampling_y = vpx_rb_read_bit(rb);
+      cm->subsampling_x = aom_rb_read_bit(rb);
+      cm->subsampling_y = aom_rb_read_bit(rb);
       if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
-        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                            "4:2:0 color not supported in profile 1 or 3");
-      if (vpx_rb_read_bit(rb))
-        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      if (aom_rb_read_bit(rb))
+        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reserved bit set");
     } else {
       cm->subsampling_y = cm->subsampling_x = 1;
@@ -3081,19 +3070,19 @@
       // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
       // 4:2:2 or 4:4:0 chroma sampling is not allowed.
       cm->subsampling_y = cm->subsampling_x = 0;
-      if (vpx_rb_read_bit(rb))
-        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      if (aom_rb_read_bit(rb))
+        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reserved bit set");
     } else {
-      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                          "4:4:4 color not supported in profile 0 or 2");
     }
   }
 }
 
-static size_t read_uncompressed_header(VP10Decoder *pbi,
-                                       struct vpx_read_bit_buffer *rb) {
-  VP10_COMMON *const cm = &pbi->common;
+static size_t read_uncompressed_header(AV1Decoder *pbi,
+                                       struct aom_read_bit_buffer *rb) {
+  AV1_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   BufferPool *const pool = cm->buffer_pool;
   RefCntBuffer *const frame_bufs = pool->frame_bufs;
@@ -3111,31 +3100,31 @@
   cm->is_reference_frame = 1;
 #endif  // CONFIG_EXT_REFS
 
-  if (vpx_rb_read_literal(rb, 2) != VPX_FRAME_MARKER)
-    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+  if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
+    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Invalid frame marker");
 
-  cm->profile = vp10_read_profile(rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+  cm->profile = av1_read_profile(rb);
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->profile >= MAX_PROFILES)
-    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Unsupported bitstream profile");
 #else
   if (cm->profile >= PROFILE_2)
-    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Unsupported bitstream profile");
 #endif
 
-  cm->show_existing_frame = vpx_rb_read_bit(rb);
+  cm->show_existing_frame = aom_rb_read_bit(rb);
 
   if (cm->show_existing_frame) {
     // Show an existing frame directly.
-    const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
+    const int frame_to_show = cm->ref_frame_map[aom_rb_read_literal(rb, 3)];
 
     lock_buffer_pool(pool);
     if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
       unlock_buffer_pool(pool);
-      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                          "Buffer %d does not contain a decoded frame",
                          frame_to_show);
     }
@@ -3154,13 +3143,13 @@
     return 0;
   }
 
-  cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
-  cm->show_frame = vpx_rb_read_bit(rb);
-  cm->error_resilient_mode = vpx_rb_read_bit(rb);
+  cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
+  cm->show_frame = aom_rb_read_bit(rb);
+  cm->error_resilient_mode = aom_rb_read_bit(rb);
 
   if (cm->frame_type == KEY_FRAME) {
-    if (!vp10_read_sync_code(rb))
-      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+    if (!av1_read_sync_code(rb))
+      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                          "Invalid frame sync code");
 
     read_bitdepth_colorspace_sampling(cm, rb);
@@ -3177,43 +3166,43 @@
       pbi->need_resync = 0;
     }
     if (frame_is_intra_only(cm))
-      cm->allow_screen_content_tools = vpx_rb_read_bit(rb);
+      cm->allow_screen_content_tools = aom_rb_read_bit(rb);
   } else {
-    cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
+    cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
 
     if (cm->error_resilient_mode) {
       cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
     } else {
       if (cm->intra_only) {
-        cm->reset_frame_context = vpx_rb_read_bit(rb)
+        cm->reset_frame_context = aom_rb_read_bit(rb)
                                       ? RESET_FRAME_CONTEXT_ALL
                                       : RESET_FRAME_CONTEXT_CURRENT;
       } else {
-        cm->reset_frame_context = vpx_rb_read_bit(rb)
+        cm->reset_frame_context = aom_rb_read_bit(rb)
                                       ? RESET_FRAME_CONTEXT_CURRENT
                                       : RESET_FRAME_CONTEXT_NONE;
         if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
-          cm->reset_frame_context = vpx_rb_read_bit(rb)
+          cm->reset_frame_context = aom_rb_read_bit(rb)
                                         ? RESET_FRAME_CONTEXT_ALL
                                         : RESET_FRAME_CONTEXT_CURRENT;
       }
     }
 
     if (cm->intra_only) {
-      if (!vp10_read_sync_code(rb))
-        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      if (!av1_read_sync_code(rb))
+        aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                            "Invalid frame sync code");
 
       read_bitdepth_colorspace_sampling(cm, rb);
 
-      pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+      pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
       setup_frame_size(cm, rb);
       if (pbi->need_resync) {
         memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
         pbi->need_resync = 0;
       }
     } else if (pbi->need_resync != 1) { /* Skip if need resync */
-      pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+      pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
 
 #if CONFIG_EXT_REFS
       if (!pbi->refresh_frame_flags) {
@@ -3224,35 +3213,35 @@
 #endif  // CONFIG_EXT_REFS
 
       for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
-        const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
+        const int ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
         const int idx = cm->ref_frame_map[ref];
         RefBuffer *const ref_frame = &cm->frame_refs[i];
         ref_frame->idx = idx;
         ref_frame->buf = &frame_bufs[idx].buf;
-        cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
+        cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
       }
 
       setup_frame_size_with_refs(cm, rb);
 
-      cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
+      cm->allow_high_precision_mv = aom_rb_read_bit(rb);
       cm->interp_filter = read_interp_filter(rb);
 
       for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
         RefBuffer *const ref_buf = &cm->frame_refs[i];
-#if CONFIG_VP9_HIGHBITDEPTH
-        vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+        av1_setup_scale_factors_for_frame(
             &ref_buf->sf, ref_buf->buf->y_crop_width,
             ref_buf->buf->y_crop_height, cm->width, cm->height,
             cm->use_highbitdepth);
 #else
-        vp10_setup_scale_factors_for_frame(
+        av1_setup_scale_factors_for_frame(
             &ref_buf->sf, ref_buf->buf->y_crop_width,
             ref_buf->buf->y_crop_height, cm->width, cm->height);
 #endif
       }
     }
   }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
 #endif
   get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -3261,22 +3250,22 @@
   get_frame_new_buffer(cm)->render_height = cm->render_height;
 
   if (pbi->need_resync) {
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Keyframe / intra-only frame required to reset decoder"
                        " state");
   }
 
   if (!cm->error_resilient_mode) {
-    cm->refresh_frame_context = vpx_rb_read_bit(rb)
+    cm->refresh_frame_context = aom_rb_read_bit(rb)
                                     ? REFRESH_FRAME_CONTEXT_FORWARD
                                     : REFRESH_FRAME_CONTEXT_BACKWARD;
   } else {
     cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
   }
 
-  // This flag will be overridden by the call to vp10_setup_past_independence
+  // This flag will be overridden by the call to av1_setup_past_independence
   // below, forcing the use of context 0 for those frame types.
-  cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+  cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
 
   // Generate next_ref_frame_map.
   lock_buffer_pool(pool);
@@ -3304,10 +3293,10 @@
   pbi->hold_ref_buf = 1;
 
   if (frame_is_intra_only(cm) || cm->error_resilient_mode)
-    vp10_setup_past_independence(cm);
+    av1_setup_past_independence(cm);
 
 #if CONFIG_EXT_PARTITION
-  set_sb_size(cm, vpx_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
+  set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
 #else
   set_sb_size(cm, BLOCK_64X64);
 #endif  // CONFIG_EXT_PARTITION
@@ -3323,12 +3312,12 @@
   setup_restoration(cm, rb);
 #endif  // CONFIG_LOOP_RESTORATION
   setup_quantization(cm, rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   xd->bd = (int)cm->bit_depth;
 #endif
 
 #if CONFIG_ENTROPY
-  vp10_default_coef_probs(cm);
+  av1_default_coef_probs(cm);
   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
       cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
     for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
@@ -3343,7 +3332,7 @@
     int i;
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       const int qindex = cm->seg.enabled
-                             ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                             ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
                              : cm->base_qindex;
       xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
                         cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -3356,36 +3345,36 @@
   cm->reference_mode = read_frame_reference_mode(cm, rb);
 
   read_tile_info(pbi, rb);
-  sz = vpx_rb_read_literal(rb, 16);
+  sz = aom_rb_read_literal(rb, 16);
 
   if (sz == 0)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Invalid header size");
 
   return sz;
 }
 
 #if CONFIG_EXT_TX
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j, k;
   int s;
   for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
-    if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+    if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
       for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
         if (!use_inter_ext_tx_for_txsize[s][i]) continue;
         for (j = 0; j < num_ext_tx_set_inter[s] - 1; ++j)
-          vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
+          av1_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
       }
     }
   }
 
   for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
-    if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+    if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
       for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
         if (!use_intra_ext_tx_for_txsize[s][i]) continue;
         for (j = 0; j < INTRA_MODES; ++j)
           for (k = 0; k < num_ext_tx_set_intra[s] - 1; ++k)
-            vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
+            av1_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
       }
     }
   }
@@ -3393,31 +3382,31 @@
 
 #else
 
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j, k;
-  if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+  if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (j = 0; j < TX_TYPES; ++j)
         for (k = 0; k < TX_TYPES - 1; ++k)
-          vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
+          av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
     }
   }
-  if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+  if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (k = 0; k < TX_TYPES - 1; ++k)
-        vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
+        av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
     }
   }
 }
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_SUPERTX
-static void read_supertx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j;
-  if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+  if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
     for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
       for (j = 1; j < TX_SIZES; ++j) {
-        vp10_diff_update_prob(r, &fc->supertx_prob[i][j]);
+        av1_diff_update_prob(r, &fc->supertx_prob[i][j]);
       }
     }
   }
@@ -3426,44 +3415,44 @@
 
 #if CONFIG_GLOBAL_MOTION
 static void read_global_motion_params(Global_Motion_Params *params,
-                                      vpx_prob *probs, vp10_reader *r) {
+                                      aom_prob *probs, aom_reader *r) {
   GLOBAL_MOTION_TYPE gmtype =
-      vp10_read_tree(r, vp10_global_motion_types_tree, probs);
+      aom_read_tree(r, av1_global_motion_types_tree, probs);
   params->gmtype = gmtype;
   params->motion_params.wmtype = gm_to_trans_type(gmtype);
   switch (gmtype) {
     case GLOBAL_ZERO: break;
     case GLOBAL_AFFINE:
       params->motion_params.wmmat[4] =
-          (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+          (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
            GM_ALPHA_DECODE_FACTOR);
       params->motion_params.wmmat[5] =
-          vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+          aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
               GM_ALPHA_DECODE_FACTOR +
           (1 << WARPEDMODEL_PREC_BITS);
     // fallthrough intended
     case GLOBAL_ROTZOOM:
       params->motion_params.wmmat[2] =
-          (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+          (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
            GM_ALPHA_DECODE_FACTOR) +
           (1 << WARPEDMODEL_PREC_BITS);
       params->motion_params.wmmat[3] =
-          vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+          aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
           GM_ALPHA_DECODE_FACTOR;
     // fallthrough intended
     case GLOBAL_TRANSLATION:
       params->motion_params.wmmat[0] =
-          vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+          aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
           GM_TRANS_DECODE_FACTOR;
       params->motion_params.wmmat[1] =
-          vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+          aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
           GM_TRANS_DECODE_FACTOR;
       break;
     default: assert(0);
   }
 }
 
-static void read_global_motion(VP10_COMMON *cm, vp10_reader *r) {
+static void read_global_motion(AV1_COMMON *cm, aom_reader *r) {
   int frame;
   memset(cm->global_motion, 0, sizeof(cm->global_motion));
   for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
@@ -3473,24 +3462,24 @@
 }
 #endif  // CONFIG_GLOBAL_MOTION
 
-static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
+static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
                                   size_t partition_size) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
 #if CONFIG_SUPERTX
   MACROBLOCKD *const xd = &pbi->mb;
 #endif
   FRAME_CONTEXT *const fc = cm->fc;
-  vp10_reader r;
+  aom_reader r;
   int k, i, j;
 
 #if !CONFIG_ANS
-  if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
+  if (aom_reader_init(&r, data, partition_size, pbi->decrypt_cb,
                       pbi->decrypt_state))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate bool decoder 0");
 #else
   if (ans_read_init(&r, data, partition_size))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate compressed header ANS decoder");
 #endif  // !CONFIG_ANS
 
@@ -3498,56 +3487,56 @@
     for (i = 0; i < TX_SIZES - 1; ++i)
       for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
         for (k = 0; k < i + 1; ++k)
-          vp10_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
+          av1_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
   }
 
   read_coef_probs(fc, cm->tx_mode, &r);
 
 #if CONFIG_VAR_TX
   for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
-    vp10_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
+    av1_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
 #endif
 
   for (k = 0; k < SKIP_CONTEXTS; ++k)
-    vp10_diff_update_prob(&r, &fc->skip_probs[k]);
+    av1_diff_update_prob(&r, &fc->skip_probs[k]);
 
   if (cm->seg.enabled && cm->seg.update_map) {
     if (cm->seg.temporal_update) {
       for (k = 0; k < PREDICTION_PROBS; k++)
-        vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
+        av1_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
     }
     for (k = 0; k < MAX_SEGMENTS - 1; k++)
-      vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
+      av1_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
   }
 
   for (j = 0; j < INTRA_MODES; j++)
     for (i = 0; i < INTRA_MODES - 1; ++i)
-      vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
+      av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
 
 #if CONFIG_EXT_PARTITION_TYPES
   for (i = 0; i < PARTITION_TYPES - 1; ++i)
-    vp10_diff_update_prob(&r, &fc->partition_prob[0][i]);
+    av1_diff_update_prob(&r, &fc->partition_prob[0][i]);
   for (j = 1; j < PARTITION_CONTEXTS; ++j)
     for (i = 0; i < EXT_PARTITION_TYPES - 1; ++i)
-      vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+      av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
 #else
   for (j = 0; j < PARTITION_CONTEXTS; ++j)
     for (i = 0; i < PARTITION_TYPES - 1; ++i)
-      vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+      av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_EXT_INTRA
   for (i = 0; i < INTRA_FILTERS + 1; ++i)
     for (j = 0; j < INTRA_FILTERS - 1; ++j)
-      vp10_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
+      av1_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
 #endif  // CONFIG_EXT_INTRA
 
   if (frame_is_intra_only(cm)) {
-    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+    av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
     for (k = 0; k < INTRA_MODES; k++)
       for (j = 0; j < INTRA_MODES; j++)
         for (i = 0; i < INTRA_MODES - 1; ++i)
-          vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
+          av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
   } else {
 #if !CONFIG_REF_MV
     nmv_context *const nmvc = &fc->nmvc;
@@ -3560,23 +3549,23 @@
     if (cm->reference_mode != COMPOUND_REFERENCE) {
       for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
         if (is_interintra_allowed_bsize_group(i)) {
-          vp10_diff_update_prob(&r, &fc->interintra_prob[i]);
+          av1_diff_update_prob(&r, &fc->interintra_prob[i]);
         }
       }
       for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
         for (j = 0; j < INTERINTRA_MODES - 1; j++)
-          vp10_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
+          av1_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
       }
       for (i = 0; i < BLOCK_SIZES; i++) {
         if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) {
-          vp10_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
+          av1_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
         }
       }
     }
     if (cm->reference_mode != SINGLE_REFERENCE) {
       for (i = 0; i < BLOCK_SIZES; i++) {
         if (is_interinter_wedge_used(i)) {
-          vp10_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
+          av1_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
         }
       }
     }
@@ -3585,14 +3574,14 @@
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
     for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i) {
       for (j = 0; j < MOTION_VARIATIONS - 1; ++j)
-        vp10_diff_update_prob(&r, &fc->motvar_prob[i][j]);
+        av1_diff_update_prob(&r, &fc->motvar_prob[i][j]);
     }
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
     if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-      vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+      av1_diff_update_prob(&r, &fc->intra_inter_prob[i]);
 
     if (cm->reference_mode != SINGLE_REFERENCE)
       setup_compound_reference_mode(cm);
@@ -3601,7 +3590,7 @@
 
     for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
       for (i = 0; i < INTRA_MODES - 1; ++i)
-        vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+        av1_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
 
 #if CONFIG_REF_MV
     for (i = 0; i < NMV_CONTEXTS; ++i)
@@ -3618,7 +3607,7 @@
 #endif  // CONFIG_GLOBAL_MOTION
   }
 
-  return vp10_reader_has_error(&r);
+  return aom_reader_has_error(&r);
 }
 
 #ifdef NDEBUG
@@ -3626,9 +3615,9 @@
 #else  // !NDEBUG
 // Counts should only be incremented when frame_parallel_decoding_mode and
 // error_resilient_mode are disabled.
-static void debug_check_frame_counts(const VP10_COMMON *const cm) {
+static void debug_check_frame_counts(const AV1_COMMON *const cm) {
   FRAME_COUNTS zero_counts;
-  vp10_zero(zero_counts);
+  av1_zero(zero_counts);
   assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
          cm->error_resilient_mode);
   assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
@@ -3689,14 +3678,14 @@
 }
 #endif  // NDEBUG
 
-static struct vpx_read_bit_buffer *init_read_bit_buffer(
-    VP10Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
-    const uint8_t *data_end, uint8_t clear_data[MAX_VPX_HEADER_SIZE]) {
+static struct aom_read_bit_buffer *init_read_bit_buffer(
+    AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
+    const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
   rb->bit_offset = 0;
   rb->error_handler = error_handler;
   rb->error_handler_data = &pbi->common;
   if (pbi->decrypt_cb) {
-    const int n = (int)VPXMIN(MAX_VPX_HEADER_SIZE, data_end - data);
+    const int n = (int)AOMMIN(MAX_AV1_HEADER_SIZE, data_end - data);
     pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
     rb->bit_buffer = clear_data;
     rb->bit_buffer_end = clear_data + n;
@@ -3709,32 +3698,32 @@
 
 //------------------------------------------------------------------------------
 
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) {
-  return vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
-         vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
-         vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
+  return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
+         aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
+         aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
 }
 
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
-                          int *height) {
-  *width = vpx_rb_read_literal(rb, 16) + 1;
-  *height = vpx_rb_read_literal(rb, 16) + 1;
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+                         int *height) {
+  *width = aom_rb_read_literal(rb, 16) + 1;
+  *height = aom_rb_read_literal(rb, 16) + 1;
 }
 
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
-  int profile = vpx_rb_read_bit(rb);
-  profile |= vpx_rb_read_bit(rb) << 1;
-  if (profile > 2) profile += vpx_rb_read_bit(rb);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
+  int profile = aom_rb_read_bit(rb);
+  profile |= aom_rb_read_bit(rb) << 1;
+  if (profile > 2) profile += aom_rb_read_bit(rb);
   return (BITSTREAM_PROFILE)profile;
 }
 
-void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
-                       const uint8_t *data_end, const uint8_t **p_data_end) {
-  VP10_COMMON *const cm = &pbi->common;
+void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
+                      const uint8_t *data_end, const uint8_t **p_data_end) {
+  AV1_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
-  struct vpx_read_bit_buffer rb;
+  struct aom_read_bit_buffer rb;
   int context_updated = 0;
-  uint8_t clear_data[MAX_VPX_HEADER_SIZE];
+  uint8_t clear_data[MAX_AV1_HEADER_SIZE];
   const size_t first_partition_size = read_uncompressed_header(
       pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
   YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
@@ -3747,7 +3736,7 @@
 // showing a frame directly
 #if CONFIG_EXT_REFS
     if (cm->show_existing_frame)
-      *p_data_end = data + vpx_rb_bytes_read(&rb);
+      *p_data_end = data + aom_rb_bytes_read(&rb);
     else
 #endif  // CONFIG_EXT_REFS
       *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
@@ -3755,9 +3744,9 @@
     return;
   }
 
-  data += vpx_rb_bytes_read(&rb);
+  data += aom_rb_bytes_read(&rb);
   if (!read_is_valid(data, first_partition_size, data_end))
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt header length");
 
   cm->use_prev_frame_mvs =
@@ -3782,46 +3771,46 @@
   }
 #endif  // CONFIG_EXT_REFS
 
-  vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
 
   *cm->fc = cm->frame_contexts[cm->frame_context_idx];
   if (!cm->fc->initialized)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Uninitialized entropy context.");
 
-  vp10_zero(cm->counts);
+  av1_zero(cm->counts);
 
   xd->corrupted = 0;
   new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
   if (new_fb->corrupted)
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Decode failed. Frame data header is corrupted.");
 
   if (cm->lf.filter_level && !cm->skip_loop_filter) {
-    vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
+    av1_loop_filter_frame_init(cm, cm->lf.filter_level);
   }
 
   // If encoded in frame parallel mode, frame context is ready after decoding
   // the frame header.
   if (cm->frame_parallel_decode &&
       cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
-    VPxWorker *const worker = pbi->frame_worker_owner;
+    AVxWorker *const worker = pbi->frame_worker_owner;
     FrameWorkerData *const frame_worker_data = worker->data1;
     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
       context_updated = 1;
       cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
     }
-    vp10_frameworker_lock_stats(worker);
+    av1_frameworker_lock_stats(worker);
     pbi->cur_buf->row = -1;
     pbi->cur_buf->col = -1;
     frame_worker_data->frame_context_ready = 1;
     // Signal the main thread that context is ready.
-    vp10_frameworker_signal_stats(worker);
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_signal_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   }
 
 #if CONFIG_ENTROPY
-  vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+  av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
   cm->coef_probs_update_idx = 0;
 #endif  // CONFIG_ENTROPY
 
@@ -3836,12 +3825,12 @@
       if (!cm->skip_loop_filter) {
         // If multiple threads are used to decode tiles, then we use those
         // threads to do parallel loopfiltering.
-        vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
-                                  cm->lf.filter_level, 0, 0, pbi->tile_workers,
-                                  pbi->num_tile_workers, &pbi->lf_row_sync);
+        av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
+                                 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+                                 &pbi->lf_row_sync);
       }
     } else {
-      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                          "Decode failed. Frame data is corrupted.");
     }
   } else {
@@ -3849,10 +3838,10 @@
   }
 #if CONFIG_LOOP_RESTORATION
   if (cm->rst_info.restoration_type != RESTORE_NONE) {
-    vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
-                               cm->frame_type == KEY_FRAME, cm->width,
-                               cm->height);
-    vp10_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
+    av1_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+                              cm->frame_type == KEY_FRAME, cm->width,
+                              cm->height);
+    av1_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
   }
 #endif  // CONFIG_LOOP_RESTORATION
 
@@ -3861,18 +3850,18 @@
 #if CONFIG_ENTROPY
       cm->partial_prob_update = 0;
 #endif  // CONFIG_ENTROPY
-      vp10_adapt_coef_probs(cm);
-      vp10_adapt_intra_frame_probs(cm);
+      av1_adapt_coef_probs(cm);
+      av1_adapt_intra_frame_probs(cm);
 
       if (!frame_is_intra_only(cm)) {
-        vp10_adapt_inter_frame_probs(cm);
-        vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+        av1_adapt_inter_frame_probs(cm);
+        av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
       }
     } else {
       debug_check_frame_counts(cm);
     }
   } else {
-    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                        "Decode failed. Frame data is corrupted.");
   }
 
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index 7fdff0b..020c424 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -8,26 +8,26 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DECODEFRAME_H_
-#define VP10_DECODER_DECODEFRAME_H_
+#ifndef AV1_DECODER_DECODEFRAME_H_
+#define AV1_DECODER_DECODEFRAME_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Decoder;
-struct vpx_read_bit_buffer;
+struct AV1Decoder;
+struct aom_read_bit_buffer;
 
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
-                          int *height);
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb);
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+                         int *height);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
 
-void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
-                       const uint8_t *data_end, const uint8_t **p_data_end);
+void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
+                      const uint8_t *data_end, const uint8_t **p_data_end);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODEFRAME_H_
+#endif  // AV1_DECODER_DECODEFRAME_H_
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index ef776a0..47cfea6 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -22,27 +22,27 @@
 #include "av1/decoder/decodemv.h"
 #include "av1/decoder/decodeframe.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-static INLINE int read_uniform(vp10_reader *r, int n) {
+static INLINE int read_uniform(aom_reader *r, int n) {
   int l = get_unsigned_bits(n);
   int m = (1 << l) - n;
-  int v = vp10_read_literal(r, l - 1);
+  int v = aom_read_literal(r, l - 1);
 
   assert(l != 0);
 
   if (v < m)
     return v;
   else
-    return (v << 1) - m + vp10_read_literal(r, 1);
+    return (v << 1) - m + aom_read_literal(r, 1);
 }
 
-static PREDICTION_MODE read_intra_mode(vp10_reader *r, const vpx_prob *p) {
-  return (PREDICTION_MODE)vp10_read_tree(r, vp10_intra_mode_tree, p);
+static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
+  return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
 }
 
-static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                         vp10_reader *r, int size_group) {
+static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                         aom_reader *r, int size_group) {
   const PREDICTION_MODE y_mode =
       read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
   FRAME_COUNTS *counts = xd->counts;
@@ -50,8 +50,8 @@
   return y_mode;
 }
 
-static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                          vp10_reader *r,
+static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                          aom_reader *r,
                                           PREDICTION_MODE y_mode) {
   const PREDICTION_MODE uv_mode =
       read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
@@ -61,27 +61,27 @@
 }
 
 #if CONFIG_EXT_INTER
-static INTERINTRA_MODE read_interintra_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                            vp10_reader *r, int size_group) {
-  const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)vp10_read_tree(
-      r, vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
+static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                            aom_reader *r, int size_group) {
+  const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree(
+      r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
   FRAME_COUNTS *counts = xd->counts;
   if (counts) ++counts->interintra_mode[size_group][ii_mode];
   return ii_mode;
 }
 #endif  // CONFIG_EXT_INTER
 
-static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_inter_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
                                        MB_MODE_INFO *mbmi,
 #endif
-                                       vp10_reader *r, int16_t ctx) {
+                                       aom_reader *r, int16_t ctx) {
 #if CONFIG_REF_MV
   FRAME_COUNTS *counts = xd->counts;
   int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
-  vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
+  aom_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
 
-  if (vp10_read(r, mode_prob) == 0) {
+  if (aom_read(r, mode_prob) == 0) {
     if (counts) ++counts->newmv_mode[mode_ctx][0];
 
 #if CONFIG_EXT_INTER
@@ -91,7 +91,7 @@
 #if CONFIG_EXT_INTER
     } else {
       mode_prob = cm->fc->new2mv_prob;
-      if (vp10_read(r, mode_prob) == 0) {
+      if (aom_read(r, mode_prob) == 0) {
         if (counts) ++counts->new2mv_mode[0];
         return NEWMV;
       } else {
@@ -108,7 +108,7 @@
   mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
 
   mode_prob = cm->fc->zeromv_prob[mode_ctx];
-  if (vp10_read(r, mode_prob) == 0) {
+  if (aom_read(r, mode_prob) == 0) {
     if (counts) ++counts->zeromv_mode[mode_ctx][0];
     return ZEROMV;
   }
@@ -122,7 +122,7 @@
 
   mode_prob = cm->fc->refmv_prob[mode_ctx];
 
-  if (vp10_read(r, mode_prob) == 0) {
+  if (aom_read(r, mode_prob) == 0) {
     if (counts) ++counts->refmv_mode[mode_ctx][0];
 
     return NEARESTMV;
@@ -135,7 +135,7 @@
   assert(0);
 #else
   const int mode =
-      vp10_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+      aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
   FRAME_COUNTS *counts = xd->counts;
   if (counts) ++counts->inter_mode[ctx][mode];
 
@@ -144,18 +144,18 @@
 }
 
 #if CONFIG_REF_MV
-static void read_drl_idx(const VP10_COMMON *cm, MACROBLOCKD *xd,
-                         MB_MODE_INFO *mbmi, vp10_reader *r) {
-  uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+                         MB_MODE_INFO *mbmi, aom_reader *r) {
+  uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
   mbmi->ref_mv_idx = 0;
 
   if (mbmi->mode == NEWMV) {
     int idx;
     for (idx = 0; idx < 2; ++idx) {
       if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
-        uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
-        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
-        if (!vp10_read(r, drl_prob)) {
+        uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+        aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+        if (!aom_read(r, drl_prob)) {
           mbmi->ref_mv_idx = idx;
           if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
           return;
@@ -173,9 +173,9 @@
     // mode is factored in.
     for (idx = 1; idx < 3; ++idx) {
       if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
-        uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
-        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
-        if (!vp10_read(r, drl_prob)) {
+        uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+        aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+        if (!aom_read(r, drl_prob)) {
           mbmi->ref_mv_idx = idx - 1;
           if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
           return;
@@ -189,11 +189,10 @@
 #endif
 
 #if CONFIG_EXT_INTER
-static PREDICTION_MODE read_inter_compound_mode(VP10_COMMON *cm,
-                                                MACROBLOCKD *xd, vp10_reader *r,
-                                                int16_t ctx) {
-  const int mode = vp10_read_tree(r, vp10_inter_compound_mode_tree,
-                                  cm->fc->inter_compound_mode_probs[ctx]);
+static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                                aom_reader *r, int16_t ctx) {
+  const int mode = aom_read_tree(r, av1_inter_compound_mode_tree,
+                                 cm->fc->inter_compound_mode_probs[ctx]);
   FRAME_COUNTS *counts = xd->counts;
 
   if (counts) ++counts->inter_compound_mode[ctx][mode];
@@ -203,16 +202,16 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-static int read_segment_id(vp10_reader *r,
+static int read_segment_id(aom_reader *r,
                            const struct segmentation_probs *segp) {
-  return vp10_read_tree(r, vp10_segment_tree, segp->tree_probs);
+  return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
 }
 
 #if CONFIG_VAR_TX
-static void read_tx_size_vartx(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
                                MB_MODE_INFO *mbmi, FRAME_COUNTS *counts,
                                TX_SIZE tx_size, int blk_row, int blk_col,
-                               vp10_reader *r) {
+                               aom_reader *r) {
   int is_split = 0;
   const int tx_row = blk_row >> 1;
   const int tx_col = blk_col >> 1;
@@ -229,7 +228,7 @@
 
   if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
 
-  is_split = vp10_read(r, cm->fc->txfm_partition_prob[ctx]);
+  is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx]);
 
   if (is_split) {
     BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
@@ -268,18 +267,18 @@
 }
 #endif
 
-static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                     int tx_size_cat, vp10_reader *r) {
+static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int tx_size_cat, aom_reader *r) {
   FRAME_COUNTS *counts = xd->counts;
   const int ctx = get_tx_size_context(xd);
-  int tx_size = vp10_read_tree(r, vp10_tx_size_tree[tx_size_cat],
-                               cm->fc->tx_size_probs[tx_size_cat][ctx]);
+  int tx_size = aom_read_tree(r, av1_tx_size_tree[tx_size_cat],
+                              cm->fc->tx_size_probs[tx_size_cat][ctx]);
   if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
   return (TX_SIZE)tx_size;
 }
 
-static TX_SIZE read_tx_size_intra(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                  vp10_reader *r) {
+static TX_SIZE read_tx_size_intra(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                  aom_reader *r) {
   TX_MODE tx_mode = cm->tx_mode;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -297,8 +296,8 @@
   }
 }
 
-static TX_SIZE read_tx_size_inter(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                  int allow_select, vp10_reader *r) {
+static TX_SIZE read_tx_size_inter(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                  int allow_select, aom_reader *r) {
   TX_MODE tx_mode = cm->tx_mode;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -328,20 +327,20 @@
   }
 }
 
-static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
+static int dec_get_segment_id(const AV1_COMMON *cm, const uint8_t *segment_ids,
                               int mi_offset, int x_mis, int y_mis) {
   int x, y, segment_id = INT_MAX;
 
   for (y = 0; y < y_mis; y++)
     for (x = 0; x < x_mis; x++)
       segment_id =
-          VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+          AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
 
   assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
   return segment_id;
 }
 
-static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+static void set_segment_id(AV1_COMMON *cm, int mi_offset, int x_mis, int y_mis,
                            int segment_id) {
   int x, y;
 
@@ -352,9 +351,9 @@
       cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
 }
 
-static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  int mi_offset, int x_mis, int y_mis,
-                                 vp10_reader *r) {
+                                 aom_reader *r) {
   struct segmentation *const seg = &cm->seg;
   FRAME_COUNTS *counts = xd->counts;
   struct segmentation_probs *const segp = &cm->fc->seg;
@@ -370,7 +369,7 @@
   return segment_id;
 }
 
-static void copy_segment_id(const VP10_COMMON *cm,
+static void copy_segment_id(const AV1_COMMON *cm,
                             const uint8_t *last_segment_ids,
                             uint8_t *current_segment_ids, int mi_offset,
                             int x_mis, int y_mis) {
@@ -383,8 +382,8 @@
                            : 0;
 }
 
-static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                                 int mi_row, int mi_col, vp10_reader *r) {
+static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                                 int mi_row, int mi_col, aom_reader *r) {
   struct segmentation *const seg = &cm->seg;
   FRAME_COUNTS *counts = xd->counts;
   struct segmentation_probs *const segp = &cm->fc->seg;
@@ -395,8 +394,8 @@
   const int bh = num_8x8_blocks_high_lookup[mbmi->sb_type];
 
   // TODO(slavarnway): move x_mis, y_mis into xd ?????
-  const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
-  const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+  const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
 
   if (!seg->enabled) return 0;  // Default for disabled segmentation
 
@@ -412,9 +411,9 @@
   }
 
   if (seg->temporal_update) {
-    const int ctx = vp10_get_pred_context_seg_id(xd);
-    const vpx_prob pred_prob = segp->pred_probs[ctx];
-    mbmi->seg_id_predicted = vp10_read(r, pred_prob);
+    const int ctx = av1_get_pred_context_seg_id(xd);
+    const aom_prob pred_prob = segp->pred_probs[ctx];
+    mbmi->seg_id_predicted = aom_read(r, pred_prob);
     if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
     if (mbmi->seg_id_predicted) {
       segment_id = predicted_segment_id;
@@ -430,21 +429,21 @@
   return segment_id;
 }
 
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
-                     vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+                     aom_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
-    const int ctx = vp10_get_skip_context(xd);
-    const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+    const int ctx = av1_get_skip_context(xd);
+    const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->skip[ctx][skip];
     return skip;
   }
 }
 
-static void read_palette_mode_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                                   vp10_reader *r) {
+static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                                   aom_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const MODE_INFO *const above_mi = xd->above_mi;
@@ -458,16 +457,16 @@
       palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
     if (left_mi)
       palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
-    if (vp10_read(
+    if (aom_read(
             r,
-            vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
+            av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
       pmi->palette_size[0] =
-          vp10_read_tree(r, vp10_palette_size_tree,
-                         vp10_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
+          aom_read_tree(r, av1_palette_size_tree,
+                        av1_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
           2;
       n = pmi->palette_size[0];
       for (i = 0; i < n; ++i)
-        pmi->palette_colors[i] = vp10_read_literal(r, cm->bit_depth);
+        pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth);
 
       xd->plane[0].color_index_map[0] = read_uniform(r, n);
       assert(xd->plane[0].color_index_map[0] < n);
@@ -475,18 +474,18 @@
   }
 
   if (mbmi->uv_mode == DC_PRED) {
-    if (vp10_read(
-            r, vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
+    if (aom_read(r,
+                 av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
       pmi->palette_size[1] =
-          vp10_read_tree(r, vp10_palette_size_tree,
-                         vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
+          aom_read_tree(r, av1_palette_size_tree,
+                        av1_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
           2;
       n = pmi->palette_size[1];
       for (i = 0; i < n; ++i) {
         pmi->palette_colors[PALETTE_MAX_SIZE + i] =
-            vp10_read_literal(r, cm->bit_depth);
+            aom_read_literal(r, cm->bit_depth);
         pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] =
-            vp10_read_literal(r, cm->bit_depth);
+            aom_read_literal(r, cm->bit_depth);
       }
       xd->plane[1].color_index_map[0] = read_uniform(r, n);
       assert(xd->plane[1].color_index_map[0] < n);
@@ -495,8 +494,8 @@
 }
 
 #if CONFIG_EXT_INTRA
-static void read_ext_intra_mode_info(VP10_COMMON *const cm,
-                                     MACROBLOCKD *const xd, vp10_reader *r) {
+static void read_ext_intra_mode_info(AV1_COMMON *const cm,
+                                     MACROBLOCKD *const xd, aom_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   FRAME_COUNTS *counts = xd->counts;
@@ -506,7 +505,7 @@
 #endif
   if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
     mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
-        vp10_read(r, cm->fc->ext_intra_probs[0]);
+        aom_read(r, cm->fc->ext_intra_probs[0]);
     if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
       mbmi->ext_intra_mode_info.ext_intra_mode[0] =
           read_uniform(r, FILTER_INTRA_MODES);
@@ -517,7 +516,7 @@
   if (mbmi->uv_mode == DC_PRED &&
       mbmi->palette_mode_info.palette_size[1] == 0) {
     mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
-        vp10_read(r, cm->fc->ext_intra_probs[1]);
+        aom_read(r, cm->fc->ext_intra_probs[1]);
     if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
       mbmi->ext_intra_mode_info.ext_intra_mode[1] =
           read_uniform(r, FILTER_INTRA_MODES);
@@ -527,11 +526,11 @@
   }
 }
 
-static void read_intra_angle_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                                  vp10_reader *r) {
+static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                                  aom_reader *r) {
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
-  const int ctx = vp10_get_pred_context_intra_interp(xd);
+  const int ctx = av1_get_pred_context_intra_interp(xd);
   int p_angle;
 
   if (bsize < BLOCK_8X8) return;
@@ -540,10 +539,10 @@
     mbmi->angle_delta[0] =
         read_uniform(r, 2 * MAX_ANGLE_DELTAS + 1) - MAX_ANGLE_DELTAS;
     p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
-    if (vp10_is_intra_filter_switchable(p_angle)) {
+    if (av1_is_intra_filter_switchable(p_angle)) {
       FRAME_COUNTS *counts = xd->counts;
-      mbmi->intra_filter = vp10_read_tree(r, vp10_intra_filter_tree,
-                                          cm->fc->intra_filter_probs[ctx]);
+      mbmi->intra_filter = aom_read_tree(r, av1_intra_filter_tree,
+                                         cm->fc->intra_filter_probs[ctx]);
       if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
     } else {
       mbmi->intra_filter = INTRA_FILTER_LINEAR;
@@ -557,9 +556,9 @@
 }
 #endif  // CONFIG_EXT_INTRA
 
-static void read_intra_frame_mode_info(VP10_COMMON *const cm,
+static void read_intra_frame_mode_info(AV1_COMMON *const cm,
                                        MACROBLOCKD *const xd, int mi_row,
-                                       int mi_col, vp10_reader *r) {
+                                       int mi_col, aom_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const MODE_INFO *above_mi = xd->above_mi;
@@ -571,8 +570,8 @@
   const int bh = xd->plane[0].n4_h >> 1;
 
   // TODO(slavarnway): move x_mis, y_mis into xd ?????
-  const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
-  const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+  const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
 
   mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r);
   mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
@@ -627,8 +626,8 @@
       FRAME_COUNTS *counts = xd->counts;
       int eset = get_ext_tx_set(mbmi->tx_size, mbmi->sb_type, 0);
       if (eset > 0) {
-        mbmi->tx_type = vp10_read_tree(
-            r, vp10_ext_tx_intra_tree[eset],
+        mbmi->tx_type = aom_read_tree(
+            r, av1_ext_tx_intra_tree[eset],
             cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
         if (counts)
           ++counts
@@ -643,8 +642,8 @@
       FRAME_COUNTS *counts = xd->counts;
       TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
       mbmi->tx_type =
-          vp10_read_tree(r, vp10_ext_tx_tree,
-                         cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+          aom_read_tree(r, av1_ext_tx_tree,
+                        cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
       if (counts)
         ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
     } else {
@@ -654,64 +653,63 @@
   }
 }
 
-static int read_mv_component(vp10_reader *r, const nmv_component *mvcomp,
+static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
                              int usehp) {
   int mag, d, fr, hp;
-  const int sign = vp10_read(r, mvcomp->sign);
-  const int mv_class = vp10_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
+  const int sign = aom_read(r, mvcomp->sign);
+  const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
   const int class0 = mv_class == MV_CLASS_0;
 
   // Integer part
   if (class0) {
-    d = vp10_read_tree(r, vp10_mv_class0_tree, mvcomp->class0);
+    d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
     mag = 0;
   } else {
     int i;
     const int n = mv_class + CLASS0_BITS - 1;  // number of bits
 
     d = 0;
-    for (i = 0; i < n; ++i) d |= vp10_read(r, mvcomp->bits[i]) << i;
+    for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i]) << i;
     mag = CLASS0_SIZE << (mv_class + 2);
   }
 
   // Fractional part
-  fr = vp10_read_tree(r, vp10_mv_fp_tree,
-                      class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+  fr = aom_read_tree(r, av1_mv_fp_tree,
+                     class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
 
   // High precision part (if hp is not used, the default value of the hp is 1)
-  hp = usehp ? vp10_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
+  hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
 
   // Result
   mag += ((d << 3) | (fr << 1) | hp) + 1;
   return sign ? -mag : mag;
 }
 
-static INLINE void read_mv(vp10_reader *r, MV *mv, const MV *ref,
+static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
 #if CONFIG_REF_MV
                            int is_compound,
 #endif
                            const nmv_context *ctx, nmv_context_counts *counts,
                            int allow_hp) {
   MV_JOINT_TYPE joint_type;
-  const int use_hp = allow_hp && vp10_use_mv_hp(ref);
+  const int use_hp = allow_hp && av1_use_mv_hp(ref);
   MV diff = { 0, 0 };
 
 #if CONFIG_REF_MV && !CONFIG_EXT_INTER
   if (is_compound) {
-    int is_zero_rmv = vp10_read(r, ctx->zero_rmv);
+    int is_zero_rmv = aom_read(r, ctx->zero_rmv);
     if (is_zero_rmv) {
       joint_type = MV_JOINT_ZERO;
     } else {
       joint_type =
-          (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+          (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
     }
   } else {
     joint_type =
-        (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+        (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
   }
 #else
-  joint_type =
-      (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+  joint_type = (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
 #endif
 
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -724,19 +722,19 @@
   if (mv_joint_horizontal(joint_type))
     diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
 
-  vp10_inc_mv(&diff, counts, use_hp);
+  av1_inc_mv(&diff, counts, use_hp);
 
   mv->row = ref->row + diff.row;
   mv->col = ref->col + diff.col;
 }
 
-static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
+static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
                                                 const MACROBLOCKD *xd,
-                                                vp10_reader *r) {
+                                                aom_reader *r) {
   if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-    const int ctx = vp10_get_reference_mode_context(cm, xd);
+    const int ctx = av1_get_reference_mode_context(cm, xd);
     const REFERENCE_MODE mode =
-        (REFERENCE_MODE)vp10_read(r, cm->fc->comp_inter_prob[ctx]);
+        (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->comp_inter[ctx][mode];
     return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
@@ -746,8 +744,8 @@
 }
 
 // Read the referncence frame
-static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                            vp10_reader *r, int segment_id,
+static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                            aom_reader *r, int segment_id,
                             MV_REFERENCE_FRAME ref_frame[2]) {
   FRAME_CONTEXT *const fc = cm->fc;
   FRAME_COUNTS *counts = xd->counts;
@@ -765,29 +763,29 @@
 #else
       const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
 #endif  // CONFIG_EXT_REFS
-      const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
-      const int bit = vp10_read(r, fc->comp_ref_prob[ctx][0]);
+      const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
+      const int bit = aom_read(r, fc->comp_ref_prob[ctx][0]);
 
       if (counts) ++counts->comp_ref[ctx][0][bit];
 
 #if CONFIG_EXT_REFS
       // Decode forward references.
       if (!bit) {
-        const int ctx1 = vp10_get_pred_context_comp_ref_p1(cm, xd);
-        const int bit1 = vp10_read(r, fc->comp_ref_prob[ctx1][1]);
+        const int ctx1 = av1_get_pred_context_comp_ref_p1(cm, xd);
+        const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1]);
         if (counts) ++counts->comp_ref[ctx1][1][bit1];
         ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
       } else {
-        const int ctx2 = vp10_get_pred_context_comp_ref_p2(cm, xd);
-        const int bit2 = vp10_read(r, fc->comp_ref_prob[ctx2][2]);
+        const int ctx2 = av1_get_pred_context_comp_ref_p2(cm, xd);
+        const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2]);
         if (counts) ++counts->comp_ref[ctx2][2][bit2];
         ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
       }
 
       // Decode backward references.
       {
-        const int ctx_bwd = vp10_get_pred_context_comp_bwdref_p(cm, xd);
-        const int bit_bwd = vp10_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
+        const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
+        const int bit_bwd = aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
         if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
         ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
       }
@@ -797,39 +795,39 @@
 #endif  // CONFIG_EXT_REFS
     } else if (mode == SINGLE_REFERENCE) {
 #if CONFIG_EXT_REFS
-      const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
-      const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+      const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+      const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
       if (counts) ++counts->single_ref[ctx0][0][bit0];
 
       if (bit0) {
-        const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
-        const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+        const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+        const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
         if (counts) ++counts->single_ref[ctx1][1][bit1];
         ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
       } else {
-        const int ctx2 = vp10_get_pred_context_single_ref_p3(xd);
-        const int bit2 = vp10_read(r, fc->single_ref_prob[ctx2][2]);
+        const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
+        const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2]);
         if (counts) ++counts->single_ref[ctx2][2][bit2];
         if (bit2) {
-          const int ctx4 = vp10_get_pred_context_single_ref_p5(xd);
-          const int bit4 = vp10_read(r, fc->single_ref_prob[ctx4][4]);
+          const int ctx4 = av1_get_pred_context_single_ref_p5(xd);
+          const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4]);
           if (counts) ++counts->single_ref[ctx4][4][bit4];
           ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
         } else {
-          const int ctx3 = vp10_get_pred_context_single_ref_p4(xd);
-          const int bit3 = vp10_read(r, fc->single_ref_prob[ctx3][3]);
+          const int ctx3 = av1_get_pred_context_single_ref_p4(xd);
+          const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3]);
           if (counts) ++counts->single_ref[ctx3][3][bit3];
           ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
         }
       }
 #else
-      const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
-      const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+      const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+      const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
       if (counts) ++counts->single_ref[ctx0][0][bit0];
 
       if (bit0) {
-        const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
-        const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+        const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+        const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
         if (counts) ++counts->single_ref[ctx1][1][bit1];
         ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
       } else {
@@ -845,16 +843,16 @@
 }
 
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static MOTION_VARIATION read_motvar_block(VP10_COMMON *const cm,
+static MOTION_VARIATION read_motvar_block(AV1_COMMON *const cm,
                                           MACROBLOCKD *const xd,
-                                          vp10_reader *r) {
+                                          aom_reader *r) {
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   FRAME_COUNTS *counts = xd->counts;
   MOTION_VARIATION motvar;
 
   if (is_motvar_allowed(&xd->mi[0]->mbmi)) {
-    motvar = (MOTION_VARIATION)vp10_read_tree(r, vp10_motvar_tree,
-                                              cm->fc->motvar_prob[bsize]);
+    motvar = (MOTION_VARIATION)aom_read_tree(r, av1_motvar_tree,
+                                             cm->fc->motvar_prob[bsize]);
     if (counts) ++counts->motvar[bsize][motvar];
     return motvar;
   } else {
@@ -863,34 +861,34 @@
 }
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
-static INLINE INTERP_FILTER read_interp_filter(VP10_COMMON *const cm,
+static INLINE INTERP_FILTER read_interp_filter(AV1_COMMON *const cm,
                                                MACROBLOCKD *const xd,
 #if CONFIG_DUAL_FILTER
                                                int dir,
 #endif
-                                               vp10_reader *r) {
+                                               aom_reader *r) {
 #if CONFIG_EXT_INTERP
-  if (!vp10_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
+  if (!av1_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
 #endif
   if (cm->interp_filter != SWITCHABLE) {
     return cm->interp_filter;
   } else {
 #if CONFIG_DUAL_FILTER
-    const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+    const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
 #else
-    const int ctx = vp10_get_pred_context_switchable_interp(xd);
+    const int ctx = av1_get_pred_context_switchable_interp(xd);
 #endif
     FRAME_COUNTS *counts = xd->counts;
-    const INTERP_FILTER type = (INTERP_FILTER)vp10_read_tree(
-        r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+    const INTERP_FILTER type = (INTERP_FILTER)aom_read_tree(
+        r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
     if (counts) ++counts->switchable_interp[ctx][type];
     return type;
   }
 }
 
-static void read_intra_block_mode_info(VP10_COMMON *const cm,
+static void read_intra_block_mode_info(AV1_COMMON *const cm,
                                        MACROBLOCKD *const xd, MODE_INFO *mi,
-                                       vp10_reader *r) {
+                                       aom_reader *r) {
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mi->mbmi.sb_type;
   int i;
@@ -938,14 +936,14 @@
          mv->col < MV_UPP;
 }
 
-static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
                             PREDICTION_MODE mode,
 #if CONFIG_REF_MV
                             int block,
 #endif
                             int_mv mv[2], int_mv ref_mv[2],
                             int_mv nearest_mv[2], int_mv near_mv[2],
-                            int is_compound, int allow_hp, vp10_reader *r) {
+                            int is_compound, int allow_hp, aom_reader *r) {
   int i;
   int ret = 1;
 #if CONFIG_REF_MV
@@ -966,8 +964,8 @@
 #endif
       for (i = 0; i < 1 + is_compound; ++i) {
 #if CONFIG_REF_MV
-        int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
-                                   xd->ref_mv_stack[mbmi->ref_frame[i]]);
+        int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+                                  xd->ref_mv_stack[mbmi->ref_frame[i]]);
         nmv_context_counts *const mv_counts =
             counts ? &counts->mv[nmv_ctx] : NULL;
         read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
@@ -1026,8 +1024,8 @@
       assert(is_compound);
       for (i = 0; i < 2; ++i) {
 #if CONFIG_REF_MV
-        int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
-                                   xd->ref_mv_stack[mbmi->ref_frame[i]]);
+        int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+                                  xd->ref_mv_stack[mbmi->ref_frame[i]]);
         nmv_context_counts *const mv_counts =
             counts ? &counts->mv[nmv_ctx] : NULL;
         read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, is_compound,
@@ -1067,8 +1065,8 @@
     case NEW_NEARESTMV: {
       FRAME_COUNTS *counts = xd->counts;
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
-                                 xd->ref_mv_stack[mbmi->ref_frame[0]]);
+      int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+                                xd->ref_mv_stack[mbmi->ref_frame[0]]);
       nmv_context_counts *const mv_counts =
           counts ? &counts->mv[nmv_ctx] : NULL;
       read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1086,8 +1084,8 @@
     case NEAREST_NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
-                                 xd->ref_mv_stack[mbmi->ref_frame[1]]);
+      int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+                                xd->ref_mv_stack[mbmi->ref_frame[1]]);
       nmv_context_counts *const mv_counts =
           counts ? &counts->mv[nmv_ctx] : NULL;
       mv[0].as_int = nearest_mv[0].as_int;
@@ -1106,8 +1104,8 @@
     case NEAR_NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
-                                 xd->ref_mv_stack[mbmi->ref_frame[1]]);
+      int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+                                xd->ref_mv_stack[mbmi->ref_frame[1]]);
       nmv_context_counts *const mv_counts =
           counts ? &counts->mv[nmv_ctx] : NULL;
       mv[0].as_int = near_mv[0].as_int;
@@ -1127,8 +1125,8 @@
     case NEW_NEARMV: {
       FRAME_COUNTS *counts = xd->counts;
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
-                                 xd->ref_mv_stack[mbmi->ref_frame[0]]);
+      int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+                                xd->ref_mv_stack[mbmi->ref_frame[0]]);
       nmv_context_counts *const mv_counts =
           counts ? &counts->mv[nmv_ctx] : NULL;
       read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1155,13 +1153,13 @@
   return ret;
 }
 
-static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                               int segment_id, vp10_reader *r) {
+static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+                               int segment_id, aom_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
     return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
   } else {
-    const int ctx = vp10_get_intra_inter_context(xd);
-    const int is_inter = vp10_read(r, cm->fc->intra_inter_prob[ctx]);
+    const int ctx = av1_get_intra_inter_context(xd);
+    const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->intra_inter[ctx][is_inter];
     return is_inter;
@@ -1169,21 +1167,21 @@
 }
 
 static void fpm_sync(void *const data, int mi_row) {
-  VP10Decoder *const pbi = (VP10Decoder *)data;
-  vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
-                        mi_row << pbi->common.mib_size_log2);
+  AV1Decoder *const pbi = (AV1Decoder *)data;
+  av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
+                       mi_row << pbi->common.mib_size_log2);
 }
 
-static void read_inter_block_mode_info(VP10Decoder *const pbi,
+static void read_inter_block_mode_info(AV1Decoder *const pbi,
                                        MACROBLOCKD *const xd,
                                        MODE_INFO *const mi,
 #if (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
-                                       int mi_row, int mi_col, vp10_reader *r,
+                                       int mi_row, int mi_col, aom_reader *r,
                                        int supertx_enabled) {
 #else
-                                       int mi_row, int mi_col, vp10_reader *r) {
+                                       int mi_row, int mi_col, aom_reader *r) {
 #endif  // CONFIG_OBMC && CONFIG_SUPERTX
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
   const int allow_hp = cm->allow_high_precision_mv;
@@ -1211,22 +1209,22 @@
     RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
     xd->block_refs[ref] = ref_buf;
-    if ((!vp10_is_valid_scale(&ref_buf->sf)))
-      vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+    if ((!av1_is_valid_scale(&ref_buf->sf)))
+      aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                          "Reference frame has invalid dimensions");
-    vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+    av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
   }
 
   for (ref_frame = LAST_FRAME; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
-    vp10_find_mv_refs(cm, xd, mi, ref_frame,
+    av1_find_mv_refs(cm, xd, mi, ref_frame,
 #if CONFIG_REF_MV
-                      &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
+                     &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
 #if CONFIG_EXT_INTER
-                      compound_inter_mode_ctx,
+                     compound_inter_mode_ctx,
 #endif  // CONFIG_EXT_INTER
 #endif
-                      ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
-                      inter_mode_ctx);
+                     ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
+                     inter_mode_ctx);
   }
 
 #if CONFIG_REF_MV
@@ -1236,7 +1234,7 @@
   else
 #endif  // CONFIG_EXT_INTER
     mode_ctx =
-        vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
+        av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
   mbmi->ref_mv_idx = 0;
 #else
   mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]];
@@ -1245,7 +1243,7 @@
   if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     mbmi->mode = ZEROMV;
     if (bsize < BLOCK_8X8) {
-      vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+      aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                          "Invalid usage of segement feature on small blocks");
       return;
     }
@@ -1275,8 +1273,8 @@
   if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
 #endif  // CONFIG_EXT_INTER
     for (ref = 0; ref < 1 + is_compound; ++ref) {
-      vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
-                             &nearestmv[ref], &nearmv[ref]);
+      av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+                            &nearestmv[ref], &nearmv[ref]);
     }
   }
 
@@ -1293,7 +1291,7 @@
   if (is_compound && bsize >= BLOCK_8X8 && mbmi->mode != NEWMV &&
       mbmi->mode != ZEROMV) {
 #endif  // CONFIG_EXT_INTER
-    uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+    uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
 
 #if CONFIG_EXT_INTER
     if (xd->ref_mv_count[ref_frame_type] > 0) {
@@ -1366,8 +1364,8 @@
 #if CONFIG_EXT_INTER
         if (!is_compound)
 #endif  // CONFIG_EXT_INTER
-          mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
-                                                bsize, j);
+          mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
+                                               bsize, j);
 #endif
 #if CONFIG_EXT_INTER
         if (is_compound)
@@ -1395,24 +1393,24 @@
 #if CONFIG_EXT_INTER
           {
             int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
-            vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
-                                   mi_row, mi_col, NULL);
+            av1_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
+                                  mi_row, mi_col, NULL);
 #endif  // CONFIG_EXT_INTER
-            vp10_append_sub8x8_mvs_for_idx(
-                cm, xd, j, ref, mi_row, mi_col,
+            av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
 #if CONFIG_REF_MV
-                ref_mv_stack[ref], &ref_mv_count[ref],
+                                          ref_mv_stack[ref], &ref_mv_count[ref],
 #endif
 #if CONFIG_EXT_INTER
-                mv_ref_list,
+                                          mv_ref_list,
 #endif  // CONFIG_EXT_INTER
-                &nearest_sub8x8[ref], &near_sub8x8[ref]);
+                                          &nearest_sub8x8[ref],
+                                          &near_sub8x8[ref]);
 #if CONFIG_EXT_INTER
             if (have_newmv_in_inter_mode(b_mode)) {
               mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
               mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
-              vp10_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
-                                     &ref_mv[1][ref]);
+              av1_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
+                                    &ref_mv[1][ref]);
             }
           }
 #endif  // CONFIG_EXT_INTER
@@ -1469,7 +1467,7 @@
 
     for (ref = 0; ref < 1 + is_compound && mbmi->mode == NEWMV; ++ref) {
 #if CONFIG_REF_MV
-      uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+      uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
       if (xd->ref_mv_count[ref_frame_type] > 1) {
         ref_mv[ref] =
             (ref == 0)
@@ -1503,7 +1501,7 @@
 #endif
       is_interintra_allowed(mbmi)) {
     const int bsize_group = size_group_lookup[bsize];
-    const int interintra = vp10_read(r, cm->fc->interintra_prob[bsize_group]);
+    const int interintra = aom_read(r, cm->fc->interintra_prob[bsize_group]);
     if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
     assert(mbmi->ref_frame[1] == NONE);
     if (interintra) {
@@ -1520,12 +1518,12 @@
 #endif  // CONFIG_EXT_INTRA
       if (is_interintra_wedge_used(bsize)) {
         mbmi->use_wedge_interintra =
-            vp10_read(r, cm->fc->wedge_interintra_prob[bsize]);
+            aom_read(r, cm->fc->wedge_interintra_prob[bsize]);
         if (xd->counts)
           xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
         if (mbmi->use_wedge_interintra) {
           mbmi->interintra_wedge_index =
-              vp10_read_literal(r, get_wedge_bits_lookup(bsize));
+              aom_read_literal(r, get_wedge_bits_lookup(bsize));
           mbmi->interintra_wedge_sign = 0;
         }
       }
@@ -1554,13 +1552,13 @@
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
       is_interinter_wedge_used(bsize)) {
     mbmi->use_wedge_interinter =
-        vp10_read(r, cm->fc->wedge_interinter_prob[bsize]);
+        aom_read(r, cm->fc->wedge_interinter_prob[bsize]);
     if (xd->counts)
       xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
     if (mbmi->use_wedge_interinter) {
       mbmi->interinter_wedge_index =
-          vp10_read_literal(r, get_wedge_bits_lookup(bsize));
-      mbmi->interinter_wedge_sign = vp10_read_bit(r);
+          aom_read_literal(r, get_wedge_bits_lookup(bsize));
+      mbmi->interinter_wedge_sign = aom_read_bit(r);
     }
   }
 #endif  // CONFIG_EXT_INTER
@@ -1588,13 +1586,13 @@
 #endif  // CONFIG_DUAL_FILTER
 }
 
-static void read_inter_frame_mode_info(VP10Decoder *const pbi,
+static void read_inter_frame_mode_info(AV1Decoder *const pbi,
                                        MACROBLOCKD *const xd,
 #if CONFIG_SUPERTX
                                        int supertx_enabled,
 #endif  // CONFIG_SUPERTX
-                                       int mi_row, int mi_col, vp10_reader *r) {
-  VP10_COMMON *const cm = &pbi->common;
+                                       int mi_row, int mi_col, aom_reader *r) {
+  AV1_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   int inter_block = 1;
@@ -1693,8 +1691,8 @@
 
       if (inter_block) {
         if (eset > 0) {
-          mbmi->tx_type = vp10_read_tree(
-              r, vp10_ext_tx_inter_tree[eset],
+          mbmi->tx_type = aom_read_tree(
+              r, av1_ext_tx_inter_tree[eset],
               cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]]);
           if (counts)
             ++counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
@@ -1702,8 +1700,8 @@
         }
       } else if (ALLOW_INTRA_EXT_TX) {
         if (eset > 0) {
-          mbmi->tx_type = vp10_read_tree(
-              r, vp10_ext_tx_intra_tree[eset],
+          mbmi->tx_type = aom_read_tree(
+              r, av1_ext_tx_intra_tree[eset],
               cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
           if (counts)
             ++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
@@ -1721,13 +1719,13 @@
         !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
       FRAME_COUNTS *counts = xd->counts;
       if (inter_block) {
-        mbmi->tx_type = vp10_read_tree(
-            r, vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+        mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
+                                      cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
         if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
       } else {
         const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
-        mbmi->tx_type = vp10_read_tree(
-            r, vp10_ext_tx_tree,
+        mbmi->tx_type = aom_read_tree(
+            r, av1_ext_tx_tree,
             cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
         if (counts)
           ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -1739,13 +1737,13 @@
   }
 }
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
 #if CONFIG_SUPERTX
-                         int supertx_enabled,
+                        int supertx_enabled,
 #endif  // CONFIG_SUPERTX
-                         int mi_row, int mi_col, vp10_reader *r, int x_mis,
-                         int y_mis) {
-  VP10_COMMON *const cm = &pbi->common;
+                        int mi_row, int mi_col, aom_reader *r, int x_mis,
+                        int y_mis) {
+  AV1_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
   MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h
index 59fdd70..cf3d917 100644
--- a/av1/decoder/decodemv.h
+++ b/av1/decoder/decodemv.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DECODEMV_H_
-#define VP10_DECODER_DECODEMV_H_
+#ifndef AV1_DECODER_DECODEMV_H_
+#define AV1_DECODER_DECODEMV_H_
 
 #include "av1/decoder/bitreader.h"
 
@@ -19,16 +19,16 @@
 extern "C" {
 #endif
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
 #if CONFIG_SUPERTX
-                         int supertx_enabled,
+                        int supertx_enabled,
 #endif
 
-                         int mi_row, int mi_col, vp10_reader *r, int x_mis,
-                         int y_mis);
+                        int mi_row, int mi_col, aom_reader *r, int x_mis,
+                        int y_mis);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODEMV_H_
+#endif  // AV1_DECODER_DECODEMV_H_
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 4cea36b..58952c0 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -12,16 +12,16 @@
 #include <limits.h>
 #include <stdio.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/system_state.h"
-#include "aom_ports/vpx_once.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_ports/aom_once.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
 
 #include "av1/common/alloccommon.h"
 #include "av1/common/loopfilter.h"
@@ -38,60 +38,60 @@
   static volatile int init_done = 0;
 
   if (!init_done) {
-    vp10_rtcd();
-    vpx_dsp_rtcd();
-    vpx_scale_rtcd();
-    vp10_init_intra_predictors();
+    av1_rtcd();
+    aom_dsp_rtcd();
+    aom_scale_rtcd();
+    av1_init_intra_predictors();
 #if CONFIG_EXT_INTER
-    vp10_init_wedge_masks();
+    av1_init_wedge_masks();
 #endif  // CONFIG_EXT_INTER
     init_done = 1;
   }
 }
 
-static void vp10_dec_setup_mi(VP10_COMMON *cm) {
+static void av1_dec_setup_mi(AV1_COMMON *cm) {
   cm->mi = cm->mip + cm->mi_stride + 1;
   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
   memset(cm->mi_grid_base, 0,
          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
-static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
-  cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+static int av1_dec_alloc_mi(AV1_COMMON *cm, int mi_size) {
+  cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
   if (!cm->mip) return 1;
   cm->mi_alloc_size = mi_size;
-  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  cm->mi_grid_base = (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
   if (!cm->mi_grid_base) return 1;
   return 0;
 }
 
-static void vp10_dec_free_mi(VP10_COMMON *cm) {
-  vpx_free(cm->mip);
+static void av1_dec_free_mi(AV1_COMMON *cm) {
+  aom_free(cm->mip);
   cm->mip = NULL;
-  vpx_free(cm->mi_grid_base);
+  aom_free(cm->mi_grid_base);
   cm->mi_grid_base = NULL;
 }
 
-VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
-  VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
-  VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+AV1Decoder *av1_decoder_create(BufferPool *const pool) {
+  AV1Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
+  AV1_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
 
   if (!cm) return NULL;
 
-  vp10_zero(*pbi);
+  av1_zero(*pbi);
 
   if (setjmp(cm->error.jmp)) {
     cm->error.setjmp = 0;
-    vp10_decoder_remove(pbi);
+    av1_decoder_remove(pbi);
     return NULL;
   }
 
   cm->error.setjmp = 1;
 
-  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
   CHECK_MEM_ERROR(
       cm, cm->frame_contexts,
-      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+      (FRAME_CONTEXT *)aom_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
 
   pbi->need_resync = 1;
   once(initialize_dec);
@@ -104,50 +104,50 @@
   pbi->ready_for_new_data = 1;
   pbi->common.buffer_pool = pool;
 
-  cm->bit_depth = VPX_BITS_8;
-  cm->dequant_bit_depth = VPX_BITS_8;
+  cm->bit_depth = AOM_BITS_8;
+  cm->dequant_bit_depth = AOM_BITS_8;
 
-  cm->alloc_mi = vp10_dec_alloc_mi;
-  cm->free_mi = vp10_dec_free_mi;
-  cm->setup_mi = vp10_dec_setup_mi;
+  cm->alloc_mi = av1_dec_alloc_mi;
+  cm->free_mi = av1_dec_free_mi;
+  cm->setup_mi = av1_dec_setup_mi;
 
-  vp10_loop_filter_init(cm);
+  av1_loop_filter_init(cm);
 
 #if CONFIG_AOM_QM
   aom_qm_init(cm);
 #endif
 #if CONFIG_LOOP_RESTORATION
-  vp10_loop_restoration_precal();
+  av1_loop_restoration_precal();
 #endif  // CONFIG_LOOP_RESTORATION
 
   cm->error.setjmp = 0;
 
-  vpx_get_worker_interface()->init(&pbi->lf_worker);
+  aom_get_worker_interface()->init(&pbi->lf_worker);
 
   return pbi;
 }
 
-void vp10_decoder_remove(VP10Decoder *pbi) {
+void av1_decoder_remove(AV1Decoder *pbi) {
   int i;
 
   if (!pbi) return;
 
-  vpx_get_worker_interface()->end(&pbi->lf_worker);
-  vpx_free(pbi->lf_worker.data1);
-  vpx_free(pbi->tile_data);
+  aom_get_worker_interface()->end(&pbi->lf_worker);
+  aom_free(pbi->lf_worker.data1);
+  aom_free(pbi->tile_data);
   for (i = 0; i < pbi->num_tile_workers; ++i) {
-    VPxWorker *const worker = &pbi->tile_workers[i];
-    vpx_get_worker_interface()->end(worker);
+    AVxWorker *const worker = &pbi->tile_workers[i];
+    aom_get_worker_interface()->end(worker);
   }
-  vpx_free(pbi->tile_worker_data);
-  vpx_free(pbi->tile_worker_info);
-  vpx_free(pbi->tile_workers);
+  aom_free(pbi->tile_worker_data);
+  aom_free(pbi->tile_worker_info);
+  aom_free(pbi->tile_workers);
 
   if (pbi->num_tile_workers > 0) {
-    vp10_loop_filter_dealloc(&pbi->lf_row_sync);
+    av1_loop_filter_dealloc(&pbi->lf_row_sync);
   }
 
-  vpx_free(pbi);
+  aom_free(pbi);
 }
 
 static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
@@ -156,45 +156,45 @@
          a->uv_height == b->uv_height && a->uv_width == b->uv_width;
 }
 
-vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
-                                        VPX_REFFRAME ref_frame_flag,
-                                        YV12_BUFFER_CONFIG *sd) {
-  VP10_COMMON *cm = &pbi->common;
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
+                                       AOM_REFFRAME ref_frame_flag,
+                                       YV12_BUFFER_CONFIG *sd) {
+  AV1_COMMON *cm = &pbi->common;
 
   /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
    * encoder is using the frame buffers for. This is just a stub to keep the
-   * vpxenc --test-decode functionality working, and will be replaced in a
-   * later commit that adds VP9-specific controls for this functionality.
+   * aomenc --test-decode functionality working, and will be replaced in a
+   * later commit that adds AV1-specific controls for this functionality.
    */
-  if (ref_frame_flag == VPX_LAST_FLAG) {
+  if (ref_frame_flag == AOM_LAST_FLAG) {
     const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
     if (cfg == NULL) {
-      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+      aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                          "No 'last' reference frame");
-      return VPX_CODEC_ERROR;
+      return AOM_CODEC_ERROR;
     }
     if (!equal_dimensions(cfg, sd))
-      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+      aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                          "Incorrect buffer dimensions");
     else
-      vpx_yv12_copy_frame(cfg, sd);
+      aom_yv12_copy_frame(cfg, sd);
   } else {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+    aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
   }
 
   return cm->error.error_code;
 }
 
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
-                                       VPX_REFFRAME ref_frame_flag,
-                                       YV12_BUFFER_CONFIG *sd) {
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+                                      AOM_REFFRAME ref_frame_flag,
+                                      YV12_BUFFER_CONFIG *sd) {
   int idx;
   YV12_BUFFER_CONFIG *ref_buf = NULL;
 
   // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   // encoder is using the frame buffers for. This is just a stub to keep the
-  // vpxenc --test-decode functionality working, and will be replaced in a
-  // later commit that adds VP9-specific controls for this functionality.
+  // aomenc --test-decode functionality working, and will be replaced in a
+  // later commit that adds AV1-specific controls for this functionality.
 
   // (Yunqing) The set_reference control depends on the following setting in
   // encoder.
@@ -212,32 +212,32 @@
 
   // TODO(zoeliu): To revisit following code and reconsider what assumption we
   // may take on the reference frame buffer virtual indexes
-  if (ref_frame_flag == VPX_LAST_FLAG) {
+  if (ref_frame_flag == AOM_LAST_FLAG) {
     idx = cm->ref_frame_map[0];
 #if CONFIG_EXT_REFS
-  } else if (ref_frame_flag == VPX_LAST2_FLAG) {
+  } else if (ref_frame_flag == AOM_LAST2_FLAG) {
     idx = cm->ref_frame_map[1];
-  } else if (ref_frame_flag == VPX_LAST3_FLAG) {
+  } else if (ref_frame_flag == AOM_LAST3_FLAG) {
     idx = cm->ref_frame_map[2];
-  } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+  } else if (ref_frame_flag == AOM_GOLD_FLAG) {
     idx = cm->ref_frame_map[3];
-  } else if (ref_frame_flag == VPX_BWD_FLAG) {
+  } else if (ref_frame_flag == AOM_BWD_FLAG) {
     idx = cm->ref_frame_map[4];
-  } else if (ref_frame_flag == VPX_ALT_FLAG) {
+  } else if (ref_frame_flag == AOM_ALT_FLAG) {
     idx = cm->ref_frame_map[5];
 #else
-  } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+  } else if (ref_frame_flag == AOM_GOLD_FLAG) {
     idx = cm->ref_frame_map[1];
-  } else if (ref_frame_flag == VPX_ALT_FLAG) {
+  } else if (ref_frame_flag == AOM_ALT_FLAG) {
     idx = cm->ref_frame_map[2];
 #endif  // CONFIG_EXT_REFS
   } else {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+    aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
     return cm->error.error_code;
   }
 
   if (idx < 0 || idx >= FRAME_BUFFERS) {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                        "Invalid reference frame map");
     return cm->error.error_code;
   }
@@ -246,20 +246,20 @@
   ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;
 
   if (!equal_dimensions(ref_buf, sd)) {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                        "Incorrect buffer dimensions");
   } else {
     // Overwrite the reference frame buffer.
-    vpx_yv12_copy_frame(sd, ref_buf);
+    aom_yv12_copy_frame(sd, ref_buf);
   }
 
   return cm->error.error_code;
 }
 
 /* If any buffer updating is signaled it should be done here. */
-static void swap_frame_buffers(VP10Decoder *pbi) {
+static void swap_frame_buffers(AV1Decoder *pbi) {
   int ref_index = 0, mask;
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   BufferPool *const pool = cm->buffer_pool;
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
 
@@ -302,14 +302,14 @@
   }
 }
 
-int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
-                                 const uint8_t **psource) {
-  VP10_COMMON *volatile const cm = &pbi->common;
+int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
+                                const uint8_t **psource) {
+  AV1_COMMON *volatile const cm = &pbi->common;
   BufferPool *volatile const pool = cm->buffer_pool;
   RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
   const uint8_t *source = *psource;
   int retcode = 0;
-  cm->error.error_code = VPX_CODEC_OK;
+  cm->error.error_code = AOM_CODEC_OK;
 
   if (size == 0) {
     // This is used to signal that we are missing frames.
@@ -340,27 +340,27 @@
 
   // Find a free frame buffer. Return error if can not find any.
   cm->new_fb_idx = get_free_fb(cm);
-  if (cm->new_fb_idx == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
+  if (cm->new_fb_idx == INVALID_IDX) return AOM_CODEC_MEM_ERROR;
 
   // Assign a MV array to the frame buffer.
   cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
 
   pbi->hold_ref_buf = 0;
   if (cm->frame_parallel_decode) {
-    VPxWorker *const worker = pbi->frame_worker_owner;
-    vp10_frameworker_lock_stats(worker);
+    AVxWorker *const worker = pbi->frame_worker_owner;
+    av1_frameworker_lock_stats(worker);
     frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
     // Reset decoding progress.
     pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
     pbi->cur_buf->row = -1;
     pbi->cur_buf->col = -1;
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   } else {
     pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
   }
 
   if (setjmp(cm->error.jmp)) {
-    const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+    const AVxWorkerInterface *const winterface = aom_get_worker_interface();
     int i;
 
     cm->error.setjmp = 0;
@@ -399,12 +399,12 @@
     decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
     unlock_buffer_pool(pool);
 
-    vpx_clear_system_state();
+    aom_clear_system_state();
     return -1;
   }
 
   cm->error.setjmp = 1;
-  vp10_decode_frame(pbi, source, source + size, psource);
+  av1_decode_frame(pbi, source, source + size, psource);
 
   swap_frame_buffers(pbi);
 
@@ -414,9 +414,9 @@
   // border.
   if (pbi->dec_tile_row == -1 && pbi->dec_tile_col == -1)
 #endif  // CONFIG_EXT_TILE
-    vpx_extend_frame_inner_borders(cm->frame_to_show);
+    aom_extend_frame_inner_borders(cm->frame_to_show);
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   if (!cm->show_existing_frame) {
     cm->last_show_frame = cm->show_frame;
@@ -428,24 +428,24 @@
       cm->prev_frame = cm->cur_frame;
 
     if (cm->seg.enabled && !cm->frame_parallel_decode)
-      vp10_swap_current_and_last_seg_map(cm);
+      av1_swap_current_and_last_seg_map(cm);
   }
 
   // Update progress in frame parallel decode.
   if (cm->frame_parallel_decode) {
     // Need to lock the mutex here as another thread may
     // be accessing this buffer.
-    VPxWorker *const worker = pbi->frame_worker_owner;
+    AVxWorker *const worker = pbi->frame_worker_owner;
     FrameWorkerData *const frame_worker_data = worker->data1;
-    vp10_frameworker_lock_stats(worker);
+    av1_frameworker_lock_stats(worker);
 
     if (cm->show_frame) {
       cm->current_video_frame++;
     }
     frame_worker_data->frame_decoded = 1;
     frame_worker_data->frame_context_ready = 1;
-    vp10_frameworker_signal_stats(worker);
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_signal_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   } else {
     cm->last_width = cm->width;
     cm->last_height = cm->height;
@@ -458,8 +458,8 @@
   return retcode;
 }
 
-int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
-  VP10_COMMON *const cm = &pbi->common;
+int av1_get_raw_frame(AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
+  AV1_COMMON *const cm = &pbi->common;
   int ret = -1;
   if (pbi->ready_for_new_data == 1) return ret;
 
@@ -471,12 +471,12 @@
   pbi->ready_for_new_data = 1;
   *sd = *cm->frame_to_show;
   ret = 0;
-  vpx_clear_system_state();
+  aom_clear_system_state();
   return ret;
 }
 
-int vp10_get_frame_to_show(VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
-  VP10_COMMON *const cm = &pbi->common;
+int av1_get_frame_to_show(AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
+  AV1_COMMON *const cm = &pbi->common;
 
   if (!cm->show_frame || !cm->frame_to_show) return -1;
 
@@ -484,10 +484,10 @@
   return 0;
 }
 
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
-                                            uint32_t sizes[8], int *count,
-                                            vpx_decrypt_cb decrypt_cb,
-                                            void *decrypt_state) {
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+                                           uint32_t sizes[8], int *count,
+                                           aom_decrypt_cb decrypt_cb,
+                                           void *decrypt_state) {
   // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
   // it is a super frame index. If the last byte of real video compression
   // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
@@ -508,7 +508,7 @@
 
     // This chunk is marked as having a superframe index but doesn't have
     // enough data for it, thus it's an invalid superframe index.
-    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
+    if (data_sz < index_sz) return AOM_CODEC_CORRUPT_FRAME;
 
     {
       const uint8_t marker2 =
@@ -517,7 +517,7 @@
       // This chunk is marked as having a superframe index but doesn't have
       // the matching marker byte at the front of the index therefore it's an
       // invalid chunk.
-      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
+      if (marker != marker2) return AOM_CODEC_CORRUPT_FRAME;
     }
 
     {
@@ -545,5 +545,5 @@
       *count = frames;
     }
   }
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
diff --git a/av1/decoder/decoder.h b/av1/decoder/decoder.h
index 47a5a7b..b399768 100644
--- a/av1/decoder/decoder.h
+++ b/av1/decoder/decoder.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DECODER_H_
-#define VP10_DECODER_DECODER_H_
+#ifndef AV1_DECODER_DECODER_H_
+#define AV1_DECODER_DECODER_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
 #include "av1/decoder/bitreader.h"
 #include "aom_scale/yv12config.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
 
 #include "av1/common/thread_common.h"
 #include "av1/common/onyxc_int.h"
@@ -28,8 +28,8 @@
 
 // TODO(hkuang): combine this with TileWorkerData.
 typedef struct TileData {
-  VP10_COMMON *cm;
-  vp10_reader bit_reader;
+  AV1_COMMON *cm;
+  aom_reader bit_reader;
   DECLARE_ALIGNED(16, MACROBLOCKD, xd);
   /* dqcoeff are shared by all the planes. So planes must be decoded serially */
   DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
@@ -37,14 +37,14 @@
 } TileData;
 
 typedef struct TileWorkerData {
-  struct VP10Decoder *pbi;
-  vp10_reader bit_reader;
+  struct AV1Decoder *pbi;
+  aom_reader bit_reader;
   FRAME_COUNTS counts;
   DECLARE_ALIGNED(16, MACROBLOCKD, xd);
   /* dqcoeff are shared by all the planes. So planes must be decoded serially */
   DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
   DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
-  struct vpx_internal_error_info error_info;
+  struct aom_internal_error_info error_info;
 } TileWorkerData;
 
 typedef struct TileBufferDec {
@@ -55,10 +55,10 @@
   int col;                      // only used with multi-threaded decoding
 } TileBufferDec;
 
-typedef struct VP10Decoder {
+typedef struct AV1Decoder {
   DECLARE_ALIGNED(16, MACROBLOCKD, mb);
 
-  DECLARE_ALIGNED(16, VP10_COMMON, common);
+  DECLARE_ALIGNED(16, AV1_COMMON, common);
 
   int ready_for_new_data;
 
@@ -68,9 +68,9 @@
   // the same.
   RefCntBuffer *cur_buf;  //  Current decoding frame buffer.
 
-  VPxWorker *frame_worker_owner;  // frame_worker that owns this pbi.
-  VPxWorker lf_worker;
-  VPxWorker *tile_workers;
+  AVxWorker *frame_worker_owner;  // frame_worker that owns this pbi.
+  AVxWorker lf_worker;
+  AVxWorker *tile_workers;
   TileWorkerData *tile_worker_data;
   TileInfo *tile_worker_info;
   int num_tile_workers;
@@ -80,9 +80,9 @@
 
   TileBufferDec tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS];
 
-  VP10LfSync lf_row_sync;
+  AV1LfSync lf_row_sync;
 
-  vpx_decrypt_cb decrypt_cb;
+  aom_decrypt_cb decrypt_cb;
   void *decrypt_state;
 
   int max_threads;
@@ -95,24 +95,24 @@
   int tile_col_size_bytes;
   int dec_tile_row, dec_tile_col;
 #endif  // CONFIG_EXT_TILE
-} VP10Decoder;
+} AV1Decoder;
 
-int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
-                                 const uint8_t **dest);
+int av1_receive_compressed_data(struct AV1Decoder *pbi, size_t size,
+                                const uint8_t **dest);
 
-int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
+int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
 
-int vp10_get_frame_to_show(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame);
+int av1_get_frame_to_show(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame);
 
-vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
-                                        VPX_REFFRAME ref_frame_flag,
-                                        YV12_BUFFER_CONFIG *sd);
-
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
-                                       VPX_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
+                                       AOM_REFFRAME ref_frame_flag,
                                        YV12_BUFFER_CONFIG *sd);
 
-static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+                                      AOM_REFFRAME ref_frame_flag,
+                                      YV12_BUFFER_CONFIG *sd);
+
+static INLINE uint8_t read_marker(aom_decrypt_cb decrypt_cb,
                                   void *decrypt_state, const uint8_t *data) {
   if (decrypt_cb) {
     uint8_t marker;
@@ -124,14 +124,14 @@
 
 // This function is exposed for use in tests, as well as the inlined function
 // "read_marker".
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
-                                            uint32_t sizes[8], int *count,
-                                            vpx_decrypt_cb decrypt_cb,
-                                            void *decrypt_state);
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+                                           uint32_t sizes[8], int *count,
+                                           aom_decrypt_cb decrypt_cb,
+                                           void *decrypt_state);
 
-struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
+struct AV1Decoder *av1_decoder_create(BufferPool *const pool);
 
-void vp10_decoder_remove(struct VP10Decoder *pbi);
+void av1_decoder_remove(struct AV1Decoder *pbi);
 
 static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
                                       BufferPool *const pool) {
@@ -149,9 +149,9 @@
 }
 
 #if CONFIG_EXT_REFS
-static INLINE int dec_is_ref_frame_buf(VP10Decoder *const pbi,
+static INLINE int dec_is_ref_frame_buf(AV1Decoder *const pbi,
                                        RefCntBuffer *frame_buf) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   int i;
   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
     RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -166,4 +166,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODER_H_
+#endif  // AV1_DECODER_DECODER_H_
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index 0fba999..0935cdf 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/ans.h"
@@ -37,9 +37,9 @@
   } while (0)
 
 #if !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *probs, int n, vp10_reader *r) {
+static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
   int i, val = 0;
-  for (i = 0; i < n; ++i) val = (val << 1) | vp10_read(r, probs[i]);
+  for (i = 0; i < n; ++i) val = (val << 1) | aom_read(r, probs[i]);
   return val;
 }
 
@@ -47,7 +47,7 @@
 static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                         tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
                         const int16_t *dq, int ctx, const int16_t *scan,
-                        const int16_t *nb, vp10_reader *r,
+                        const int16_t *nb, aom_reader *r,
                         const qm_val_t *iqm[2][TX_SIZES])
 #else
 static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
@@ -57,7 +57,7 @@
                         dequant_val_type_nuq *dq_val,
 #endif  // CONFIG_NEW_QUANT
                         int ctx, const int16_t *scan, const int16_t *nb,
-                        vp10_reader *r)
+                        aom_reader *r)
 #endif
 {
   FRAME_COUNTS *counts = xd->counts;
@@ -69,9 +69,9 @@
 #endif
   int band, c = 0;
   const int tx_size_ctx = txsize_sqr_map[tx_size];
-  const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+  const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
       fc->coef_probs[tx_size_ctx][type][ref];
-  const vpx_prob *prob;
+  const aom_prob *prob;
   unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
   unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
   uint8_t token_cache[MAX_TX_SQUARE];
@@ -94,38 +94,38 @@
     eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (xd->bd > VPX_BITS_8) {
-    if (xd->bd == VPX_BITS_10) {
-      cat1_prob = vp10_cat1_prob_high10;
-      cat2_prob = vp10_cat2_prob_high10;
-      cat3_prob = vp10_cat3_prob_high10;
-      cat4_prob = vp10_cat4_prob_high10;
-      cat5_prob = vp10_cat5_prob_high10;
-      cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (xd->bd > AOM_BITS_8) {
+    if (xd->bd == AOM_BITS_10) {
+      cat1_prob = av1_cat1_prob_high10;
+      cat2_prob = av1_cat2_prob_high10;
+      cat3_prob = av1_cat3_prob_high10;
+      cat4_prob = av1_cat4_prob_high10;
+      cat5_prob = av1_cat5_prob_high10;
+      cat6_prob = av1_cat6_prob_high10;
     } else {
-      cat1_prob = vp10_cat1_prob_high12;
-      cat2_prob = vp10_cat2_prob_high12;
-      cat3_prob = vp10_cat3_prob_high12;
-      cat4_prob = vp10_cat4_prob_high12;
-      cat5_prob = vp10_cat5_prob_high12;
-      cat6_prob = vp10_cat6_prob_high12;
+      cat1_prob = av1_cat1_prob_high12;
+      cat2_prob = av1_cat2_prob_high12;
+      cat3_prob = av1_cat3_prob_high12;
+      cat4_prob = av1_cat4_prob_high12;
+      cat5_prob = av1_cat5_prob_high12;
+      cat6_prob = av1_cat6_prob_high12;
     }
   } else {
-    cat1_prob = vp10_cat1_prob;
-    cat2_prob = vp10_cat2_prob;
-    cat3_prob = vp10_cat3_prob;
-    cat4_prob = vp10_cat4_prob;
-    cat5_prob = vp10_cat5_prob;
-    cat6_prob = vp10_cat6_prob;
+    cat1_prob = av1_cat1_prob;
+    cat2_prob = av1_cat2_prob;
+    cat3_prob = av1_cat3_prob;
+    cat4_prob = av1_cat4_prob;
+    cat5_prob = av1_cat5_prob;
+    cat6_prob = av1_cat6_prob;
   }
 #else
-  cat1_prob = vp10_cat1_prob;
-  cat2_prob = vp10_cat2_prob;
-  cat3_prob = vp10_cat3_prob;
-  cat4_prob = vp10_cat4_prob;
-  cat5_prob = vp10_cat5_prob;
-  cat6_prob = vp10_cat6_prob;
+  cat1_prob = av1_cat1_prob;
+  cat2_prob = av1_cat2_prob;
+  cat3_prob = av1_cat3_prob;
+  cat4_prob = av1_cat4_prob;
+  cat5_prob = av1_cat5_prob;
+  cat6_prob = av1_cat6_prob;
 #endif
 
   dq_shift = get_tx_scale(xd, tx_type, tx_size);
@@ -135,7 +135,7 @@
     band = *band_translate++;
     prob = coef_probs[band][ctx];
     if (counts) ++eob_branch_count[band][ctx];
-    if (!vp10_read(r, prob[EOB_CONTEXT_NODE])) {
+    if (!aom_read(r, prob[EOB_CONTEXT_NODE])) {
       INCREMENT_COUNT(EOB_MODEL_TOKEN);
       break;
     }
@@ -144,7 +144,7 @@
     dqv_val = &dq_val[band][0];
 #endif  // CONFIG_NEW_QUANT
 
-    while (!vp10_read(r, prob[ZERO_CONTEXT_NODE])) {
+    while (!aom_read(r, prob[ZERO_CONTEXT_NODE])) {
       INCREMENT_COUNT(ZERO_TOKEN);
       dqv = dq[1];
       token_cache[scan[c]] = 0;
@@ -158,14 +158,14 @@
 #endif  // CONFIG_NEW_QUANT
     }
 
-    if (!vp10_read(r, prob[ONE_CONTEXT_NODE])) {
+    if (!aom_read(r, prob[ONE_CONTEXT_NODE])) {
       INCREMENT_COUNT(ONE_TOKEN);
       token = ONE_TOKEN;
       val = 1;
     } else {
       INCREMENT_COUNT(TWO_TOKEN);
-      token = vp10_read_tree(r, vp10_coef_con_tree,
-                             vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+      token = aom_read_tree(r, av1_coef_con_tree,
+                            av1_pareto8_full[prob[PIVOT_NODE] - 1]);
       switch (token) {
         case TWO_TOKEN:
         case THREE_TOKEN:
@@ -188,15 +188,15 @@
         case CATEGORY6_TOKEN: {
           const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
           const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           switch (xd->bd) {
-            case VPX_BITS_8:
+            case AOM_BITS_8:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
               break;
-            case VPX_BITS_10:
+            case AOM_BITS_10:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r);
               break;
-            case VPX_BITS_12:
+            case AOM_BITS_12:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
               break;
             default: assert(0); return -1;
@@ -210,7 +210,7 @@
     }
 #if CONFIG_NEW_QUANT
 
-    v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+    v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
     v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
 #else
 #if CONFIG_AOM_QM
@@ -221,15 +221,15 @@
 #endif  // CONFIG_NEW_QUANT
 
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
-    dqcoeff[scan[c]] = highbd_check_range((vp10_read_bit(r) ? -v : v), xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+    dqcoeff[scan[c]] = highbd_check_range((aom_read_bit(r) ? -v : v), xd->bd);
 #else
-    dqcoeff[scan[c]] = check_range(vp10_read_bit(r) ? -v : v);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    dqcoeff[scan[c]] = check_range(aom_read_bit(r) ? -v : v);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #else
-    dqcoeff[scan[c]] = vp10_read_bit(r) ? -v : v;
+    dqcoeff[scan[c]] = aom_read_bit(r) ? -v : v;
 #endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-    token_cache[scan[c]] = vp10_pt_energy_class[token];
+    token_cache[scan[c]] = av1_pt_energy_class[token];
     ++c;
     ctx = get_coef_context(nb, token_cache, c);
     dqv = dq[1];
@@ -238,7 +238,7 @@
   return c;
 }
 #else  // !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *const probs, int n,
+static INLINE int read_coeff(const aom_prob *const probs, int n,
                              struct AnsDecoder *const ans) {
   int i, val = 0;
   for (i = 0; i < n; ++i) val = (val << 1) | uabs_read(ans, probs[i]);
@@ -260,11 +260,11 @@
   int band, c = 0;
   int skip_eob = 0;
   const int tx_size_ctx = txsize_sqr_map[tx_size];
-  const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+  const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
       fc->coef_probs[tx_size_ctx][type][ref];
   const rans_dec_lut(*coef_cdfs)[COEFF_CONTEXTS] =
       fc->coef_cdfs[tx_size_ctx][type][ref];
-  const vpx_prob *prob;
+  const aom_prob *prob;
   const rans_dec_lut *cdf;
   unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
   unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
@@ -290,38 +290,38 @@
     eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (xd->bd > VPX_BITS_8) {
-    if (xd->bd == VPX_BITS_10) {
-      cat1_prob = vp10_cat1_prob_high10;
-      cat2_prob = vp10_cat2_prob_high10;
-      cat3_prob = vp10_cat3_prob_high10;
-      cat4_prob = vp10_cat4_prob_high10;
-      cat5_prob = vp10_cat5_prob_high10;
-      cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+  if (xd->bd > AOM_BITS_8) {
+    if (xd->bd == AOM_BITS_10) {
+      cat1_prob = av1_cat1_prob_high10;
+      cat2_prob = av1_cat2_prob_high10;
+      cat3_prob = av1_cat3_prob_high10;
+      cat4_prob = av1_cat4_prob_high10;
+      cat5_prob = av1_cat5_prob_high10;
+      cat6_prob = av1_cat6_prob_high10;
     } else {
-      cat1_prob = vp10_cat1_prob_high12;
-      cat2_prob = vp10_cat2_prob_high12;
-      cat3_prob = vp10_cat3_prob_high12;
-      cat4_prob = vp10_cat4_prob_high12;
-      cat5_prob = vp10_cat5_prob_high12;
-      cat6_prob = vp10_cat6_prob_high12;
+      cat1_prob = av1_cat1_prob_high12;
+      cat2_prob = av1_cat2_prob_high12;
+      cat3_prob = av1_cat3_prob_high12;
+      cat4_prob = av1_cat4_prob_high12;
+      cat5_prob = av1_cat5_prob_high12;
+      cat6_prob = av1_cat6_prob_high12;
     }
   } else {
-    cat1_prob = vp10_cat1_prob;
-    cat2_prob = vp10_cat2_prob;
-    cat3_prob = vp10_cat3_prob;
-    cat4_prob = vp10_cat4_prob;
-    cat5_prob = vp10_cat5_prob;
-    cat6_prob = vp10_cat6_prob;
+    cat1_prob = av1_cat1_prob;
+    cat2_prob = av1_cat2_prob;
+    cat3_prob = av1_cat3_prob;
+    cat4_prob = av1_cat4_prob;
+    cat5_prob = av1_cat5_prob;
+    cat6_prob = av1_cat6_prob;
   }
 #else
-  cat1_prob = vp10_cat1_prob;
-  cat2_prob = vp10_cat2_prob;
-  cat3_prob = vp10_cat3_prob;
-  cat4_prob = vp10_cat4_prob;
-  cat5_prob = vp10_cat5_prob;
-  cat6_prob = vp10_cat6_prob;
+  cat1_prob = av1_cat1_prob;
+  cat2_prob = av1_cat2_prob;
+  cat3_prob = av1_cat3_prob;
+  cat4_prob = av1_cat4_prob;
+  cat5_prob = av1_cat5_prob;
+  cat6_prob = av1_cat6_prob;
 #endif
 
   while (c < max_eob) {
@@ -371,15 +371,15 @@
         case CATEGORY6_TOKEN: {
           const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
           const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           switch (xd->bd) {
-            case VPX_BITS_8:
+            case AOM_BITS_8:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
               break;
-            case VPX_BITS_10:
+            case AOM_BITS_10:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
               break;
-            case VPX_BITS_12:
+            case AOM_BITS_12:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
               break;
             default: assert(0); return -1;
@@ -390,23 +390,23 @@
         } break;
       }
 #if CONFIG_NEW_QUANT
-      v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+      v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
       v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
 #else
       v = (val * dqv) >> dq_shift;
 #endif  // CONFIG_NEW_QUANT
 
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       dqcoeff[scan[c]] =
           highbd_check_range((uabs_read_bit(ans) ? -v : v), xd->bd);
 #else
       dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #else
       dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
 #endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-      token_cache[scan[c]] = vp10_pt_energy_class[token];
+      token_cache[scan[c]] = av1_pt_energy_class[token];
       skip_eob = 0;
     }
     ++c;
@@ -418,8 +418,8 @@
 }
 #endif  // !CONFIG_ANS
 
-// TODO(slavarnway): Decode version of vp10_set_context.  Modify
-// vp10_set_context
+// TODO(slavarnway): Decode version of av1_set_context.  Modify
+// av1_set_context
 // after testing is complete, then delete this version.
 static void dec_set_contexts(const MACROBLOCKD *xd,
                              struct macroblockd_plane *pd, TX_SIZE tx_size,
@@ -459,8 +459,8 @@
   }
 }
 
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
-                                vp10_reader *r) {
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
+                               aom_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -472,31 +472,31 @@
   int n = mbmi->palette_mode_info.palette_size[plane != 0];
   int i, j;
   uint8_t *color_map = xd->plane[plane != 0].color_index_map;
-  const vpx_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
-      plane ? vp10_default_palette_uv_color_prob
-            : vp10_default_palette_y_color_prob;
+  const aom_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+      plane ? av1_default_palette_uv_color_prob
+            : av1_default_palette_y_color_prob;
 
   for (i = 0; i < rows; ++i) {
     for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
       color_ctx =
-          vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
-      color_idx = vp10_read_tree(r, vp10_palette_color_tree[n - 2],
-                                 prob[n - 2][color_ctx]);
+          av1_get_palette_color_context(color_map, cols, i, j, n, color_order);
+      color_idx = aom_read_tree(r, av1_palette_color_tree[n - 2],
+                                prob[n - 2][color_ctx]);
       assert(color_idx >= 0 && color_idx < n);
       color_map[i * cols + j] = color_order[color_idx];
     }
   }
 }
 
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
-                             const scan_order *sc, int x, int y,
-                             TX_SIZE tx_size, TX_TYPE tx_type,
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+                            const scan_order *sc, int x, int y, TX_SIZE tx_size,
+                            TX_TYPE tx_type,
 #if CONFIG_ANS
-                             struct AnsDecoder *const r,
+                            struct AnsDecoder *const r,
 #else
-                             vp10_reader *r,
+                            aom_reader *r,
 #endif  // CONFIG_ANS
-                             int seg_id) {
+                            int seg_id) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int16_t *const dequant = pd->seg_dequant[seg_id];
   const int ctx =
@@ -528,7 +528,7 @@
 #endif  // !CONFIG_ANS
   dec_set_contexts(xd, pd, tx_size, eob > 0, x, y);
   /*
-  vp10_set_contexts(xd, pd,
+  av1_set_contexts(xd, pd,
                     get_plane_block_size(xd->mi[0]->mbmi.sb_type, pd),
                     tx_size, eob > 0, x, y);
                     */
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 279c193..959e374 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DETOKENIZE_H_
-#define VP10_DECODER_DETOKENIZE_H_
+#ifndef AV1_DECODER_DETOKENIZE_H_
+#define AV1_DECODER_DETOKENIZE_H_
 
 #include "av1/decoder/decoder.h"
 #include "av1/common/ans.h"
@@ -19,20 +19,19 @@
 extern "C" {
 #endif
 
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
-                                vp10_reader *r);
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
-                             const scan_order *sc, int x, int y,
-                             TX_SIZE tx_size, TX_TYPE tx_type,
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane, aom_reader *r);
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+                            const scan_order *sc, int x, int y, TX_SIZE tx_size,
+                            TX_TYPE tx_type,
 #if CONFIG_ANS
-                             struct AnsDecoder *const r,
+                            struct AnsDecoder *const r,
 #else
-                             vp10_reader *r,
+                            aom_reader *r,
 #endif  // CONFIG_ANS
-                             int seg_id);
+                            int seg_id);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DETOKENIZE_H_
+#endif  // AV1_DECODER_DETOKENIZE_H_
diff --git a/av1/decoder/dsubexp.c b/av1/decoder/dsubexp.c
index 146a1de..c0fee8d 100644
--- a/av1/decoder/dsubexp.c
+++ b/av1/decoder/dsubexp.c
@@ -20,11 +20,11 @@
   return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
 }
 
-static int decode_uniform(vp10_reader *r) {
+static int decode_uniform(aom_reader *r) {
   const int l = 8;
   const int m = (1 << l) - 190;
-  const int v = vp10_read_literal(r, l - 1);
-  return v < m ? v : (v << 1) - m + vp10_read_bit(r);
+  const int v = aom_read_literal(r, l - 1);
+  return v < m ? v : (v << 1) - m + aom_read_bit(r);
 }
 
 static int inv_remap_prob(int v, int m) {
@@ -57,24 +57,24 @@
   }
 }
 
-static int decode_term_subexp(vp10_reader *r) {
-  if (!vp10_read_bit(r)) return vp10_read_literal(r, 4);
-  if (!vp10_read_bit(r)) return vp10_read_literal(r, 4) + 16;
-  if (!vp10_read_bit(r)) return vp10_read_literal(r, 5) + 32;
+static int decode_term_subexp(aom_reader *r) {
+  if (!aom_read_bit(r)) return aom_read_literal(r, 4);
+  if (!aom_read_bit(r)) return aom_read_literal(r, 4) + 16;
+  if (!aom_read_bit(r)) return aom_read_literal(r, 5) + 32;
   return decode_uniform(r) + 64;
 }
 
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p) {
-  if (vp10_read(r, DIFF_UPDATE_PROB)) {
+void av1_diff_update_prob(aom_reader *r, aom_prob *p) {
+  if (aom_read(r, DIFF_UPDATE_PROB)) {
     const int delp = decode_term_subexp(r);
-    *p = (vpx_prob)inv_remap_prob(delp, *p);
+    *p = (aom_prob)inv_remap_prob(delp, *p);
   }
 }
 
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits) {
-  if (vp10_read_bit(r)) {
-    int s = vp10_read_bit(r);
-    int x = vp10_read_literal(r, mag_bits) + 1;
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits) {
+  if (aom_read_bit(r)) {
+    int s = aom_read_bit(r);
+    int x = aom_read_literal(r, mag_bits) + 1;
     return (s > 0 ? -x : x);
   } else {
     return 0;
diff --git a/av1/decoder/dsubexp.h b/av1/decoder/dsubexp.h
index b8980f7..8587395 100644
--- a/av1/decoder/dsubexp.h
+++ b/av1/decoder/dsubexp.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DSUBEXP_H_
-#define VP10_DECODER_DSUBEXP_H_
+#ifndef AV1_DECODER_DSUBEXP_H_
+#define AV1_DECODER_DSUBEXP_H_
 
 #include "av1/decoder/bitreader.h"
 
@@ -17,7 +17,7 @@
 extern "C" {
 #endif
 
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p);
+void av1_diff_update_prob(aom_reader *r, aom_prob *p);
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -27,5 +27,5 @@
 // 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
 // indicate 0 or non-zero, mag_bits bits are used to indicate magnitide
 // and 1 more bit for the sign if non-zero.
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits);
-#endif  // VP10_DECODER_DSUBEXP_H_
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits);
+#endif  // AV1_DECODER_DSUBEXP_H_
diff --git a/av1/decoder/dthread.c b/av1/decoder/dthread.c
index d9a2ce1..6f6a934 100644
--- a/av1/decoder/dthread.c
+++ b/av1/decoder/dthread.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
 #include "av1/common/reconinter.h"
 #include "av1/decoder/dthread.h"
 #include "av1/decoder/decoder.h"
@@ -17,7 +17,7 @@
 // #define DEBUG_THREAD
 
 // TODO(hkuang): Clean up all the #ifdef in this file.
-void vp10_frameworker_lock_stats(VPxWorker *const worker) {
+void av1_frameworker_lock_stats(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
   pthread_mutex_lock(&worker_data->stats_mutex);
@@ -26,7 +26,7 @@
 #endif
 }
 
-void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
+void av1_frameworker_unlock_stats(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
   pthread_mutex_unlock(&worker_data->stats_mutex);
@@ -35,7 +35,7 @@
 #endif
 }
 
-void vp10_frameworker_signal_stats(VPxWorker *const worker) {
+void av1_frameworker_signal_stats(AVxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
 
@@ -59,8 +59,8 @@
 #endif
 
 // TODO(hkuang): Remove worker parameter as it is only used in debug code.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
-                           int row) {
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+                          int row) {
 #if CONFIG_MULTITHREAD
   if (!ref_buf) return;
 
@@ -73,10 +73,10 @@
   {
     // Find the worker thread that owns the reference frame. If the reference
     // frame has been fully decoded, it may not have owner.
-    VPxWorker *const ref_worker = ref_buf->frame_worker_owner;
+    AVxWorker *const ref_worker = ref_buf->frame_worker_owner;
     FrameWorkerData *const ref_worker_data =
         (FrameWorkerData *)ref_worker->data1;
-    const VP10Decoder *const pbi = ref_worker_data->pbi;
+    const AV1Decoder *const pbi = ref_worker_data->pbi;
 
 #ifdef DEBUG_THREAD
     {
@@ -87,7 +87,7 @@
     }
 #endif
 
-    vp10_frameworker_lock_stats(ref_worker);
+    av1_frameworker_lock_stats(ref_worker);
     while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
            ref_buf->buf.corrupted != 1) {
       pthread_cond_wait(&ref_worker_data->stats_cond,
@@ -96,12 +96,12 @@
 
     if (ref_buf->buf.corrupted == 1) {
       FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
-      vp10_frameworker_unlock_stats(ref_worker);
-      vpx_internal_error(&worker_data->pbi->common.error,
-                         VPX_CODEC_CORRUPT_FRAME,
+      av1_frameworker_unlock_stats(ref_worker);
+      aom_internal_error(&worker_data->pbi->common.error,
+                         AOM_CODEC_CORRUPT_FRAME,
                          "Worker %p failed to decode frame", worker);
     }
-    vp10_frameworker_unlock_stats(ref_worker);
+    av1_frameworker_unlock_stats(ref_worker);
   }
 #else
   (void)worker;
@@ -111,9 +111,9 @@
 #endif  // CONFIG_MULTITHREAD
 }
 
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row) {
 #if CONFIG_MULTITHREAD
-  VPxWorker *worker = buf->frame_worker_owner;
+  AVxWorker *worker = buf->frame_worker_owner;
 
 #ifdef DEBUG_THREAD
   {
@@ -123,27 +123,27 @@
   }
 #endif
 
-  vp10_frameworker_lock_stats(worker);
+  av1_frameworker_lock_stats(worker);
   buf->row = row;
-  vp10_frameworker_signal_stats(worker);
-  vp10_frameworker_unlock_stats(worker);
+  av1_frameworker_signal_stats(worker);
+  av1_frameworker_unlock_stats(worker);
 #else
   (void)buf;
   (void)row;
 #endif  // CONFIG_MULTITHREAD
 }
 
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
-                                   VPxWorker *const src_worker) {
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+                                  AVxWorker *const src_worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
   FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
-  VP10_COMMON *const src_cm = &src_worker_data->pbi->common;
-  VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+  AV1_COMMON *const src_cm = &src_worker_data->pbi->common;
+  AV1_COMMON *const dst_cm = &dst_worker_data->pbi->common;
   int i;
 
   // Wait until source frame's context is ready.
-  vp10_frameworker_lock_stats(src_worker);
+  av1_frameworker_lock_stats(src_worker);
   while (!src_worker_data->frame_context_ready) {
     pthread_cond_wait(&src_worker_data->stats_cond,
                       &src_worker_data->stats_mutex);
@@ -153,10 +153,10 @@
                                    ? src_cm->current_frame_seg_map
                                    : src_cm->last_frame_seg_map;
   dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
-  vp10_frameworker_unlock_stats(src_worker);
+  av1_frameworker_unlock_stats(src_worker);
 
   dst_cm->bit_depth = src_cm->bit_depth;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
 #endif
 #if CONFIG_EXT_REFS
diff --git a/av1/decoder/dthread.h b/av1/decoder/dthread.h
index ef548b6..84fb714 100644
--- a/av1/decoder/dthread.h
+++ b/av1/decoder/dthread.h
@@ -8,24 +8,24 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_DECODER_DTHREAD_H_
-#define VP10_DECODER_DTHREAD_H_
+#ifndef AV1_DECODER_DTHREAD_H_
+#define AV1_DECODER_DTHREAD_H_
 
-#include "./vpx_config.h"
-#include "aom_util/vpx_thread.h"
-#include "aom/internal/vpx_codec_internal.h"
+#include "./aom_config.h"
+#include "aom_util/aom_thread.h"
+#include "aom/internal/aom_codec_internal.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
-struct VP10Decoder;
+struct AV1Common;
+struct AV1Decoder;
 
 // WorkerData for the FrameWorker thread. It contains all the information of
 // the worker and decode structures for decoding a frame.
 typedef struct FrameWorkerData {
-  struct VP10Decoder *pbi;
+  struct AV1Decoder *pbi;
   const uint8_t *data;
   const uint8_t *data_end;
   size_t data_size;
@@ -48,27 +48,27 @@
   int frame_decoded;        // Finished decoding current frame.
 } FrameWorkerData;
 
-void vp10_frameworker_lock_stats(VPxWorker *const worker);
-void vp10_frameworker_unlock_stats(VPxWorker *const worker);
-void vp10_frameworker_signal_stats(VPxWorker *const worker);
+void av1_frameworker_lock_stats(AVxWorker *const worker);
+void av1_frameworker_unlock_stats(AVxWorker *const worker);
+void av1_frameworker_signal_stats(AVxWorker *const worker);
 
 // Wait until ref_buf has been decoded to row in real pixel unit.
 // Note: worker may already finish decoding ref_buf and release it in order to
 // start decoding next frame. So need to check whether worker is still decoding
 // ref_buf.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
-                           int row);
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+                          int row);
 
 // FrameWorker broadcasts its decoding progress so other workers that are
 // waiting on it can resume decoding.
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row);
 
 // Copy necessary decoding context from src worker to dst worker.
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
-                                   VPxWorker *const src_worker);
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+                                  AVxWorker *const src_worker);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DTHREAD_H_
+#endif  // AV1_DECODER_DTHREAD_H_
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
index 173556e..485e4c9 100644
--- a/av1/encoder/aq_complexity.c
+++ b/av1/encoder/aq_complexity.c
@@ -16,7 +16,7 @@
 #include "av1/encoder/encodeframe.h"
 #include "av1/common/seg_common.h"
 #include "av1/encoder/segmentation.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/system_state.h"
 
 #define AQ_C_SEGMENTS 5
@@ -40,18 +40,18 @@
 
 #define DEFAULT_COMPLEXITY 64
 
-static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
+static int get_aq_c_strength(int q_index, aom_bit_depth_t bit_depth) {
   // Approximate base quatizer (truncated to int)
-  const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
+  const int base_quant = av1_ac_quant(q_index, 0, bit_depth) / 4;
   return (base_quant > 10) + (base_quant > 25);
 }
 
-void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   struct segmentation *const seg = &cm->seg;
 
   // Make SURE use of floating point in this function is safe.
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
       cpi->refresh_alt_ref_frame ||
@@ -62,22 +62,22 @@
     // Clear down the segment map.
     memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
 
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
 
     // Segmentation only makes sense if the target bits per SB is above a
     // threshold. Below this the overheads will usually outweigh any benefit.
     if (cpi->rc.sb64_target_rate < 256) {
-      vp10_disable_segmentation(seg);
+      av1_disable_segmentation(seg);
       return;
     }
 
-    vp10_enable_segmentation(seg);
+    av1_enable_segmentation(seg);
 
     // Select delta coding method.
     seg->abs_delta = SEGMENT_DELTADATA;
 
     // Default segment "Q" feature is disabled so it defaults to the baseline Q.
-    vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
+    av1_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
 
     // Use some of the segments for in frame Q adjustment.
     for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
@@ -85,7 +85,7 @@
 
       if (segment == DEFAULT_AQ2_SEG) continue;
 
-      qindex_delta = vp10_compute_qdelta_by_rate(
+      qindex_delta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, cm->base_qindex,
           aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
 
@@ -97,8 +97,8 @@
         qindex_delta = -cm->base_qindex + 1;
       }
       if ((cm->base_qindex + qindex_delta) > 0) {
-        vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
-        vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+        av1_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+        av1_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
       }
     }
   }
@@ -110,13 +110,13 @@
 // Select a segment for the current block.
 // The choice of segment for a block depends on the ratio of the projected
 // bits for the block vs a target average and its spatial complexity.
-void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
-                             int mi_row, int mi_col, int projected_rate) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+                            int mi_row, int mi_col, int projected_rate) {
+  AV1_COMMON *const cm = &cpi->common;
 
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
-  const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
-  const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]);
+  const int xmis = AOMMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
+  const int ymis = AOMMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]);
   int x, y;
   int i;
   unsigned char segment;
@@ -132,13 +132,13 @@
     double low_var_thresh;
     const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
 
-    vpx_clear_system_state();
-    low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+    aom_clear_system_state();
+    low_var_thresh = (cpi->oxcf.pass == 2) ? AOMMAX(cpi->twopass.mb_av_energy,
                                                     MIN_DEFAULT_LV_THRESH)
                                            : DEFAULT_LV_THRESH;
 
-    vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
-    logvar = vp10_log_block_var(cpi, mb, bs);
+    av1_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
+    logvar = av1_log_block_var(cpi, mb, bs);
 
     segment = AQ_C_SEGMENTS - 1;  // Just in case no break out below.
     for (i = 0; i < AQ_C_SEGMENTS; ++i) {
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
index db85406..465b8d72 100644
--- a/av1/encoder/aq_complexity.h
+++ b/av1/encoder/aq_complexity.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
-#define VP10_ENCODER_AQ_COMPLEXITY_H_
+#ifndef AV1_ENCODER_AQ_COMPLEXITY_H_
+#define AV1_ENCODER_AQ_COMPLEXITY_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -17,20 +17,20 @@
 
 #include "av1/common/enums.h"
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 
 // Select a segment for the current Block.
-void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
-                             BLOCK_SIZE bs, int mi_row, int mi_col,
-                             int projected_rate);
+void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
+                            BLOCK_SIZE bs, int mi_row, int mi_col,
+                            int projected_rate);
 
 // This function sets up a set of segments with delta Q values around
 // the baseline frame quantizer.
-void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi);
+void av1_setup_in_frame_q_adj(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_COMPLEXITY_H_
+#endif  // AV1_ENCODER_AQ_COMPLEXITY_H_
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
index b7897f9..cbd8cc1 100644
--- a/av1/encoder/aq_cyclicrefresh.c
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -15,7 +15,7 @@
 #include "av1/encoder/aq_cyclicrefresh.h"
 #include "av1/encoder/ratectrl.h"
 #include "av1/encoder/segmentation.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/system_state.h"
 
 struct CYCLIC_REFRESH {
@@ -56,20 +56,20 @@
   int qindex_delta[3];
 };
 
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
   size_t last_coded_q_map_size;
-  CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
+  CYCLIC_REFRESH *const cr = aom_calloc(1, sizeof(*cr));
   if (cr == NULL) return NULL;
 
-  cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
+  cr->map = aom_calloc(mi_rows * mi_cols, sizeof(*cr->map));
   if (cr->map == NULL) {
-    vp10_cyclic_refresh_free(cr);
+    av1_cyclic_refresh_free(cr);
     return NULL;
   }
   last_coded_q_map_size = mi_rows * mi_cols * sizeof(*cr->last_coded_q_map);
-  cr->last_coded_q_map = vpx_malloc(last_coded_q_map_size);
+  cr->last_coded_q_map = aom_malloc(last_coded_q_map_size);
   if (cr->last_coded_q_map == NULL) {
-    vp10_cyclic_refresh_free(cr);
+    av1_cyclic_refresh_free(cr);
     return NULL;
   }
   assert(MAXQ <= 255);
@@ -78,14 +78,14 @@
   return cr;
 }
 
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
-  vpx_free(cr->map);
-  vpx_free(cr->last_coded_q_map);
-  vpx_free(cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
+  aom_free(cr->map);
+  aom_free(cr->last_coded_q_map);
+  aom_free(cr);
 }
 
 // Check if we should turn off cyclic refresh based on bitrate condition.
-static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
+static int apply_cyclic_refresh_bitrate(const AV1_COMMON *cm,
                                         const RATE_CONTROL *rc) {
   // Turn off cyclic refresh if bits available per frame is not sufficiently
   // larger than bit cost of segmentation. Segment map bit cost should scale
@@ -133,11 +133,11 @@
 }
 
 // Compute delta-q for the segment.
-static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
+static int compute_deltaq(const AV1_COMP *cpi, int q, double rate_factor) {
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const RATE_CONTROL *const rc = &cpi->rc;
-  int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
-                                           rate_factor, cpi->common.bit_depth);
+  int deltaq = av1_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+                                          rate_factor, cpi->common.bit_depth);
   if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
     deltaq = -cr->max_qdelta_perc * q / 100;
   }
@@ -148,9 +148,9 @@
 // from non-base segment. For now ignore effect of multiple segments
 // (with different delta-q). Note this function is called in the postencode
 // (called from rc_update_rate_correction_factors()).
-int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
-                                           double correction_factor) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_cyclic_refresh_estimate_bits_at_q(const AV1_COMP *cpi,
+                                          double correction_factor) {
+  const AV1_COMMON *const cm = &cpi->common;
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int estimated_bits;
   int mbs = cm->MBs;
@@ -162,16 +162,16 @@
   // Take segment weighted average for estimated bits.
   estimated_bits =
       (int)((1.0 - weight_segment1 - weight_segment2) *
-                vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
-                                        correction_factor, cm->bit_depth) +
+                av1_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+                                       correction_factor, cm->bit_depth) +
             weight_segment1 *
-                vp10_estimate_bits_at_q(cm->frame_type,
-                                        cm->base_qindex + cr->qindex_delta[1],
-                                        mbs, correction_factor, cm->bit_depth) +
+                av1_estimate_bits_at_q(cm->frame_type,
+                                       cm->base_qindex + cr->qindex_delta[1],
+                                       mbs, correction_factor, cm->bit_depth) +
             weight_segment2 *
-                vp10_estimate_bits_at_q(cm->frame_type,
-                                        cm->base_qindex + cr->qindex_delta[2],
-                                        mbs, correction_factor, cm->bit_depth));
+                av1_estimate_bits_at_q(cm->frame_type,
+                                       cm->base_qindex + cr->qindex_delta[2],
+                                       mbs, correction_factor, cm->bit_depth));
   return estimated_bits;
 }
 
@@ -180,9 +180,9 @@
 // rc_regulate_q() to set the base qp index.
 // Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
 // to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
-int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
-                                       double correction_factor) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
+                                      double correction_factor) {
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int bits_per_mb;
   int num8x8bl = cm->MBs << 2;
@@ -196,29 +196,28 @@
   // Compute delta-q corresponding to qindex i.
   int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
   // Take segment weighted average for bits per mb.
-  bits_per_mb =
-      (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
-                                                         correction_factor,
-                                                         cm->bit_depth) +
-            weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
-                                                 correction_factor,
-                                                 cm->bit_depth));
+  bits_per_mb = (int)((1.0 - weight_segment) *
+                          av1_rc_bits_per_mb(cm->frame_type, i,
+                                             correction_factor, cm->bit_depth) +
+                      weight_segment *
+                          av1_rc_bits_per_mb(cm->frame_type, i + deltaq,
+                                             correction_factor, cm->bit_depth));
   return bits_per_mb;
 }
 
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
-                                        MB_MODE_INFO *const mbmi, int mi_row,
-                                        int mi_col, BLOCK_SIZE bsize,
-                                        int64_t rate, int64_t dist, int skip) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
+                                       MB_MODE_INFO *const mbmi, int mi_row,
+                                       int mi_col, BLOCK_SIZE bsize,
+                                       int64_t rate, int64_t dist, int skip) {
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
-  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
-  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int xmis = AOMMIN(cm->mi_cols - mi_col, bw);
+  const int ymis = AOMMIN(cm->mi_rows - mi_row, bh);
   const int block_index = mi_row * cm->mi_cols + mi_col;
   const int refresh_this_block =
       candidate_refresh_aq(cr, mbmi, rate, dist, bsize);
@@ -269,7 +268,7 @@
       } else if (is_inter_block(mbmi) && skip &&
                  mbmi->segment_id <= CR_SEGMENT_ID_BOOST2) {
         cr->last_coded_q_map[map_offset] =
-            VPXMIN(clamp(cm->base_qindex + cr->qindex_delta[mbmi->segment_id],
+            AOMMIN(clamp(cm->base_qindex + cr->qindex_delta[mbmi->segment_id],
                          0, MAXQ),
                    cr->last_coded_q_map[map_offset]);
       }
@@ -277,8 +276,8 @@
 }
 
 // Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_postencode(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   unsigned char *const seg_map = cpi->segmentation_map;
   int mi_row, mi_col;
@@ -297,7 +296,7 @@
 }
 
 // Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_set_golden_update(AV1_COMP *const cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   // Set minimum gf_interval for GF update to a multiple (== 2) of refresh
@@ -313,8 +312,8 @@
 // background has high motion, refresh the golden frame. Otherwise, if the
 // golden reference is to be updated check if we should NOT update the golden
 // ref.
-void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_check_golden_update(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int mi_row, mi_col;
   double fraction_low = 0.0;
@@ -356,7 +355,7 @@
   // the resolution (resize_pending != 0).
   if (cpi->resize_pending != 0 ||
       (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
-    vp10_cyclic_refresh_set_golden_update(cpi);
+    av1_cyclic_refresh_set_golden_update(cpi);
     rc->frames_till_gf_update_due = rc->baseline_gf_interval;
 
     if (rc->frames_till_gf_update_due > rc->frames_to_key)
@@ -385,8 +384,8 @@
 // 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock.
 // Blocks labeled as BOOST1 may later get set to BOOST2 (during the
 // encoding of the superblock).
-static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   unsigned char *const seg_map = cpi->segmentation_map;
   int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
@@ -411,15 +410,15 @@
     int mi_row = sb_row_index * cm->mib_size;
     int mi_col = sb_col_index * cm->mib_size;
     int qindex_thresh =
-        cpi->oxcf.content == VPX_CONTENT_SCREEN
-            ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+        cpi->oxcf.content == AOM_CONTENT_SCREEN
+            ? av1_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
             : 0;
     assert(mi_row >= 0 && mi_row < cm->mi_rows);
     assert(mi_col >= 0 && mi_col < cm->mi_cols);
     bl_index = mi_row * cm->mi_cols + mi_col;
     // Loop through all MI blocks in superblock and update map.
-    xmis = VPXMIN(cm->mi_cols - mi_col, cm->mib_size);
-    ymis = VPXMIN(cm->mi_rows - mi_row, cm->mib_size);
+    xmis = AOMMIN(cm->mi_cols - mi_col, cm->mib_size);
+    ymis = AOMMIN(cm->mi_rows - mi_row, cm->mib_size);
     for (y = 0; y < ymis; y++) {
       for (x = 0; x < xmis; x++) {
         const int bl_index2 = bl_index + y * cm->mi_cols + x;
@@ -451,9 +450,9 @@
 }
 
 // Set cyclic refresh parameters.
-void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   cr->percent_refresh = 10;
   cr->max_qdelta_perc = 50;
@@ -475,8 +474,8 @@
 }
 
 // Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   struct segmentation *const seg = &cm->seg;
@@ -487,7 +486,7 @@
     // Set segmentation map to 0 and disable.
     unsigned char *const seg_map = cpi->segmentation_map;
     memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
-    vp10_disable_segmentation(&cm->seg);
+    av1_disable_segmentation(&cm->seg);
     if (cm->frame_type == KEY_FRAME) {
       memset(cr->last_coded_q_map, MAXQ,
              cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
@@ -497,37 +496,37 @@
   } else {
     int qindex_delta = 0;
     int qindex2;
-    const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
-    vpx_clear_system_state();
+    const double q = av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
+    aom_clear_system_state();
     // Set rate threshold to some multiple (set to 2 for now) of the target
     // rate (target is given by sb64_target_rate and scaled by 256).
     cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
     // Distortion threshold, quadratic in Q, scale factor to be adjusted.
     // q will not exceed 457, so (q * q) is within 32bit; see:
-    // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[].
+    // av1_convert_qindex_to_q(), av1_ac_quant(), ac_qlookup*[].
     cr->thresh_dist_sb = ((int64_t)(q * q)) << 2;
 
     // Set up segmentation.
     // Clear down the segment map.
-    vp10_enable_segmentation(&cm->seg);
-    vp10_clearall_segfeatures(seg);
+    av1_enable_segmentation(&cm->seg);
+    av1_clearall_segfeatures(seg);
     // Select delta coding method.
     seg->abs_delta = SEGMENT_DELTADATA;
 
     // Note: setting temporal_update has no effect, as the seg-map coding method
     // (temporal or spatial) is determined in
-    // vp10_choose_segmap_coding_method(),
+    // av1_choose_segmap_coding_method(),
     // based on the coding cost of each method. For error_resilient mode on the
     // last_frame_seg_map is set to 0, so if temporal coding is used, it is
     // relative to 0 previous map.
     // seg->temporal_update = 0;
 
     // Segment BASE "Q" feature is disabled so it defaults to the baseline Q.
-    vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
+    av1_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
     // Use segment BOOST1 for in-frame Q adjustment.
-    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
+    av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
     // Use segment BOOST2 for more aggressive in-frame Q adjustment.
-    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
+    av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
 
     // Set the q delta for segment BOOST1.
     qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta);
@@ -536,29 +535,29 @@
     // Compute rd-mult for segment BOOST1.
     qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ);
 
-    cr->rdmult = vp10_compute_rd_mult(cpi, qindex2);
+    cr->rdmult = av1_compute_rd_mult(cpi, qindex2);
 
-    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
+    av1_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
 
     // Set a more aggressive (higher) q delta for segment BOOST2.
     qindex_delta = compute_deltaq(
         cpi, cm->base_qindex,
-        VPXMIN(CR_MAX_RATE_TARGET_RATIO,
+        AOMMIN(CR_MAX_RATE_TARGET_RATIO,
                0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta));
     cr->qindex_delta[2] = qindex_delta;
-    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
+    av1_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
 
     // Update the segmentation and refresh map.
     cyclic_refresh_update_map(cpi);
   }
 }
 
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
   return cr->rdmult;
 }
 
-void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   memset(cr->map, 0, cm->mi_rows * cm->mi_cols);
   cr->sb_index = 0;
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
index 24491fc..3e59dfd 100644
--- a/av1/encoder/aq_cyclicrefresh.h
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
-#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#ifndef AV1_ENCODER_AQ_CYCLICREFRESH_H_
+#define AV1_ENCODER_AQ_CYCLICREFRESH_H_
 
 #include "av1/common/blockd.h"
 
@@ -26,55 +26,55 @@
 // Maximum rate target ratio for setting segment delta-qp.
 #define CR_MAX_RATE_TARGET_RATIO 4.0
 
-struct VP10_COMP;
+struct AV1_COMP;
 
 struct CYCLIC_REFRESH;
 typedef struct CYCLIC_REFRESH CYCLIC_REFRESH;
 
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols);
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols);
 
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr);
 
 // Estimate the bits, incorporating the delta-q from segment 1, after encoding
 // the frame.
-int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
-                                           double correction_factor);
+int av1_cyclic_refresh_estimate_bits_at_q(const struct AV1_COMP *cpi,
+                                          double correction_factor);
 
 // Estimate the bits per mb, for a given q = i and a corresponding delta-q
 // (for segment 1), prior to encoding the frame.
-int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
-                                       double correction_factor);
+int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
+                                      double correction_factor);
 
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
-                                        MB_MODE_INFO *const mbmi, int mi_row,
-                                        int mi_col, BLOCK_SIZE bsize,
-                                        int64_t rate, int64_t dist, int skip);
+void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
+                                       MB_MODE_INFO *const mbmi, int mi_row,
+                                       int mi_col, BLOCK_SIZE bsize,
+                                       int64_t rate, int64_t dist, int skip);
 
 // Update the segmentation map, and related quantities: cyclic refresh map,
 // refresh sb_index, and target number of blocks to be refreshed.
-void vp10_cyclic_refresh_update__map(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update__map(struct AV1_COMP *const cpi);
 
 // Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_postencode(struct AV1_COMP *const cpi);
 
 // Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_set_golden_update(struct AV1_COMP *const cpi);
 
 // Check if we should not update golden reference, based on past refresh stats.
-void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_check_golden_update(struct AV1_COMP *const cpi);
 
 // Set/update global/frame level refresh parameters.
-void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_parameters(struct AV1_COMP *const cpi);
 
 // Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_setup(struct AV1_COMP *const cpi);
 
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
 
-void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_reset_resize(struct AV1_COMP *const cpi);
 
 static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) {
   return segment_id == CR_SEGMENT_ID_BOOST1 ||
@@ -94,4 +94,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#endif  // AV1_ENCODER_AQ_CYCLICREFRESH_H_
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
index 2a529a1..4e31c35 100644
--- a/av1/encoder/aq_variance.c
+++ b/av1/encoder/aq_variance.c
@@ -32,19 +32,19 @@
 
 #define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
 
-DECLARE_ALIGNED(16, static const uint8_t, vp10_all_zeros[MAX_SB_SIZE]) = { 0 };
-#if CONFIG_VP9_HIGHBITDEPTH
+DECLARE_ALIGNED(16, static const uint8_t, av1_all_zeros[MAX_SB_SIZE]) = { 0 };
+#if CONFIG_AOM_HIGHBITDEPTH
 DECLARE_ALIGNED(16, static const uint16_t,
-                vp10_highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
+                av1_highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
 #endif
 
-unsigned int vp10_vaq_segment_id(int energy) {
+unsigned int av1_vaq_segment_id(int energy) {
   ENERGY_IN_BOUNDS(energy);
   return SEGMENT_ID(energy);
 }
 
-void vp10_vaq_frame_setup(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_vaq_frame_setup(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   struct segmentation *seg = &cm->seg;
   int i;
 
@@ -53,17 +53,17 @@
       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
     cpi->vaq_refresh = 1;
 
-    vp10_enable_segmentation(seg);
-    vp10_clearall_segfeatures(seg);
+    av1_enable_segmentation(seg);
+    av1_clearall_segfeatures(seg);
 
     seg->abs_delta = SEGMENT_DELTADATA;
 
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       int qindex_delta =
-          vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
-                                      rate_ratio[i], cm->bit_depth);
+          av1_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
+                                     rate_ratio[i], cm->bit_depth);
 
       // We don't allow qindex 0 in a segment if the base value is not 0.
       // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
@@ -78,8 +78,8 @@
         continue;
       }
 
-      vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
-      vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+      av1_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
+      av1_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
     }
   }
 }
@@ -107,7 +107,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
                                  const uint8_t *b8, int b_stride, int w, int h,
                                  uint64_t *sse, uint64_t *sum) {
@@ -138,9 +138,9 @@
   *sse = (unsigned int)sse_long;
   *sum = (int)sum_long;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
                                    BLOCK_SIZE bs) {
   MACROBLOCKD *xd = &x->e_mbd;
   unsigned int var, sse;
@@ -153,54 +153,54 @@
     const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
     const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
     int avg;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
-                           CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0, bw, bh,
+                           CONVERT_TO_BYTEPTR(av1_highbd_all_zeros), 0, bw, bh,
                            &sse, &avg);
       sse >>= 2 * (xd->bd - 8);
       avg >>= (xd->bd - 8);
     } else {
-      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros,
-                  0, bw, bh, &sse, &avg);
+      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_all_zeros, 0,
+                  bw, bh, &sse, &avg);
     }
 #else
-    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros, 0,
+    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_all_zeros, 0,
                 bw, bh, &sse, &avg);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     var = sse - (((int64_t)avg * avg) / (bw * bh));
     return (256 * var) / (bw * bh);
   } else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                               CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0,
-                               &sse);
+      var =
+          cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+                             CONVERT_TO_BYTEPTR(av1_highbd_all_zeros), 0, &sse);
     } else {
       var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                               vp10_all_zeros, 0, &sse);
+                               av1_all_zeros, 0, &sse);
     }
 #else
     var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                             vp10_all_zeros, 0, &sse);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                             av1_all_zeros, 0, &sse);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return (256 * var) >> num_pels_log2_lookup[bs];
   }
 }
 
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   unsigned int var = block_variance(cpi, x, bs);
-  vpx_clear_system_state();
+  aom_clear_system_state();
   return log(var + 1.0);
 }
 
 #define DEFAULT_E_MIDPOINT 10.0
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   double energy;
   double energy_midpoint;
-  vpx_clear_system_state();
+  aom_clear_system_state();
   energy_midpoint =
       (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
-  energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
+  energy = av1_log_block_var(cpi, x, bs) - energy_midpoint;
   return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
 }
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
index a30a449..346b4c7 100644
--- a/av1/encoder/aq_variance.h
+++ b/av1/encoder/aq_variance.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_AQ_VARIANCE_H_
-#define VP10_ENCODER_AQ_VARIANCE_H_
+#ifndef AV1_ENCODER_AQ_VARIANCE_H_
+#define AV1_ENCODER_AQ_VARIANCE_H_
 
 #include "av1/encoder/encoder.h"
 
@@ -17,14 +17,14 @@
 extern "C" {
 #endif
 
-unsigned int vp10_vaq_segment_id(int energy);
-void vp10_vaq_frame_setup(VP10_COMP *cpi);
+unsigned int av1_vaq_segment_id(int energy);
+void av1_vaq_frame_setup(AV1_COMP *cpi);
 
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_VARIANCE_H_
+#endif  // AV1_ENCODER_AQ_VARIANCE_H_
diff --git a/av1/encoder/arm/neon/dct_neon.c b/av1/encoder/arm/neon/dct_neon.c
index 1d77bec..3626e79 100644
--- a/av1/encoder/arm/neon/dct_neon.c
+++ b/av1/encoder/arm/neon/dct_neon.c
@@ -10,24 +10,26 @@
 
 #include <arm_neon.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "av1/common/blockd.h"
 #include "aom_dsp/txfm_common.h"
 
-void vp10_fdct8x8_quant_neon(
-    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
-    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
-    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
-    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void av1_fdct8x8_quant_neon(const int16_t *input, int stride,
+                            int16_t *coeff_ptr, intptr_t n_coeffs,
+                            int skip_block, const int16_t *zbin_ptr,
+                            const int16_t *round_ptr, const int16_t *quant_ptr,
+                            const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                            int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                            uint16_t *eob_ptr, const int16_t *scan_ptr,
+                            const int16_t *iscan_ptr) {
   int16_t temp_buffer[64];
   (void)coeff_ptr;
 
-  vpx_fdct8x8_neon(input, temp_buffer, stride);
-  vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
-                        quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
-                        dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+  aom_fdct8x8_neon(input, temp_buffer, stride);
+  av1_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+                       quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+                       dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
 }
diff --git a/av1/encoder/arm/neon/error_neon.c b/av1/encoder/arm/neon/error_neon.c
index 34805d3..65372b5 100644
--- a/av1/encoder/arm/neon/error_neon.c
+++ b/av1/encoder/arm/neon/error_neon.c
@@ -11,10 +11,10 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 
-int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
-                                 int block_size) {
+int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+                                int block_size) {
   int64x2_t error = vdupq_n_s64(0);
 
   assert(block_size >= 8);
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
index db85b4d..5aeead1 100644
--- a/av1/encoder/arm/neon/quantize_neon.c
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -12,7 +12,7 @@
 
 #include <math.h>
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/quant_common.h"
 #include "av1/common/seg_common.h"
@@ -21,13 +21,13 @@
 #include "av1/encoder/quantize.h"
 #include "av1/encoder/rd.h"
 
-void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
-                           int skip_block, const int16_t *zbin_ptr,
-                           const int16_t *round_ptr, const int16_t *quant_ptr,
-                           const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
-                           int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-                           uint16_t *eob_ptr, const int16_t *scan,
-                           const int16_t *iscan) {
+void av1_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+                          int skip_block, const int16_t *zbin_ptr,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                          int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                          uint16_t *eob_ptr, const int16_t *scan,
+                          const int16_t *iscan) {
   // TODO(jingning) Decide the need of these arguments after the
   // quantization process is completed.
   (void)zbin_ptr;
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 30699b4..305a672 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -12,10 +12,10 @@
 #include <limits.h>
 #include <stdio.h>
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 #include "aom_dsp/bitwriter_buffer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem_ops.h"
 #include "aom_ports/system_state.h"
 #include "aom_util/debug_util.h"
@@ -46,28 +46,28 @@
 #include "av1/encoder/subexp.h"
 #include "av1/encoder/tokenize.h"
 
-static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+static const struct av1_token intra_mode_encodings[INTRA_MODES] = {
   { 0, 1 },  { 6, 3 },   { 28, 5 },  { 30, 5 }, { 58, 6 },
   { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
 };
 #if CONFIG_EXT_INTERP
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
     { { 0, 1 }, { 4, 3 }, { 6, 3 }, { 5, 3 }, { 7, 3 } };
 #else
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
     { { 0, 1 }, { 2, 2 }, { 3, 2 } };
 #endif  // CONFIG_EXT_INTERP
 #if CONFIG_EXT_PARTITION_TYPES
-static const struct vp10_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
+static const struct av1_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
   { 0, 1 },  { 4, 3 },  { 12, 4 }, { 7, 3 },
   { 10, 4 }, { 11, 4 }, { 26, 5 }, { 27, 5 }
 };
 #endif
-static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+static const struct av1_token partition_encodings[PARTITION_TYPES] = {
   { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
 };
 #if !CONFIG_REF_MV
-static const struct vp10_token inter_mode_encodings[INTER_MODES] =
+static const struct av1_token inter_mode_encodings[INTER_MODES] =
 #if CONFIG_EXT_INTER
     { { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 } };
 #else
@@ -75,16 +75,16 @@
 #endif  // CONFIG_EXT_INTER
 #endif
 #if CONFIG_EXT_INTER
-static const struct vp10_token
+static const struct av1_token
     inter_compound_mode_encodings[INTER_COMPOUND_MODES] = {
       { 2, 2 },  { 50, 6 }, { 51, 6 }, { 24, 5 }, { 52, 6 },
       { 53, 6 }, { 54, 6 }, { 55, 6 }, { 0, 1 },  { 7, 3 }
     };
 #endif  // CONFIG_EXT_INTER
-static const struct vp10_token palette_size_encodings[] = {
+static const struct av1_token palette_size_encodings[] = {
   { 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 30, 5 }, { 62, 6 }, { 63, 6 },
 };
-static const struct vp10_token
+static const struct av1_token
     palette_color_encodings[PALETTE_MAX_SIZE - 1][PALETTE_MAX_SIZE] = {
       { { 0, 1 }, { 1, 1 } },                                  // 2 colors
       { { 0, 1 }, { 2, 2 }, { 3, 2 } },                        // 3 colors
@@ -113,84 +113,84 @@
         { 127, 7 } },  // 8 colors
     };
 
-static const struct vp10_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
+static const struct av1_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
   { { 0, 1 }, { 1, 1 } },                      // Max tx_size is 8X8
   { { 0, 1 }, { 2, 2 }, { 3, 2 } },            // Max tx_size is 16X16
   { { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } },  // Max tx_size is 32X32
 };
 
-static INLINE void write_uniform(vp10_writer *w, int n, int v) {
+static INLINE void write_uniform(aom_writer *w, int n, int v) {
   int l = get_unsigned_bits(n);
   int m = (1 << l) - n;
   if (l == 0) return;
   if (v < m) {
-    vp10_write_literal(w, v, l - 1);
+    aom_write_literal(w, v, l - 1);
   } else {
-    vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
-    vp10_write_literal(w, (v - m) & 1, 1);
+    aom_write_literal(w, m + ((v - m) >> 1), l - 1);
+    aom_write_literal(w, (v - m) & 1, 1);
   }
 }
 
 #if CONFIG_EXT_TX
-static struct vp10_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
-static struct vp10_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
+static struct av1_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
+static struct av1_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
 #else
-static struct vp10_token ext_tx_encodings[TX_TYPES];
+static struct av1_token ext_tx_encodings[TX_TYPES];
 #endif  // CONFIG_EXT_TX
 #if CONFIG_GLOBAL_MOTION
-static struct vp10_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
+static struct av1_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
 #endif  // CONFIG_GLOBAL_MOTION
 #if CONFIG_EXT_INTRA
-static struct vp10_token intra_filter_encodings[INTRA_FILTERS];
+static struct av1_token intra_filter_encodings[INTRA_FILTERS];
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_EXT_INTER
-static struct vp10_token interintra_mode_encodings[INTERINTRA_MODES];
+static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static struct vp10_token motvar_encodings[MOTION_VARIATIONS];
+static struct av1_token motvar_encodings[MOTION_VARIATIONS];
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
-void vp10_encode_token_init(void) {
+void av1_encode_token_init(void) {
 #if CONFIG_EXT_TX
   int s;
   for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
-    vp10_tokens_from_tree(ext_tx_inter_encodings[s], vp10_ext_tx_inter_tree[s]);
+    av1_tokens_from_tree(ext_tx_inter_encodings[s], av1_ext_tx_inter_tree[s]);
   }
   for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
-    vp10_tokens_from_tree(ext_tx_intra_encodings[s], vp10_ext_tx_intra_tree[s]);
+    av1_tokens_from_tree(ext_tx_intra_encodings[s], av1_ext_tx_intra_tree[s]);
   }
 #else
-  vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+  av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
 #endif  // CONFIG_EXT_TX
 #if CONFIG_EXT_INTRA
-  vp10_tokens_from_tree(intra_filter_encodings, vp10_intra_filter_tree);
+  av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_EXT_INTER
-  vp10_tokens_from_tree(interintra_mode_encodings, vp10_interintra_mode_tree);
+  av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vp10_tokens_from_tree(motvar_encodings, vp10_motvar_tree);
+  av1_tokens_from_tree(motvar_encodings, av1_motvar_tree);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 #if CONFIG_GLOBAL_MOTION
-  vp10_tokens_from_tree(global_motion_types_encodings,
-                        vp10_global_motion_types_tree);
+  av1_tokens_from_tree(global_motion_types_encodings,
+                       av1_global_motion_types_tree);
 #endif  // CONFIG_GLOBAL_MOTION
 }
 
-static void write_intra_mode(vp10_writer *w, PREDICTION_MODE mode,
-                             const vpx_prob *probs) {
-  vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
+                             const aom_prob *probs) {
+  av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
 }
 
 #if CONFIG_EXT_INTER
-static void write_interintra_mode(vp10_writer *w, INTERINTRA_MODE mode,
-                                  const vpx_prob *probs) {
-  vp10_write_token(w, vp10_interintra_mode_tree, probs,
-                   &interintra_mode_encodings[mode]);
+static void write_interintra_mode(aom_writer *w, INTERINTRA_MODE mode,
+                                  const aom_prob *probs) {
+  av1_write_token(w, av1_interintra_mode_tree, probs,
+                  &interintra_mode_encodings[mode]);
 }
 #endif  // CONFIG_EXT_INTER
 
-static void write_inter_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_mode(AV1_COMMON *cm, aom_writer *w,
                              PREDICTION_MODE mode,
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
                              int is_compound,
@@ -198,53 +198,53 @@
                              const int16_t mode_ctx) {
 #if CONFIG_REF_MV
   const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
-  const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+  const aom_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
 #if CONFIG_EXT_INTER
-  vp10_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
+  aom_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
 
   if (!is_compound && (mode == NEWMV || mode == NEWFROMNEARMV))
-    vp10_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
+    aom_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
 
   if (mode != NEWMV && mode != NEWFROMNEARMV) {
 #else
-  vp10_write(w, mode != NEWMV, newmv_prob);
+  aom_write(w, mode != NEWMV, newmv_prob);
 
   if (mode != NEWMV) {
 #endif  // CONFIG_EXT_INTER
     const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
-    const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+    const aom_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
 
     if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
       assert(mode == ZEROMV);
       return;
     }
 
-    vp10_write(w, mode != ZEROMV, zeromv_prob);
+    aom_write(w, mode != ZEROMV, zeromv_prob);
 
     if (mode != ZEROMV) {
       int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
-      vpx_prob refmv_prob;
+      aom_prob refmv_prob;
 
       if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
       if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
       if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
 
       refmv_prob = cm->fc->refmv_prob[refmv_ctx];
-      vp10_write(w, mode != NEARESTMV, refmv_prob);
+      aom_write(w, mode != NEARESTMV, refmv_prob);
     }
   }
 #else
-  const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
+  const aom_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
   assert(is_inter_mode(mode));
-  vp10_write_token(w, vp10_inter_mode_tree, inter_probs,
-                   &inter_mode_encodings[INTER_OFFSET(mode)]);
+  av1_write_token(w, av1_inter_mode_tree, inter_probs,
+                  &inter_mode_encodings[INTER_OFFSET(mode)]);
 #endif
 }
 
 #if CONFIG_REF_MV
-static void write_drl_idx(const VP10_COMMON *cm, const MB_MODE_INFO *mbmi,
-                          const MB_MODE_INFO_EXT *mbmi_ext, vp10_writer *w) {
-  uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void write_drl_idx(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
+                          const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
+  uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
 
   assert(mbmi->ref_mv_idx < 3);
 
@@ -253,10 +253,10 @@
     for (idx = 0; idx < 2; ++idx) {
       if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
         uint8_t drl_ctx =
-            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
-        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+        aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
 
-        vp10_write(w, mbmi->ref_mv_idx != idx, drl_prob);
+        aom_write(w, mbmi->ref_mv_idx != idx, drl_prob);
         if (mbmi->ref_mv_idx == idx) return;
       }
     }
@@ -269,10 +269,10 @@
     for (idx = 1; idx < 3; ++idx) {
       if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
         uint8_t drl_ctx =
-            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
-        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+        aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
 
-        vp10_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
+        aom_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
         if (mbmi->ref_mv_idx == (idx - 1)) return;
       }
     }
@@ -282,40 +282,40 @@
 #endif
 
 #if CONFIG_EXT_INTER
-static void write_inter_compound_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_compound_mode(AV1_COMMON *cm, aom_writer *w,
                                       PREDICTION_MODE mode,
                                       const int16_t mode_ctx) {
-  const vpx_prob *const inter_compound_probs =
+  const aom_prob *const inter_compound_probs =
       cm->fc->inter_compound_mode_probs[mode_ctx];
 
   assert(is_inter_compound_mode(mode));
-  vp10_write_token(w, vp10_inter_compound_mode_tree, inter_compound_probs,
-                   &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
+  av1_write_token(w, av1_inter_compound_mode_tree, inter_compound_probs,
+                  &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
 }
 #endif  // CONFIG_EXT_INTER
 
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
                                 int max) {
-  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
+  aom_wb_write_literal(wb, data, get_unsigned_bits(max));
 }
 
-static void prob_diff_update(const vpx_tree_index *tree,
-                             vpx_prob probs[/*n - 1*/],
+static void prob_diff_update(const aom_tree_index *tree,
+                             aom_prob probs[/*n - 1*/],
                              const unsigned int counts[/*n - 1*/], int n,
-                             vp10_writer *w) {
+                             aom_writer *w) {
   int i;
   unsigned int branch_ct[32][2];
 
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
 
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i)
-    vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+    av1_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
 }
 
-static int prob_diff_update_savings(const vpx_tree_index *tree,
-                                    vpx_prob probs[/*n - 1*/],
+static int prob_diff_update_savings(const aom_tree_index *tree,
+                                    aom_prob probs[/*n - 1*/],
                                     const unsigned int counts[/*n - 1*/],
                                     int n) {
   int i;
@@ -324,17 +324,17 @@
 
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i) {
-    savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+    savings += av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
   }
   return savings;
 }
 
 #if CONFIG_VAR_TX
-static void write_tx_size_vartx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_tx_size_vartx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                 const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
-                                int blk_row, int blk_col, vp10_writer *w) {
+                                int blk_row, int blk_col, aom_writer *w) {
   const int tx_row = blk_row >> 1;
   const int tx_col = blk_col >> 1;
   int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
@@ -348,14 +348,14 @@
   if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
 
   if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
-    vp10_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
+    aom_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
     txfm_partition_update(xd->above_txfm_context + tx_col,
                           xd->left_txfm_context + tx_row, tx_size);
   } else {
     const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
     int bsl = b_width_log2_lookup[bsize];
     int i;
-    vp10_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
+    aom_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
 
     if (tx_size == TX_8X8) {
       txfm_partition_update(xd->above_txfm_context + tx_col,
@@ -373,17 +373,17 @@
   }
 }
 
-static void update_txfm_partition_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_partition_probs(AV1_COMMON *cm, aom_writer *w,
                                         FRAME_COUNTS *counts) {
   int k;
   for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
-    vp10_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
-                               counts->txfm_partition[k]);
+    av1_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
+                              counts->txfm_partition[k]);
 }
 #endif
 
-static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                                   vp10_writer *w) {
+static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                                   aom_writer *w) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
   // For sub8x8 blocks the tx_size symbol does not need to be sent
@@ -401,89 +401,87 @@
         IMPLIES(is_rect_tx(tx_size), tx_size == max_txsize_rect_lookup[bsize]));
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
 
-    vp10_write_token(w, vp10_tx_size_tree[tx_size_cat],
-                     cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
-                     &tx_size_encodings[tx_size_cat][coded_tx_size]);
+    av1_write_token(w, av1_tx_size_tree[tx_size_cat],
+                    cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
+                    &tx_size_encodings[tx_size_cat][coded_tx_size]);
   }
 }
 
 #if CONFIG_REF_MV
-static void update_inter_mode_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_inter_mode_probs(AV1_COMMON *cm, aom_writer *w,
                                     FRAME_COUNTS *counts) {
   int i;
   for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
-    vp10_cond_prob_diff_update(w, &cm->fc->newmv_prob[i],
-                               counts->newmv_mode[i]);
+    av1_cond_prob_diff_update(w, &cm->fc->newmv_prob[i], counts->newmv_mode[i]);
   for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
-    vp10_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
-                               counts->zeromv_mode[i]);
+    av1_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
+                              counts->zeromv_mode[i]);
   for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
-    vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
-                               counts->refmv_mode[i]);
+    av1_cond_prob_diff_update(w, &cm->fc->refmv_prob[i], counts->refmv_mode[i]);
   for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
-    vp10_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
+    av1_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
 #if CONFIG_EXT_INTER
-  vp10_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
+  av1_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
 #endif  // CONFIG_EXT_INTER
 }
 #endif
 
 #if CONFIG_EXT_INTER
-static void update_inter_compound_mode_probs(VP10_COMMON *cm, vp10_writer *w) {
-  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
-                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_inter_compound_mode_probs(AV1_COMMON *cm, aom_writer *w) {
+  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
   int i;
   int savings = 0;
   int do_update = 0;
   for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
     savings += prob_diff_update_savings(
-        vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+        av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
         cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES);
   }
   do_update = savings > savings_thresh;
-  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
       prob_diff_update(
-          vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+          av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
           cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, w);
     }
   }
 }
 #endif  // CONFIG_EXT_INTER
 
-static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                      int segment_id, const MODE_INFO *mi, vp10_writer *w) {
+static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                      int segment_id, const MODE_INFO *mi, aom_writer *w) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
     const int skip = mi->mbmi.skip;
-    vp10_write(w, skip, vp10_get_skip_prob(cm, xd));
+    aom_write(w, skip, av1_get_skip_prob(cm, xd));
     return skip;
   }
 }
 
-static void update_skip_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
                               FRAME_COUNTS *counts) {
   int k;
 
   for (k = 0; k < SKIP_CONTEXTS; ++k)
-    vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+    av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
 }
 
-static void update_switchable_interp_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
                                            FRAME_COUNTS *counts) {
   int j;
   for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
-    prob_diff_update(vp10_switchable_interp_tree,
+    prob_diff_update(av1_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[j],
                      counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
 }
 
 #if CONFIG_EXT_TX
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
-  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
-                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
   int i, j;
   int s;
   for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
@@ -492,16 +490,16 @@
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       if (!use_inter_ext_tx_for_txsize[s][i]) continue;
       savings += prob_diff_update_savings(
-          vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+          av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
           cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s]);
     }
     do_update = savings > savings_thresh;
-    vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+    aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
     if (do_update) {
       for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
         if (!use_inter_ext_tx_for_txsize[s][i]) continue;
         prob_diff_update(
-            vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+            av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
             cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s], w);
       }
     }
@@ -514,17 +512,17 @@
       if (!use_intra_ext_tx_for_txsize[s][i]) continue;
       for (j = 0; j < INTRA_MODES; ++j)
         savings += prob_diff_update_savings(
-            vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+            av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
             cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s]);
     }
     do_update = savings > savings_thresh;
-    vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+    aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
     if (do_update) {
       for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
         if (!use_intra_ext_tx_for_txsize[s][i]) continue;
         for (j = 0; j < INTRA_MODES; ++j)
           prob_diff_update(
-              vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+              av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
               cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s], w);
       }
     }
@@ -533,9 +531,9 @@
 
 #else
 
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
-  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
-                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
   int i, j;
 
   int savings = 0;
@@ -543,43 +541,43 @@
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
       savings += prob_diff_update_savings(
-          vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+          av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
           cm->counts.intra_ext_tx[i][j], TX_TYPES);
   }
   do_update = savings > savings_thresh;
-  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (j = 0; j < TX_TYPES; ++j)
-        prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+        prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
                          cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
     }
   }
   savings = 0;
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     savings +=
-        prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+        prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
                                  cm->counts.inter_ext_tx[i], TX_TYPES);
   }
   do_update = savings > savings_thresh;
-  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-      prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+      prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
                        cm->counts.inter_ext_tx[i], TX_TYPES, w);
     }
   }
 }
 #endif  // CONFIG_EXT_TX
 
-static void pack_palette_tokens(vp10_writer *w, const TOKENEXTRA **tp, int n,
+static void pack_palette_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
                                 int num) {
   int i;
   const TOKENEXTRA *p = *tp;
 
   for (i = 0; i < num; ++i) {
-    vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
-                     &palette_color_encodings[n - 2][p->token]);
+    av1_write_token(w, av1_palette_color_tree[n - 2], p->context_tree,
+                    &palette_color_encodings[n - 2][p->token]);
     ++p;
   }
 
@@ -587,25 +585,25 @@
 }
 
 #if CONFIG_SUPERTX
-static void update_supertx_probs(VP10_COMMON *cm, vp10_writer *w) {
-  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
-                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_supertx_probs(AV1_COMMON *cm, aom_writer *w) {
+  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
   int i, j;
   int savings = 0;
   int do_update = 0;
   for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
     for (j = 1; j < TX_SIZES; ++j) {
-      savings += vp10_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
-                                                    cm->counts.supertx[i][j]);
+      savings += av1_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
+                                                   cm->counts.supertx[i][j]);
     }
   }
   do_update = savings > savings_thresh;
-  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
       for (j = 1; j < TX_SIZES; ++j) {
-        vp10_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
-                                   cm->counts.supertx[i][j]);
+        av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
+                                  cm->counts.supertx[i][j]);
       }
     }
   }
@@ -613,9 +611,9 @@
 #endif  // CONFIG_SUPERTX
 
 #if !CONFIG_ANS
-static void pack_mb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                            const TOKENEXTRA *const stop,
-                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+                           aom_bit_depth_t bit_depth, const TX_SIZE tx) {
   const TOKENEXTRA *p = *tp;
 #if CONFIG_VAR_TX
   int count = 0;
@@ -624,39 +622,39 @@
 
   while (p < stop && p->token != EOSB_TOKEN) {
     const int t = p->token;
-    const struct vp10_token *const a = &vp10_coef_encodings[t];
+    const struct av1_token *const a = &av1_coef_encodings[t];
     int v = a->value;
     int n = a->len;
-#if CONFIG_VP9_HIGHBITDEPTH
-    const vp10_extra_bit *b;
-    if (bit_depth == VPX_BITS_12)
-      b = &vp10_extra_bits_high12[t];
-    else if (bit_depth == VPX_BITS_10)
-      b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+    const av1_extra_bit *b;
+    if (bit_depth == AOM_BITS_12)
+      b = &av1_extra_bits_high12[t];
+    else if (bit_depth == AOM_BITS_10)
+      b = &av1_extra_bits_high10[t];
     else
-      b = &vp10_extra_bits[t];
+      b = &av1_extra_bits[t];
 #else
-    const vp10_extra_bit *const b = &vp10_extra_bits[t];
+    const av1_extra_bit *const b = &av1_extra_bits[t];
     (void)bit_depth;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     /* skip one or two nodes */
     if (p->skip_eob_node)
       n -= p->skip_eob_node;
     else
-      vp10_write(w, t != EOB_TOKEN, p->context_tree[0]);
+      aom_write(w, t != EOB_TOKEN, p->context_tree[0]);
 
     if (t != EOB_TOKEN) {
-      vp10_write(w, t != ZERO_TOKEN, p->context_tree[1]);
+      aom_write(w, t != ZERO_TOKEN, p->context_tree[1]);
 
       if (t != ZERO_TOKEN) {
-        vp10_write(w, t != ONE_TOKEN, p->context_tree[2]);
+        aom_write(w, t != ONE_TOKEN, p->context_tree[2]);
 
         if (t != ONE_TOKEN) {
           int len = UNCONSTRAINED_NODES - p->skip_eob_node;
-          vp10_write_tree(w, vp10_coef_con_tree,
-                          vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
-                          n - len, 0);
+          av1_write_tree(w, av1_coef_con_tree,
+                         av1_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+                         n - len, 0);
         }
       }
     }
@@ -679,13 +677,13 @@
             skip_bits--;
             assert(!bb);
           } else {
-            vp10_write(w, bb, pb[i >> 1]);
+            aom_write(w, bb, pb[i >> 1]);
           }
           i = b->tree[i + bb];
         } while (n);
       }
 
-      vp10_write_bit(w, e & 1);
+      aom_write_bit(w, e & 1);
     }
     ++p;
 
@@ -702,7 +700,7 @@
 // coder.
 static void pack_mb_tokens(struct BufAnsCoder *ans, const TOKENEXTRA **tp,
                            const TOKENEXTRA *const stop,
-                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+                           aom_bit_depth_t bit_depth, const TX_SIZE tx) {
   const TOKENEXTRA *p = *tp;
 #if CONFIG_VAR_TX
   int count = 0;
@@ -711,18 +709,18 @@
 
   while (p < stop && p->token != EOSB_TOKEN) {
     const int t = p->token;
-#if CONFIG_VP9_HIGHBITDEPTH
-    const vp10_extra_bit *b;
-    if (bit_depth == VPX_BITS_12)
-      b = &vp10_extra_bits_high12[t];
-    else if (bit_depth == VPX_BITS_10)
-      b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+    const av1_extra_bit *b;
+    if (bit_depth == AOM_BITS_12)
+      b = &av1_extra_bits_high12[t];
+    else if (bit_depth == AOM_BITS_10)
+      b = &av1_extra_bits_high10[t];
     else
-      b = &vp10_extra_bits[t];
+      b = &av1_extra_bits[t];
 #else
-    const vp10_extra_bit *const b = &vp10_extra_bits[t];
+    const av1_extra_bit *const b = &av1_extra_bits[t];
     (void)bit_depth;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     /* skip one or two nodes */
     if (!p->skip_eob_node)
@@ -776,10 +774,10 @@
 #endif  // !CONFIG_ANS
 
 #if CONFIG_VAR_TX
-static void pack_txb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                             const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
                             MB_MODE_INFO *mbmi, int plane,
-                            BLOCK_SIZE plane_bsize, vpx_bit_depth_t bit_depth,
+                            BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
                             int block, int blk_row, int blk_col,
                             TX_SIZE tx_size) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -824,16 +822,16 @@
 }
 #endif
 
-static void write_segment_id(vp10_writer *w, const struct segmentation *seg,
+static void write_segment_id(aom_writer *w, const struct segmentation *seg,
                              const struct segmentation_probs *segp,
                              int segment_id) {
   if (seg->enabled && seg->update_map)
-    vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+    av1_write_tree(w, av1_segment_tree, segp->tree_probs, segment_id, 3, 0);
 }
 
 // This function encodes the reference frame
-static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                             vp10_writer *w) {
+static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                             aom_writer *w) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int is_compound = has_second_ref(mbmi);
   const int segment_id = mbmi->segment_id;
@@ -848,7 +846,7 @@
     // does the feature use compound prediction or not
     // (if not specified at the frame/segment level)
     if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-      vp10_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+      aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
     } else {
       assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
     }
@@ -862,47 +860,47 @@
       const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
 #endif  // CONFIG_EXT_REFS
 
-      vp10_write(w, bit, vp10_get_pred_prob_comp_ref_p(cm, xd));
+      aom_write(w, bit, av1_get_pred_prob_comp_ref_p(cm, xd));
 
 #if CONFIG_EXT_REFS
       if (!bit) {
         const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
-        vp10_write(w, bit1, vp10_get_pred_prob_comp_ref_p1(cm, xd));
+        aom_write(w, bit1, av1_get_pred_prob_comp_ref_p1(cm, xd));
       } else {
         const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
-        vp10_write(w, bit2, vp10_get_pred_prob_comp_ref_p2(cm, xd));
+        aom_write(w, bit2, av1_get_pred_prob_comp_ref_p2(cm, xd));
       }
-      vp10_write(w, bit_bwd, vp10_get_pred_prob_comp_bwdref_p(cm, xd));
+      aom_write(w, bit_bwd, av1_get_pred_prob_comp_bwdref_p(cm, xd));
 #endif  // CONFIG_EXT_REFS
     } else {
 #if CONFIG_EXT_REFS
       const int bit0 = (mbmi->ref_frame[0] == ALTREF_FRAME ||
                         mbmi->ref_frame[0] == BWDREF_FRAME);
-      vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+      aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
 
       if (bit0) {
         const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
-        vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+        aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
       } else {
         const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
                           mbmi->ref_frame[0] == GOLDEN_FRAME);
-        vp10_write(w, bit2, vp10_get_pred_prob_single_ref_p3(cm, xd));
+        aom_write(w, bit2, av1_get_pred_prob_single_ref_p3(cm, xd));
 
         if (!bit2) {
           const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
-          vp10_write(w, bit3, vp10_get_pred_prob_single_ref_p4(cm, xd));
+          aom_write(w, bit3, av1_get_pred_prob_single_ref_p4(cm, xd));
         } else {
           const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
-          vp10_write(w, bit4, vp10_get_pred_prob_single_ref_p5(cm, xd));
+          aom_write(w, bit4, av1_get_pred_prob_single_ref_p5(cm, xd));
         }
       }
 #else   // CONFIG_EXT_REFS
       const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
-      vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+      aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
 
       if (bit0) {
         const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
-        vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+        aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
       }
 #endif  // CONFIG_EXT_REFS
     }
@@ -910,15 +908,15 @@
 }
 
 #if CONFIG_EXT_INTRA
-static void write_ext_intra_mode_info(const VP10_COMMON *const cm,
+static void write_ext_intra_mode_info(const AV1_COMMON *const cm,
                                       const MB_MODE_INFO *const mbmi,
-                                      vp10_writer *w) {
+                                      aom_writer *w) {
 #if !ALLOW_FILTER_INTRA_MODES
   return;
 #endif
   if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
-    vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
-               cm->fc->ext_intra_probs[0]);
+    aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
+              cm->fc->ext_intra_probs[0]);
     if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
       EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[0];
       write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -927,8 +925,8 @@
 
   if (mbmi->uv_mode == DC_PRED &&
       mbmi->palette_mode_info.palette_size[1] == 0) {
-    vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
-               cm->fc->ext_intra_probs[1]);
+    aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
+              cm->fc->ext_intra_probs[1]);
     if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
       EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[1];
       write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -936,11 +934,11 @@
   }
 }
 
-static void write_intra_angle_info(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                                   vp10_writer *w) {
+static void write_intra_angle_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                                   aom_writer *w) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
-  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
   int p_angle;
 
   if (bsize < BLOCK_8X8) return;
@@ -949,10 +947,10 @@
     write_uniform(w, 2 * MAX_ANGLE_DELTAS + 1,
                   MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
     p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
-    if (vp10_is_intra_filter_switchable(p_angle)) {
-      vp10_write_token(w, vp10_intra_filter_tree,
-                       cm->fc->intra_filter_probs[intra_filter_ctx],
-                       &intra_filter_encodings[mbmi->intra_filter]);
+    if (av1_is_intra_filter_switchable(p_angle)) {
+      av1_write_token(w, av1_intra_filter_tree,
+                      cm->fc->intra_filter_probs[intra_filter_ctx],
+                      &intra_filter_encodings[mbmi->intra_filter]);
     }
   }
 
@@ -963,10 +961,9 @@
 }
 #endif  // CONFIG_EXT_INTRA
 
-static void write_switchable_interp_filter(VP10_COMP *cpi,
-                                           const MACROBLOCKD *xd,
-                                           vp10_writer *w) {
-  VP10_COMMON *const cm = &cpi->common;
+static void write_switchable_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
+                                           aom_writer *w) {
+  AV1_COMMON *const cm = &cpi->common;
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
 #if CONFIG_DUAL_FILTER
   int dir;
@@ -974,12 +971,12 @@
   if (cm->interp_filter == SWITCHABLE) {
 #if CONFIG_EXT_INTERP
 #if CONFIG_DUAL_FILTER
-    if (!vp10_is_interp_needed(xd)) {
+    if (!av1_is_interp_needed(xd)) {
       assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
       return;
     }
 #else
-    if (!vp10_is_interp_needed(xd)) {
+    if (!av1_is_interp_needed(xd)) {
 #if CONFIG_DUAL_FILTER
       assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
       assert(mbmi->interp_filter[1] == EIGHTTAP_REGULAR);
@@ -995,28 +992,27 @@
       if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
           (mbmi->ref_frame[1] > INTRA_FRAME &&
            has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
-        const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
-        vp10_write_token(
-            w, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx],
-            &switchable_interp_encodings[mbmi->interp_filter[dir]]);
+        const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
+        av1_write_token(w, av1_switchable_interp_tree,
+                        cm->fc->switchable_interp_prob[ctx],
+                        &switchable_interp_encodings[mbmi->interp_filter[dir]]);
         ++cpi->interp_filter_selected[0][mbmi->interp_filter[dir]];
       }
     }
 #else
     {
-      const int ctx = vp10_get_pred_context_switchable_interp(xd);
-      vp10_write_token(w, vp10_switchable_interp_tree,
-                       cm->fc->switchable_interp_prob[ctx],
-                       &switchable_interp_encodings[mbmi->interp_filter]);
+      const int ctx = av1_get_pred_context_switchable_interp(xd);
+      av1_write_token(w, av1_switchable_interp_tree,
+                      cm->fc->switchable_interp_prob[ctx],
+                      &switchable_interp_encodings[mbmi->interp_filter]);
       ++cpi->interp_filter_selected[0][mbmi->interp_filter];
     }
 #endif
   }
 }
 
-static void write_palette_mode_info(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd,
-                                    const MODE_INFO *const mi, vp10_writer *w) {
+static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                                    const MODE_INFO *const mi, aom_writer *w) {
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
@@ -1031,44 +1027,43 @@
       palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
     if (left_mi)
       palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
-    vp10_write(
-        w, n > 0,
-        vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
+    aom_write(w, n > 0,
+              av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
     if (n > 0) {
-      vp10_write_token(w, vp10_palette_size_tree,
-                       vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
-                       &palette_size_encodings[n - 2]);
+      av1_write_token(w, av1_palette_size_tree,
+                      av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
+                      &palette_size_encodings[n - 2]);
       for (i = 0; i < n; ++i)
-        vp10_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
+        aom_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
       write_uniform(w, n, pmi->palette_first_color_idx[0]);
     }
   }
 
   if (mbmi->uv_mode == DC_PRED) {
     n = pmi->palette_size[1];
-    vp10_write(w, n > 0,
-               vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
+    aom_write(w, n > 0,
+              av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
     if (n > 0) {
-      vp10_write_token(w, vp10_palette_size_tree,
-                       vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8],
-                       &palette_size_encodings[n - 2]);
+      av1_write_token(w, av1_palette_size_tree,
+                      av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
+                      &palette_size_encodings[n - 2]);
       for (i = 0; i < n; ++i) {
-        vp10_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
-                           cm->bit_depth);
-        vp10_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
-                           cm->bit_depth);
+        aom_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
+                          cm->bit_depth);
+        aom_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
+                          cm->bit_depth);
       }
       write_uniform(w, n, pmi->palette_first_color_idx[1]);
     }
   }
 }
 
-static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
 #if CONFIG_SUPERTX
                                 int supertx_enabled,
 #endif
-                                vp10_writer *w) {
-  VP10_COMMON *const cm = &cpi->common;
+                                aom_writer *w) {
+  AV1_COMMON *const cm = &cpi->common;
 #if !CONFIG_REF_MV
   const nmv_context *nmvc = &cm->fc->nmvc;
 #endif
@@ -1089,8 +1084,8 @@
   if (seg->update_map) {
     if (seg->temporal_update) {
       const int pred_flag = mbmi->seg_id_predicted;
-      vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
-      vp10_write(w, pred_flag, pred_prob);
+      aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
+      aom_write(w, pred_flag, pred_prob);
       if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
     } else {
       write_segment_id(w, seg, segp, segment_id);
@@ -1110,7 +1105,7 @@
   if (!supertx_enabled)
 #endif  // CONFIG_SUPERTX
     if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
-      vp10_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+      aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
 
   if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
 #if CONFIG_SUPERTX
@@ -1172,8 +1167,8 @@
       mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
     else
 #endif  // CONFIG_EXT_INTER
-      mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                            mbmi->ref_frame, bsize, -1);
+      mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                           mbmi->ref_frame, bsize, -1);
 #endif
 
     // If segment skip is not enabled code the mode.
@@ -1213,8 +1208,8 @@
 #if CONFIG_EXT_INTER
           if (!is_compound)
 #endif  // CONFIG_EXT_INTER
-            mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                                  mbmi->ref_frame, bsize, j);
+            mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                                 mbmi->ref_frame, bsize, j);
 #endif
 #if CONFIG_EXT_INTER
           if (is_inter_compound_mode(b_mode))
@@ -1236,53 +1231,53 @@
             for (ref = 0; ref < 1 + is_compound; ++ref) {
 #if CONFIG_REF_MV
               int nmv_ctx =
-                  vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+                  av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
               const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
-              vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+              av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
 #if CONFIG_EXT_INTER
-                             &mi->bmi[j].ref_mv[ref].as_mv,
+                            &mi->bmi[j].ref_mv[ref].as_mv,
 #if CONFIG_REF_MV
-                             is_compound,
+                            is_compound,
 #endif
 #else
 #if CONFIG_REF_MV
-                             &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
+                            &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
 #else
-                             &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
 #endif  // CONFIG_REF_MV
 #endif  // CONFIG_EXT_INTER
-                             nmvc, allow_hp);
+                            nmvc, allow_hp);
             }
           }
 #if CONFIG_EXT_INTER
           else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
 #if CONFIG_REF_MV
             int nmv_ctx =
-                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
-                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+                av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                            mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
             const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
-            vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
-                           &mi->bmi[j].ref_mv[1].as_mv,
+            av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
+                          &mi->bmi[j].ref_mv[1].as_mv,
 #if CONFIG_REF_MV
-                           is_compound,
+                          is_compound,
 #endif
-                           nmvc, allow_hp);
+                          nmvc, allow_hp);
           } else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
 #if CONFIG_REF_MV
             int nmv_ctx =
-                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
-                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+                av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                            mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
             const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
-            vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
-                           &mi->bmi[j].ref_mv[0].as_mv,
+            av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
+                          &mi->bmi[j].ref_mv[0].as_mv,
 #if CONFIG_REF_MV
-                           is_compound,
+                          is_compound,
 #endif
-                           nmvc, allow_hp);
+                          nmvc, allow_hp);
           }
 #endif  // CONFIG_EXT_INTER
         }
@@ -1297,52 +1292,52 @@
         for (ref = 0; ref < 1 + is_compound; ++ref) {
 #if CONFIG_REF_MV
           int nmv_ctx =
-              vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
-                           mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+              av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                          mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
           const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
           ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
 #if CONFIG_EXT_INTER
           if (mode == NEWFROMNEARMV)
-            vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
-                           &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
+            av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+                          &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
 #if CONFIG_REF_MV
-                           is_compound,
+                          is_compound,
 #endif
-                           nmvc, allow_hp);
+                          nmvc, allow_hp);
           else
 #endif  // CONFIG_EXT_INTER
-            vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
+            av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
 #if CONFIG_REF_MV
-                           is_compound,
+                          is_compound,
 #endif
-                           nmvc, allow_hp);
+                          nmvc, allow_hp);
         }
 #if CONFIG_EXT_INTER
       } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
 #if CONFIG_REF_MV
-        int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
-                                   mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+        int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                                  mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
         const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
-        vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
-                       &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
+        av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
+                      &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
 #if CONFIG_REF_MV
-                       is_compound,
+                      is_compound,
 #endif
-                       nmvc, allow_hp);
+                      nmvc, allow_hp);
       } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
 #if CONFIG_REF_MV
-        int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
-                                   mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+        int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                                  mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
         const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
-        vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
-                       &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
+        av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
+                      &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
 #if CONFIG_REF_MV
-                       is_compound,
+                      is_compound,
 #endif
-                       nmvc, allow_hp);
+                      nmvc, allow_hp);
 #endif  // CONFIG_EXT_INTER
       }
     }
@@ -1355,16 +1350,16 @@
         is_interintra_allowed(mbmi)) {
       const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
       const int bsize_group = size_group_lookup[bsize];
-      vp10_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
+      aom_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
       if (interintra) {
         write_interintra_mode(w, mbmi->interintra_mode,
                               cm->fc->interintra_mode_prob[bsize_group]);
         if (is_interintra_wedge_used(bsize)) {
-          vp10_write(w, mbmi->use_wedge_interintra,
-                     cm->fc->wedge_interintra_prob[bsize]);
+          aom_write(w, mbmi->use_wedge_interintra,
+                    cm->fc->wedge_interintra_prob[bsize]);
           if (mbmi->use_wedge_interintra) {
-            vp10_write_literal(w, mbmi->interintra_wedge_index,
-                               get_wedge_bits_lookup(bsize));
+            aom_write_literal(w, mbmi->interintra_wedge_index,
+                              get_wedge_bits_lookup(bsize));
             assert(mbmi->interintra_wedge_sign == 0);
           }
         }
@@ -1384,8 +1379,8 @@
           // is not active, and assume SIMPLE_TRANSLATION in the decoder if
           // it is active.
           assert(mbmi->motion_variation < MOTION_VARIATIONS);
-          vp10_write_token(w, vp10_motvar_tree, cm->fc->motvar_prob[bsize],
-                           &motvar_encodings[mbmi->motion_variation]);
+          av1_write_token(w, av1_motvar_tree, cm->fc->motvar_prob[bsize],
+                          &motvar_encodings[mbmi->motion_variation]);
         }
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
@@ -1397,12 +1392,12 @@
           mbmi->motion_variation != SIMPLE_TRANSLATION) &&
 #endif  // CONFIG_OBMC
         is_interinter_wedge_used(bsize)) {
-      vp10_write(w, mbmi->use_wedge_interinter,
-                 cm->fc->wedge_interinter_prob[bsize]);
+      aom_write(w, mbmi->use_wedge_interinter,
+                cm->fc->wedge_interinter_prob[bsize]);
       if (mbmi->use_wedge_interinter) {
-        vp10_write_literal(w, mbmi->interinter_wedge_index,
-                           get_wedge_bits_lookup(bsize));
-        vp10_write_bit(w, mbmi->interinter_wedge_sign);
+        aom_write_literal(w, mbmi->interinter_wedge_index,
+                          get_wedge_bits_lookup(bsize));
+        aom_write_bit(w, mbmi->interinter_wedge_sign);
       }
     }
 #endif  // CONFIG_EXT_INTER
@@ -1424,14 +1419,14 @@
       if (is_inter) {
         assert(ext_tx_used_inter[eset][mbmi->tx_type]);
         if (eset > 0)
-          vp10_write_token(
-              w, vp10_ext_tx_inter_tree[eset],
+          av1_write_token(
+              w, av1_ext_tx_inter_tree[eset],
               cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
               &ext_tx_inter_encodings[eset][mbmi->tx_type]);
       } else if (ALLOW_INTRA_EXT_TX) {
         if (eset > 0)
-          vp10_write_token(
-              w, vp10_ext_tx_intra_tree[eset],
+          av1_write_token(
+              w, av1_ext_tx_intra_tree[eset],
               cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
               &ext_tx_intra_encodings[eset][mbmi->tx_type]);
       }
@@ -1443,12 +1438,12 @@
 #endif  // CONFIG_SUPERTX
         !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
       if (is_inter) {
-        vp10_write_token(w, vp10_ext_tx_tree,
-                         cm->fc->inter_ext_tx_prob[mbmi->tx_size],
-                         &ext_tx_encodings[mbmi->tx_type]);
+        av1_write_token(w, av1_ext_tx_tree,
+                        cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+                        &ext_tx_encodings[mbmi->tx_type]);
       } else {
-        vp10_write_token(
-            w, vp10_ext_tx_tree,
+        av1_write_token(
+            w, av1_ext_tx_tree,
             cm->fc->intra_ext_tx_prob
                 [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]],
             &ext_tx_encodings[mbmi->tx_type]);
@@ -1465,8 +1460,8 @@
   }
 }
 
-static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                              MODE_INFO **mi_8x8, vp10_writer *w) {
+static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                              MODE_INFO **mi_8x8, aom_writer *w) {
   const struct segmentation *const seg = &cm->seg;
   const struct segmentation_probs *const segp = &cm->fc->seg;
   const MODE_INFO *const mi = mi_8x8[0];
@@ -1518,16 +1513,16 @@
         ALLOW_INTRA_EXT_TX) {
       int eset = get_ext_tx_set(mbmi->tx_size, bsize, 0);
       if (eset > 0)
-        vp10_write_token(
-            w, vp10_ext_tx_intra_tree[eset],
+        av1_write_token(
+            w, av1_ext_tx_intra_tree[eset],
             cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
             &ext_tx_intra_encodings[eset][mbmi->tx_type]);
     }
 #else
     if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
         !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
-      vp10_write_token(
-          w, vp10_ext_tx_tree,
+      av1_write_token(
+          w, av1_ext_tx_tree,
           cm->fc->intra_ext_tx_prob[mbmi->tx_size]
                                    [intra_mode_to_tx_type_context[mbmi->mode]],
           &ext_tx_encodings[mbmi->tx_type]);
@@ -1546,14 +1541,14 @@
   write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
 #endif  // CONFIG_ANS && CONFIG_SUPERTX
 
-static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
-                          vp10_writer *w, const TOKENEXTRA **tok,
+static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
+                          aom_writer *w, const TOKENEXTRA **tok,
                           const TOKENEXTRA *const tok_end,
 #if CONFIG_SUPERTX
                           int supertx_enabled,
 #endif
                           int mi_row, int mi_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   MODE_INFO *m;
   int plane;
@@ -1584,8 +1579,8 @@
         xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
 #endif
 #if CONFIG_EXT_INTERP
-    // vp10_is_interp_needed needs the ref frame buffers set up to look
-    // up if they are scaled. vp10_is_interp_needed is in turn needed by
+    // av1_is_interp_needed needs the ref frame buffers set up to look
+    // up if they are scaled. av1_is_interp_needed is in turn needed by
     // write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
     set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
 #endif  // CONFIG_EXT_INTERP
@@ -1645,7 +1640,7 @@
       MB_MODE_INFO *mbmi = &m->mbmi;
       BLOCK_SIZE bsize = mbmi->sb_type;
       const BLOCK_SIZE plane_bsize =
-          get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+          get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
 
       const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
       const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -1692,31 +1687,31 @@
   }
 }
 
-static void write_partition(const VP10_COMMON *const cm,
+static void write_partition(const AV1_COMMON *const cm,
                             const MACROBLOCKD *const xd, int hbs, int mi_row,
                             int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
-                            vp10_writer *w) {
+                            aom_writer *w) {
   const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
-  const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+  const aom_prob *const probs = cm->fc->partition_prob[ctx];
   const int has_rows = (mi_row + hbs) < cm->mi_rows;
   const int has_cols = (mi_col + hbs) < cm->mi_cols;
 
   if (has_rows && has_cols) {
 #if CONFIG_EXT_PARTITION_TYPES
     if (bsize <= BLOCK_8X8)
-      vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+      av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
     else
-      vp10_write_token(w, vp10_ext_partition_tree, probs,
-                       &ext_partition_encodings[p]);
+      av1_write_token(w, av1_ext_partition_tree, probs,
+                      &ext_partition_encodings[p]);
 #else
-    vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+    av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
 #endif  // CONFIG_EXT_PARTITION_TYPES
   } else if (!has_rows && has_cols) {
     assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
-    vp10_write(w, p == PARTITION_SPLIT, probs[1]);
+    aom_write(w, p == PARTITION_SPLIT, probs[1]);
   } else if (has_rows && !has_cols) {
     assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
-    vp10_write(w, p == PARTITION_SPLIT, probs[2]);
+    aom_write(w, p == PARTITION_SPLIT, probs[2]);
   } else {
     assert(p == PARTITION_SPLIT);
   }
@@ -1733,14 +1728,14 @@
   write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
 #endif  // CONFIG_ANS && CONFIG_SUPERTX
 
-static void write_modes_sb(VP10_COMP *const cpi, const TileInfo *const tile,
-                           vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
+                           aom_writer *const w, const TOKENEXTRA **tok,
                            const TOKENEXTRA *const tok_end,
 #if CONFIG_SUPERTX
                            int supertx_enabled,
 #endif
                            int mi_row, int mi_col, BLOCK_SIZE bsize) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
   const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
@@ -1764,12 +1759,12 @@
   if (!supertx_enabled && !frame_is_intra_only(cm) &&
       partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
       !xd->lossless[0]) {
-    vpx_prob prob;
+    aom_prob prob;
     supertx_size = max_txsize_lookup[bsize];
     prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
                                [supertx_size];
     supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
-    vp10_write(w, supertx_enabled, prob);
+    aom_write(w, supertx_enabled, prob);
   }
 #endif  // CONFIG_SUPERTX
   if (subsize < BLOCK_8X8) {
@@ -1858,16 +1853,16 @@
     if (get_ext_tx_types(supertx_size, bsize, 1) > 1 && !skip) {
       int eset = get_ext_tx_set(supertx_size, bsize, 1);
       if (eset > 0) {
-        vp10_write_token(w, vp10_ext_tx_inter_tree[eset],
-                         cm->fc->inter_ext_tx_prob[eset][supertx_size],
-                         &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+        av1_write_token(w, av1_ext_tx_inter_tree[eset],
+                        cm->fc->inter_ext_tx_prob[eset][supertx_size],
+                        &ext_tx_inter_encodings[eset][mbmi->tx_type]);
       }
     }
 #else
     if (supertx_size < TX_32X32 && !skip) {
-      vp10_write_token(w, vp10_ext_tx_tree,
-                       cm->fc->inter_ext_tx_prob[supertx_size],
-                       &ext_tx_encodings[mbmi->tx_type]);
+      av1_write_token(w, av1_ext_tx_tree,
+                      cm->fc->inter_ext_tx_prob[supertx_size],
+                      &ext_tx_encodings[mbmi->tx_type]);
     }
 #endif  // CONFIG_EXT_TX
 
@@ -1904,7 +1899,7 @@
 #if DERING_REFINEMENT
   if (bsize == BLOCK_64X64 && cm->dering_level != 0 &&
       !sb_all_skip(cm, mi_row, mi_col)) {
-    vpx_write_literal(
+    aom_write_literal(
         w,
         cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
         DERING_REFINEMENT_BITS);
@@ -1913,10 +1908,10 @@
 #endif  // CONFIG_EXT_PARTITION_TYPES
 }
 
-static void write_modes(VP10_COMP *const cpi, const TileInfo *const tile,
-                        vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,
+                        aom_writer *const w, const TOKENEXTRA **tok,
                         const TOKENEXTRA *const tok_end) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   const int mi_row_start = tile->mi_row_start;
   const int mi_row_end = tile->mi_row_end;
@@ -1924,10 +1919,10 @@
   const int mi_col_end = tile->mi_col_end;
   int mi_row, mi_col;
 
-  vp10_zero_above_context(cm, mi_col_start, mi_col_end);
+  av1_zero_above_context(cm, mi_col_start, mi_col_end);
 
   for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
-    vp10_zero_left_context(xd);
+    av1_zero_left_context(xd);
 
     for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
       write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
@@ -1936,10 +1931,10 @@
   }
 }
 
-static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
-                                    vp10_coeff_stats *coef_branch_ct,
-                                    vp10_coeff_probs_model *coef_probs) {
-  vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
+                                    av1_coeff_stats *coef_branch_ct,
+                                    av1_coeff_probs_model *coef_probs) {
+  av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
   unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       cpi->common.counts.eob_branch[tx_size];
   int i, j, k, l, m;
@@ -1948,9 +1943,9 @@
     for (j = 0; j < REF_TYPES; ++j) {
       for (k = 0; k < COEF_BANDS; ++k) {
         for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
-          vp10_tree_probs_from_distribution(vp10_coef_tree,
-                                            coef_branch_ct[i][j][k][l],
-                                            coef_counts[i][j][k][l]);
+          av1_tree_probs_from_distribution(av1_coef_tree,
+                                           coef_branch_ct[i][j][k][l],
+                                           coef_counts[i][j][k][l]);
           coef_branch_ct[i][j][k][l][0][1] =
               eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
           for (m = 0; m < UNCONSTRAINED_NODES; ++m)
@@ -1963,12 +1958,12 @@
   }
 }
 
-static void update_coef_probs_common(vp10_writer *const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
                                      TX_SIZE tx_size,
-                                     vp10_coeff_stats *frame_branch_ct,
-                                     vp10_coeff_probs_model *new_coef_probs) {
-  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
-  const vpx_prob upd = DIFF_UPDATE_PROB;
+                                     av1_coeff_stats *frame_branch_ct,
+                                     av1_coeff_probs_model *new_coef_probs) {
+  av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+  const aom_prob upd = DIFF_UPDATE_PROB;
   const int entropy_nodes_update = UNCONSTRAINED_NODES;
   int i, j, k, l, t;
   int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -1983,22 +1978,22 @@
           for (k = 0; k < COEF_BANDS; ++k) {
             for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                const aom_prob oldp = old_coef_probs[i][j][k][l][t];
                 int s;
                 int u = 0;
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 else
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                 if (s > 0 && newp != oldp) u = 1;
                 if (u)
-                  savings += s - (int)(vp10_cost_zero(upd));
+                  savings += s - (int)(av1_cost_zero(upd));
                 else
-                  savings -= (int)(vp10_cost_zero(upd));
+                  savings -= (int)(av1_cost_zero(upd));
                 update[u]++;
               }
             }
@@ -2008,33 +2003,33 @@
 
       /* Is coef updated at all */
       if (update[1] == 0 || savings < 0) {
-        vp10_write_bit(bc, 0);
+        aom_write_bit(bc, 0);
         return;
       }
-      vp10_write_bit(bc, 1);
+      aom_write_bit(bc, 1);
       for (i = 0; i < PLANE_TYPES; ++i) {
         for (j = 0; j < REF_TYPES; ++j) {
           for (k = 0; k < COEF_BANDS; ++k) {
             for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
               // calc probs and branch cts for this frame only
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
-                const vpx_prob upd = DIFF_UPDATE_PROB;
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                const aom_prob upd = DIFF_UPDATE_PROB;
                 int s;
                 int u = 0;
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 else
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
                 if (s > 0 && newp != *oldp) u = 1;
-                vp10_write(bc, u, upd);
+                aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -2054,17 +2049,17 @@
             for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
               // calc probs and branch cts for this frame only
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
                 int s;
                 int u = 0;
 
                 if (t == PIVOT_NODE) {
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 } else {
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
                 }
 
@@ -2077,14 +2072,14 @@
                 if (u == 1 && updates == 1) {
                   int v;
                   // first update
-                  vp10_write_bit(bc, 1);
+                  aom_write_bit(bc, 1);
                   for (v = 0; v < noupdates_before_first; ++v)
-                    vp10_write(bc, 0, upd);
+                    aom_write(bc, 0, upd);
                 }
-                vp10_write(bc, u, upd);
+                aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -2093,7 +2088,7 @@
         }
       }
       if (updates == 0) {
-        vp10_write_bit(bc, 0);  // no updates
+        aom_write_bit(bc, 0);  // no updates
       }
       return;
     }
@@ -2104,8 +2099,8 @@
 #if CONFIG_ENTROPY
 // Calculate the token counts between subsequent subframe updates.
 static void get_coef_counts_diff(
-    VP10_COMP *cpi, int index,
-    vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
+    AV1_COMP *cpi, int index,
+    av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
     unsigned int eob_counts[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
                            [COEFF_CONTEXTS]) {
   int i, j, k, l, m, tx_size, val;
@@ -2151,11 +2146,11 @@
 }
 
 static void update_coef_probs_subframe(
-    vp10_writer *const bc, VP10_COMP *cpi, TX_SIZE tx_size,
-    vp10_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
-    vp10_coeff_probs_model *new_coef_probs) {
-  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
-  const vpx_prob upd = DIFF_UPDATE_PROB;
+    aom_writer *const bc, AV1_COMP *cpi, TX_SIZE tx_size,
+    av1_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
+    av1_coeff_probs_model *new_coef_probs) {
+  av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+  const aom_prob upd = DIFF_UPDATE_PROB;
   const int entropy_nodes_update = UNCONSTRAINED_NODES;
   int i, j, k, l, t;
   int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -2180,22 +2175,22 @@
                 }
               }
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                const aom_prob oldp = old_coef_probs[i][j][k][l][t];
                 int s, u = 0;
 
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_update_search_model_subframe(
+                  s = av1_prob_update_search_model_subframe(
                       this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
                       stepsize, max_idx);
                 else
-                  s = vp10_prob_update_search_subframe(this_branch_ct[t], oldp,
-                                                       &newp, upd, max_idx);
+                  s = av1_prob_update_search_subframe(this_branch_ct[t], oldp,
+                                                      &newp, upd, max_idx);
                 if (s > 0 && newp != oldp) u = 1;
                 if (u)
-                  savings += s - (int)(vp10_cost_zero(upd));
+                  savings += s - (int)(av1_cost_zero(upd));
                 else
-                  savings -= (int)(vp10_cost_zero(upd));
+                  savings -= (int)(av1_cost_zero(upd));
                 update[u]++;
               }
             }
@@ -2205,10 +2200,10 @@
 
       /* Is coef updated at all */
       if (update[1] == 0 || savings < 0) {
-        vp10_write_bit(bc, 0);
+        aom_write_bit(bc, 0);
         return;
       }
-      vp10_write_bit(bc, 1);
+      aom_write_bit(bc, 1);
       for (i = 0; i < PLANE_TYPES; ++i) {
         for (j = 0; j < REF_TYPES; ++j) {
           for (k = 0; k < COEF_BANDS; ++k) {
@@ -2221,24 +2216,24 @@
                 }
               }
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
-                const vpx_prob upd = DIFF_UPDATE_PROB;
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                const aom_prob upd = DIFF_UPDATE_PROB;
                 int s;
                 int u = 0;
 
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_update_search_model_subframe(
+                  s = av1_prob_update_search_model_subframe(
                       this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
                       stepsize, max_idx);
                 else
-                  s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
-                                                       &newp, upd, max_idx);
+                  s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+                                                      &newp, upd, max_idx);
                 if (s > 0 && newp != *oldp) u = 1;
-                vp10_write(bc, u, upd);
+                aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -2264,18 +2259,18 @@
                 }
               }
               for (t = 0; t < entropy_nodes_update; ++t) {
-                vpx_prob newp = new_coef_probs[i][j][k][l][t];
-                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                aom_prob newp = new_coef_probs[i][j][k][l][t];
+                aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
                 int s;
                 int u = 0;
 
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_update_search_model_subframe(
+                  s = av1_prob_update_search_model_subframe(
                       this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
                       stepsize, max_idx);
                 else
-                  s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
-                                                       &newp, upd, max_idx);
+                  s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+                                                      &newp, upd, max_idx);
                 if (s > 0 && newp != *oldp) u = 1;
                 updates += u;
                 if (u == 0 && updates == 0) {
@@ -2285,14 +2280,14 @@
                 if (u == 1 && updates == 1) {
                   int v;
                   // first update
-                  vp10_write_bit(bc, 1);
+                  aom_write_bit(bc, 1);
                   for (v = 0; v < noupdates_before_first; ++v)
-                    vp10_write(bc, 0, upd);
+                    aom_write(bc, 0, upd);
                 }
-                vp10_write(bc, u, upd);
+                aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -2301,7 +2296,7 @@
         }
       }
       if (updates == 0) {
-        vp10_write_bit(bc, 0);  // no updates
+        aom_write_bit(bc, 0);  // no updates
       }
       return;
     }
@@ -2310,7 +2305,7 @@
 }
 #endif  // CONFIG_ENTROPY
 
-static void update_coef_probs(VP10_COMP *cpi, vp10_writer *w) {
+static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
   const TX_MODE tx_mode = cpi->common.tx_mode;
   const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
   TX_SIZE tx_size;
@@ -2318,17 +2313,17 @@
   int update = 0;
 #endif  // CONFIG_ANS
 #if CONFIG_ENTROPY
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
   unsigned int eob_counts_copy[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
                               [COEFF_CONTEXTS];
   int i;
-  vp10_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
+  av1_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
 
   if (cm->do_subframe_update &&
       cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-    vp10_copy(cpi->common.fc->coef_probs,
-              subframe_stats->enc_starting_coef_probs);
+    av1_copy(cpi->common.fc->coef_probs,
+             subframe_stats->enc_starting_coef_probs);
     for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
       get_coef_counts_diff(cpi, i, cpi->wholeframe_stats.coef_counts_buf[i],
                            cpi->wholeframe_stats.eob_counts_buf[i]);
@@ -2337,32 +2332,32 @@
 #endif  // CONFIG_ENTROPY
 
   for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
-    vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
-    vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+    av1_coeff_stats frame_branch_ct[PLANE_TYPES];
+    av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
     if (cpi->td.counts->tx_size_totals[tx_size] <= 20 ||
         (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
-      vp10_write_bit(w, 0);
+      aom_write_bit(w, 0);
     } else {
 #if CONFIG_ENTROPY
       if (cm->do_subframe_update &&
           cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
         unsigned int
             eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
-        vp10_coeff_count coef_counts_copy[PLANE_TYPES];
-        vp10_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
-        vp10_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
+        av1_coeff_count coef_counts_copy[PLANE_TYPES];
+        av1_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
+        av1_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
         build_tree_distribution(cpi, tx_size, frame_branch_ct,
                                 frame_coef_probs);
         for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
-          vp10_copy(cpi->common.counts.eob_branch[tx_size],
-                    cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
-          vp10_copy(cpi->td.rd_counts.coef_counts[tx_size],
-                    cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
+          av1_copy(cpi->common.counts.eob_branch[tx_size],
+                   cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
+          av1_copy(cpi->td.rd_counts.coef_counts[tx_size],
+                   cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
           build_tree_distribution(cpi, tx_size, cpi->branch_ct_buf[i][tx_size],
                                   dummy_frame_coef_probs);
         }
-        vp10_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
-        vp10_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
+        av1_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
+        av1_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
 
         update_coef_probs_subframe(w, cpi, tx_size, cpi->branch_ct_buf,
                                    frame_coef_probs);
@@ -2385,65 +2380,65 @@
   }
 
 #if CONFIG_ENTROPY
-  vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
-  vp10_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
+  av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+  av1_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
   if (cm->do_subframe_update &&
       cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-    vp10_copy(eob_counts_copy, cm->counts.eob_branch);
+    av1_copy(eob_counts_copy, cm->counts.eob_branch);
     for (i = 1; i <= cpi->common.coef_probs_update_idx; ++i) {
       for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
-        vp10_full_to_model_counts(cm->counts.coef[tx_size],
-                                  subframe_stats->coef_counts_buf[i][tx_size]);
-      vp10_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
-      vp10_partial_adapt_probs(cm, 0, 0);
-      vp10_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
+        av1_full_to_model_counts(cm->counts.coef[tx_size],
+                                 subframe_stats->coef_counts_buf[i][tx_size]);
+      av1_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
+      av1_partial_adapt_probs(cm, 0, 0);
+      av1_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
     }
-    vp10_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
-    vp10_copy(cm->counts.eob_branch, eob_counts_copy);
+    av1_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
+    av1_copy(cm->counts.eob_branch, eob_counts_copy);
   }
 #endif  // CONFIG_ENTROPY
 #if CONFIG_ANS
-  if (update) vp10_coef_pareto_cdfs(cpi->common.fc);
+  if (update) av1_coef_pareto_cdfs(cpi->common.fc);
 #endif  // CONFIG_ANS
 }
 
 #if CONFIG_LOOP_RESTORATION
-static void encode_restoration(VP10_COMMON *cm,
-                               struct vpx_write_bit_buffer *wb) {
+static void encode_restoration(AV1_COMMON *cm,
+                               struct aom_write_bit_buffer *wb) {
   int i;
   RestorationInfo *rst = &cm->rst_info;
-  vpx_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
+  aom_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
   if (rst->restoration_type != RESTORE_NONE) {
     if (rst->restoration_type == RESTORE_BILATERAL) {
-      vpx_wb_write_bit(wb, 1);
+      aom_wb_write_bit(wb, 1);
       for (i = 0; i < cm->rst_internal.ntiles; ++i) {
         if (rst->bilateral_level[i] >= 0) {
-          vpx_wb_write_bit(wb, 1);
-          vpx_wb_write_literal(wb, rst->bilateral_level[i],
-                               vp10_bilateral_level_bits(cm));
+          aom_wb_write_bit(wb, 1);
+          aom_wb_write_literal(wb, rst->bilateral_level[i],
+                               av1_bilateral_level_bits(cm));
         } else {
-          vpx_wb_write_bit(wb, 0);
+          aom_wb_write_bit(wb, 0);
         }
       }
     } else {
-      vpx_wb_write_bit(wb, 0);
+      aom_wb_write_bit(wb, 0);
       for (i = 0; i < cm->rst_internal.ntiles; ++i) {
         if (rst->wiener_level[i]) {
-          vpx_wb_write_bit(wb, 1);
-          vpx_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
+          aom_wb_write_bit(wb, 1);
+          aom_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
                                WIENER_FILT_TAP0_BITS);
-          vpx_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
+          aom_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
                                WIENER_FILT_TAP1_BITS);
-          vpx_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
+          aom_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
                                WIENER_FILT_TAP2_BITS);
-          vpx_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
+          aom_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
                                WIENER_FILT_TAP0_BITS);
-          vpx_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
+          aom_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
                                WIENER_FILT_TAP1_BITS);
-          vpx_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
+          aom_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
                                WIENER_FILT_TAP2_BITS);
         } else {
-          vpx_wb_write_bit(wb, 0);
+          aom_wb_write_bit(wb, 0);
         }
       }
     }
@@ -2451,39 +2446,38 @@
 }
 #endif  // CONFIG_LOOP_RESTORATION
 
-static void encode_loopfilter(VP10_COMMON *cm,
-                              struct vpx_write_bit_buffer *wb) {
+static void encode_loopfilter(AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
   int i;
   struct loopfilter *lf = &cm->lf;
 
   // Encode the loop filter level and type
-  vpx_wb_write_literal(wb, lf->filter_level, 6);
-  vpx_wb_write_literal(wb, lf->sharpness_level, 3);
+  aom_wb_write_literal(wb, lf->filter_level, 6);
+  aom_wb_write_literal(wb, lf->sharpness_level, 3);
 
   // Write out loop filter deltas applied at the MB level based on mode or
   // ref frame (if they are enabled).
-  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
+  aom_wb_write_bit(wb, lf->mode_ref_delta_enabled);
 
   if (lf->mode_ref_delta_enabled) {
-    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
+    aom_wb_write_bit(wb, lf->mode_ref_delta_update);
     if (lf->mode_ref_delta_update) {
       for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
         const int delta = lf->ref_deltas[i];
         const int changed = delta != lf->last_ref_deltas[i];
-        vpx_wb_write_bit(wb, changed);
+        aom_wb_write_bit(wb, changed);
         if (changed) {
           lf->last_ref_deltas[i] = delta;
-          vpx_wb_write_inv_signed_literal(wb, delta, 6);
+          aom_wb_write_inv_signed_literal(wb, delta, 6);
         }
       }
 
       for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
         const int delta = lf->mode_deltas[i];
         const int changed = delta != lf->last_mode_deltas[i];
-        vpx_wb_write_bit(wb, changed);
+        aom_wb_write_bit(wb, changed);
         if (changed) {
           lf->last_mode_deltas[i] = delta;
-          vpx_wb_write_inv_signed_literal(wb, delta, 6);
+          aom_wb_write_inv_signed_literal(wb, delta, 6);
         }
       }
     }
@@ -2491,84 +2485,83 @@
 }
 
 #if CONFIG_CLPF
-static void encode_clpf(const VP10_COMMON *cm,
-                        struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_literal(wb, cm->clpf, 1);
+static void encode_clpf(const AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
+  aom_wb_write_literal(wb, cm->clpf, 1);
 }
 #endif
 
 #if CONFIG_DERING
-static void encode_dering(int level, struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_literal(wb, level, DERING_LEVEL_BITS);
+static void encode_dering(int level, struct aom_write_bit_buffer *wb) {
+  aom_wb_write_literal(wb, level, DERING_LEVEL_BITS);
 }
 #endif  // CONFIG_DERING
 
-static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
+static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
   if (delta_q != 0) {
-    vpx_wb_write_bit(wb, 1);
-    vpx_wb_write_inv_signed_literal(wb, delta_q, 6);
+    aom_wb_write_bit(wb, 1);
+    aom_wb_write_inv_signed_literal(wb, delta_q, 6);
   } else {
-    vpx_wb_write_bit(wb, 0);
+    aom_wb_write_bit(wb, 0);
   }
 }
 
-static void encode_quantization(const VP10_COMMON *const cm,
-                                struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+static void encode_quantization(const AV1_COMMON *const cm,
+                                struct aom_write_bit_buffer *wb) {
+  aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
   write_delta_q(wb, cm->y_dc_delta_q);
   write_delta_q(wb, cm->uv_dc_delta_q);
   write_delta_q(wb, cm->uv_ac_delta_q);
 #if CONFIG_AOM_QM
-  vpx_wb_write_bit(wb, cm->using_qmatrix);
+  aom_wb_write_bit(wb, cm->using_qmatrix);
   if (cm->using_qmatrix) {
-    vpx_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
-    vpx_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
+    aom_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
+    aom_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
   }
 #endif
 }
 
-static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                struct vpx_write_bit_buffer *wb) {
+static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                struct aom_write_bit_buffer *wb) {
   int i, j;
   const struct segmentation *seg = &cm->seg;
 
-  vpx_wb_write_bit(wb, seg->enabled);
+  aom_wb_write_bit(wb, seg->enabled);
   if (!seg->enabled) return;
 
   // Segmentation map
   if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
-    vpx_wb_write_bit(wb, seg->update_map);
+    aom_wb_write_bit(wb, seg->update_map);
   } else {
     assert(seg->update_map == 1);
   }
   if (seg->update_map) {
     // Select the coding strategy (temporal or spatial)
-    vp10_choose_segmap_coding_method(cm, xd);
+    av1_choose_segmap_coding_method(cm, xd);
 
     // Write out the chosen coding method.
     if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
-      vpx_wb_write_bit(wb, seg->temporal_update);
+      aom_wb_write_bit(wb, seg->temporal_update);
     } else {
       assert(seg->temporal_update == 0);
     }
   }
 
   // Segmentation data
-  vpx_wb_write_bit(wb, seg->update_data);
+  aom_wb_write_bit(wb, seg->update_data);
   if (seg->update_data) {
-    vpx_wb_write_bit(wb, seg->abs_delta);
+    aom_wb_write_bit(wb, seg->abs_delta);
 
     for (i = 0; i < MAX_SEGMENTS; i++) {
       for (j = 0; j < SEG_LVL_MAX; j++) {
         const int active = segfeature_active(seg, i, j);
-        vpx_wb_write_bit(wb, active);
+        aom_wb_write_bit(wb, active);
         if (active) {
           const int data = get_segdata(seg, i, j);
-          const int data_max = vp10_seg_feature_data_max(j);
+          const int data_max = av1_seg_feature_data_max(j);
 
-          if (vp10_is_segfeature_signed(j)) {
+          if (av1_is_segfeature_signed(j)) {
             encode_unsigned_max(wb, abs(data), data_max);
-            vpx_wb_write_bit(wb, data < 0);
+            aom_wb_write_bit(wb, data < 0);
           } else {
             encode_unsigned_max(wb, data, data_max);
           }
@@ -2578,8 +2571,8 @@
   }
 }
 
-static void update_seg_probs(VP10_COMP *cpi, vp10_writer *w) {
-  VP10_COMMON *cm = &cpi->common;
+static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
+  AV1_COMMON *cm = &cpi->common;
 
   if (!cm->seg.enabled || !cm->seg.update_map) return;
 
@@ -2587,41 +2580,41 @@
     int i;
 
     for (i = 0; i < PREDICTION_PROBS; i++)
-      vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
-                                 cm->counts.seg.pred[i]);
+      av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+                                cm->counts.seg.pred[i]);
 
-    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+    prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
                      cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
   } else {
-    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+    prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
                      cm->counts.seg.tree_total, MAX_SEGMENTS, w);
   }
 }
 
-static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
-  if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
+static void write_txfm_mode(TX_MODE mode, struct aom_write_bit_buffer *wb) {
+  aom_wb_write_bit(wb, mode == TX_MODE_SELECT);
+  if (mode != TX_MODE_SELECT) aom_wb_write_literal(wb, mode, 2);
 }
 
-static void update_txfm_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
                               FRAME_COUNTS *counts) {
   if (cm->tx_mode == TX_MODE_SELECT) {
     int i, j;
     for (i = 0; i < TX_SIZES - 1; ++i)
       for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
-        prob_diff_update(vp10_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
+        prob_diff_update(av1_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
                          counts->tx_size[i][j], i + 2, w);
   }
 }
 
 static void write_interp_filter(INTERP_FILTER filter,
-                                struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_bit(wb, filter == SWITCHABLE);
+                                struct aom_write_bit_buffer *wb) {
+  aom_wb_write_bit(wb, filter == SWITCHABLE);
   if (filter != SWITCHABLE)
-    vpx_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
+    aom_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
 }
 
-static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
   if (cm->interp_filter == SWITCHABLE) {
     // Check to see if only one of the filters is actually used
     int count[SWITCHABLE_FILTERS];
@@ -2644,8 +2637,8 @@
   }
 }
 
-static void write_tile_info(const VP10_COMMON *const cm,
-                            struct vpx_write_bit_buffer *wb) {
+static void write_tile_info(const AV1_COMMON *const cm,
+                            struct aom_write_bit_buffer *wb) {
 #if CONFIG_EXT_TILE
   const int tile_width =
       ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
@@ -2662,33 +2655,33 @@
   if (cm->sb_size == BLOCK_128X128) {
     assert(tile_width <= 32);
     assert(tile_height <= 32);
-    vpx_wb_write_literal(wb, tile_width - 1, 5);
-    vpx_wb_write_literal(wb, tile_height - 1, 5);
+    aom_wb_write_literal(wb, tile_width - 1, 5);
+    aom_wb_write_literal(wb, tile_height - 1, 5);
   } else
 #endif  // CONFIG_EXT_PARTITION
   {
     assert(tile_width <= 64);
     assert(tile_height <= 64);
-    vpx_wb_write_literal(wb, tile_width - 1, 6);
-    vpx_wb_write_literal(wb, tile_height - 1, 6);
+    aom_wb_write_literal(wb, tile_width - 1, 6);
+    aom_wb_write_literal(wb, tile_height - 1, 6);
   }
 #else
   int min_log2_tile_cols, max_log2_tile_cols, ones;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   // columns
   ones = cm->log2_tile_cols - min_log2_tile_cols;
-  while (ones--) vpx_wb_write_bit(wb, 1);
+  while (ones--) aom_wb_write_bit(wb, 1);
 
-  if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
+  if (cm->log2_tile_cols < max_log2_tile_cols) aom_wb_write_bit(wb, 0);
 
   // rows
-  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
-  if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+  aom_wb_write_bit(wb, cm->log2_tile_rows != 0);
+  if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
 #endif  // CONFIG_EXT_TILE
 }
 
-static int get_refresh_mask(VP10_COMP *cpi) {
+static int get_refresh_mask(AV1_COMP *cpi) {
   int refresh_mask = 0;
 
 #if CONFIG_EXT_REFS
@@ -2712,12 +2705,12 @@
   refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
 #endif  // CONFIG_EXT_REFS
 
-  if (vp10_preserve_existing_gf(cpi)) {
+  if (av1_preserve_existing_gf(cpi)) {
     // We have decided to preserve the previously existing golden frame as our
     // new ARF frame. However, in the short term we leave it in the GF slot and,
     // if we're updating the GF with the current decoded frame, we save it
     // instead to the ARF slot.
-    // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+    // Later, in the function av1_encoder.c:av1_update_reference_frames() we
     // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
     // there so that it can be done outside of the recode loop.
     // Note: This is highly specific to the use of ARF as a forward reference,
@@ -2793,14 +2786,14 @@
 }
 #endif  // CONFIG_EXT_TILE
 
-static uint32_t write_tiles(VP10_COMP *const cpi, uint8_t *const dst,
+static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
                             unsigned int *max_tile_size,
                             unsigned int *max_tile_col_size) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
 #if CONFIG_ANS
   struct AnsCoder token_ans;
 #else
-  vp10_writer mode_bc;
+  aom_writer mode_bc;
 #endif  // CONFIG_ANS
   int tile_row, tile_col;
   TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
@@ -2827,7 +2820,7 @@
     const int is_last_col = (tile_col == tile_cols - 1);
     const size_t col_offset = total_size;
 
-    vp10_tile_set_col(&tile_info, cm, tile_col);
+    av1_tile_set_col(&tile_info, cm, tile_col);
 
     // The last column does not have a column header
     if (!is_last_col) total_size += 4;
@@ -2839,7 +2832,7 @@
       const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
       const int data_offset = have_tiles ? 4 : 0;
 
-      vp10_tile_set_row(&tile_info, cm, tile_row);
+      av1_tile_set_row(&tile_info, cm, tile_row);
 
       buf->data = dst + total_size;
 
@@ -2847,10 +2840,10 @@
       // even for the last one, unless no tiling is used at all.
       total_size += data_offset;
 #if !CONFIG_ANS
-      vpx_start_encode(&mode_bc, buf->data + data_offset);
+      aom_start_encode(&mode_bc, buf->data + data_offset);
       write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
       assert(tok == tok_end);
-      vpx_stop_encode(&mode_bc);
+      aom_stop_encode(&mode_bc);
       tile_size = mode_bc.pos;
 #else
       buf_ans_write_reset(buf_ans);
@@ -2864,7 +2857,7 @@
       buf->size = tile_size;
 
       // Record the maximum tile size we see, so we can compact headers later.
-      *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+      *max_tile_size = AOMMAX(*max_tile_size, tile_size);
 
       if (have_tiles) {
         // tile header: size of this tile, or copy offset
@@ -2896,7 +2889,7 @@
 
       // If it is not final packing, record the maximum tile column size we see,
       // otherwise, check if the tile size is out of the range.
-      *max_tile_col_size = VPXMAX(*max_tile_col_size, col_size);
+      *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
     }
   }
 #else
@@ -2904,7 +2897,7 @@
     TileInfo tile_info;
     const int is_last_row = (tile_row == tile_rows - 1);
 
-    vp10_tile_set_row(&tile_info, cm, tile_row);
+    av1_tile_set_row(&tile_info, cm, tile_row);
 
     for (tile_col = 0; tile_col < tile_cols; tile_col++) {
       TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
@@ -2914,7 +2907,7 @@
       const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
       const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
 
-      vp10_tile_set_col(&tile_info, cm, tile_col);
+      av1_tile_set_col(&tile_info, cm, tile_col);
 
       buf->data = dst + total_size;
 
@@ -2922,10 +2915,10 @@
       if (!is_last_tile) total_size += 4;
 
 #if !CONFIG_ANS
-      vpx_start_encode(&mode_bc, dst + total_size);
+      aom_start_encode(&mode_bc, dst + total_size);
       write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
       assert(tok == tok_end);
-      vpx_stop_encode(&mode_bc);
+      aom_stop_encode(&mode_bc);
       tile_size = mode_bc.pos;
 #else
       buf_ans_write_reset(buf_ans);
@@ -2941,7 +2934,7 @@
       buf->size = tile_size;
 
       if (!is_last_tile) {
-        *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+        *max_tile_size = AOMMAX(*max_tile_size, tile_size);
         // size of this tile
         mem_put_le32(buf->data, tile_size);
       }
@@ -2953,28 +2946,28 @@
   return (uint32_t)total_size;
 }
 
-static void write_render_size(const VP10_COMMON *cm,
-                              struct vpx_write_bit_buffer *wb) {
+static void write_render_size(const AV1_COMMON *cm,
+                              struct aom_write_bit_buffer *wb) {
   const int scaling_active =
       cm->width != cm->render_width || cm->height != cm->render_height;
-  vpx_wb_write_bit(wb, scaling_active);
+  aom_wb_write_bit(wb, scaling_active);
   if (scaling_active) {
-    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
-    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
+    aom_wb_write_literal(wb, cm->render_width - 1, 16);
+    aom_wb_write_literal(wb, cm->render_height - 1, 16);
   }
 }
 
-static void write_frame_size(const VP10_COMMON *cm,
-                             struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_literal(wb, cm->width - 1, 16);
-  vpx_wb_write_literal(wb, cm->height - 1, 16);
+static void write_frame_size(const AV1_COMMON *cm,
+                             struct aom_write_bit_buffer *wb) {
+  aom_wb_write_literal(wb, cm->width - 1, 16);
+  aom_wb_write_literal(wb, cm->height - 1, 16);
 
   write_render_size(cm, wb);
 }
 
-static void write_frame_size_with_refs(VP10_COMP *cpi,
-                                       struct vpx_write_bit_buffer *wb) {
-  VP10_COMMON *const cm = &cpi->common;
+static void write_frame_size_with_refs(AV1_COMP *cpi,
+                                       struct aom_write_bit_buffer *wb) {
+  AV1_COMMON *const cm = &cpi->common;
   int found = 0;
 
   MV_REFERENCE_FRAME ref_frame;
@@ -2987,66 +2980,66 @@
       found &= cm->render_width == cfg->render_width &&
                cm->render_height == cfg->render_height;
     }
-    vpx_wb_write_bit(wb, found);
+    aom_wb_write_bit(wb, found);
     if (found) {
       break;
     }
   }
 
   if (!found) {
-    vpx_wb_write_literal(wb, cm->width - 1, 16);
-    vpx_wb_write_literal(wb, cm->height - 1, 16);
+    aom_wb_write_literal(wb, cm->width - 1, 16);
+    aom_wb_write_literal(wb, cm->height - 1, 16);
     write_render_size(cm, wb);
   }
 }
 
-static void write_sync_code(struct vpx_write_bit_buffer *wb) {
-  vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
-  vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
-  vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+static void write_sync_code(struct aom_write_bit_buffer *wb) {
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
 }
 
 static void write_profile(BITSTREAM_PROFILE profile,
-                          struct vpx_write_bit_buffer *wb) {
+                          struct aom_write_bit_buffer *wb) {
   switch (profile) {
-    case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
-    case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
-    case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
-    case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+    case PROFILE_0: aom_wb_write_literal(wb, 0, 2); break;
+    case PROFILE_1: aom_wb_write_literal(wb, 2, 2); break;
+    case PROFILE_2: aom_wb_write_literal(wb, 1, 2); break;
+    case PROFILE_3: aom_wb_write_literal(wb, 6, 3); break;
     default: assert(0);
   }
 }
 
 static void write_bitdepth_colorspace_sampling(
-    VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
+    AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
   if (cm->profile >= PROFILE_2) {
-    assert(cm->bit_depth > VPX_BITS_8);
-    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
+    assert(cm->bit_depth > AOM_BITS_8);
+    aom_wb_write_bit(wb, cm->bit_depth == AOM_BITS_10 ? 0 : 1);
   }
-  vpx_wb_write_literal(wb, cm->color_space, 3);
-  if (cm->color_space != VPX_CS_SRGB) {
+  aom_wb_write_literal(wb, cm->color_space, 3);
+  if (cm->color_space != AOM_CS_SRGB) {
     // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
-    vpx_wb_write_bit(wb, cm->color_range);
+    aom_wb_write_bit(wb, cm->color_range);
     if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
       assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
-      vpx_wb_write_bit(wb, cm->subsampling_x);
-      vpx_wb_write_bit(wb, cm->subsampling_y);
-      vpx_wb_write_bit(wb, 0);  // unused
+      aom_wb_write_bit(wb, cm->subsampling_x);
+      aom_wb_write_bit(wb, cm->subsampling_y);
+      aom_wb_write_bit(wb, 0);  // unused
     } else {
       assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
     }
   } else {
     assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
-    vpx_wb_write_bit(wb, 0);  // unused
+    aom_wb_write_bit(wb, 0);  // unused
   }
 }
 
-static void write_uncompressed_header(VP10_COMP *cpi,
-                                      struct vpx_write_bit_buffer *wb) {
-  VP10_COMMON *const cm = &cpi->common;
+static void write_uncompressed_header(AV1_COMP *cpi,
+                                      struct aom_write_bit_buffer *wb) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
-  vpx_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
+  aom_wb_write_literal(wb, AOM_FRAME_MARKER, 2);
 
   write_profile(cm->profile, wb);
 
@@ -3059,45 +3052,45 @@
     const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
 
     if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
-      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                          "Buffer %d does not contain a reconstructed frame",
                          frame_to_show);
     }
     ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
 
-    vpx_wb_write_bit(wb, 1);  // show_existing_frame
-    vpx_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
+    aom_wb_write_bit(wb, 1);  // show_existing_frame
+    aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
 
     return;
   } else {
 #endif                        // CONFIG_EXT_REFS
-    vpx_wb_write_bit(wb, 0);  // show_existing_frame
+    aom_wb_write_bit(wb, 0);  // show_existing_frame
 #if CONFIG_EXT_REFS
   }
 #endif  // CONFIG_EXT_REFS
 
-  vpx_wb_write_bit(wb, cm->frame_type);
-  vpx_wb_write_bit(wb, cm->show_frame);
-  vpx_wb_write_bit(wb, cm->error_resilient_mode);
+  aom_wb_write_bit(wb, cm->frame_type);
+  aom_wb_write_bit(wb, cm->show_frame);
+  aom_wb_write_bit(wb, cm->error_resilient_mode);
 
   if (cm->frame_type == KEY_FRAME) {
     write_sync_code(wb);
     write_bitdepth_colorspace_sampling(cm, wb);
     write_frame_size(cm, wb);
     if (frame_is_intra_only(cm))
-      vpx_wb_write_bit(wb, cm->allow_screen_content_tools);
+      aom_wb_write_bit(wb, cm->allow_screen_content_tools);
   } else {
-    if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
+    if (!cm->show_frame) aom_wb_write_bit(wb, cm->intra_only);
 
     if (!cm->error_resilient_mode) {
       if (cm->intra_only) {
-        vpx_wb_write_bit(wb,
+        aom_wb_write_bit(wb,
                          cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
       } else {
-        vpx_wb_write_bit(wb,
+        aom_wb_write_bit(wb,
                          cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
         if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
-          vpx_wb_write_bit(wb,
+          aom_wb_write_bit(wb,
                            cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
       }
     }
@@ -3111,18 +3104,18 @@
       write_bitdepth_colorspace_sampling(cm, wb);
 
 #if CONFIG_EXT_REFS
-      vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+      aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
 #else
-      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+      aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
 #endif  // CONFIG_EXT_REFS
       write_frame_size(cm, wb);
     } else {
       MV_REFERENCE_FRAME ref_frame;
 
 #if CONFIG_EXT_REFS
-      vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+      aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
 #else
-      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+      aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
 #endif  // CONFIG_EXT_REFS
 
 #if CONFIG_EXT_REFS
@@ -3135,14 +3128,14 @@
 
       for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
         assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
-        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+        aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                              REF_FRAMES_LOG2);
-        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
+        aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
       }
 
       write_frame_size_with_refs(cpi, wb);
 
-      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);
+      aom_wb_write_bit(wb, cm->allow_high_precision_mv);
 
       fix_interp_filter(cm, cpi->td.counts);
       write_interp_filter(cm->interp_filter, wb);
@@ -3150,17 +3143,17 @@
   }
 
   if (!cm->error_resilient_mode) {
-    vpx_wb_write_bit(
+    aom_wb_write_bit(
         wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
   }
 
-  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
+  aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
 
   assert(cm->mib_size == num_8x8_blocks_wide_lookup[cm->sb_size]);
   assert(cm->mib_size == 1 << cm->mib_size_log2);
 #if CONFIG_EXT_PARTITION
   assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
-  vpx_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
+  aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
 #else
   assert(cm->sb_size == BLOCK_64X64);
 #endif  // CONFIG_EXT_PARTITION
@@ -3186,8 +3179,8 @@
     const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
     const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
 
-    vpx_wb_write_bit(wb, use_hybrid_pred);
-    if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
+    aom_wb_write_bit(wb, use_hybrid_pred);
+    if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
   }
 
   write_tile_info(cm, wb);
@@ -3195,35 +3188,35 @@
 
 #if CONFIG_GLOBAL_MOTION
 static void write_global_motion_params(Global_Motion_Params *params,
-                                       vpx_prob *probs, vp10_writer *w) {
+                                       aom_prob *probs, aom_writer *w) {
   GLOBAL_MOTION_TYPE gmtype = get_gmtype(params);
-  vp10_write_token(w, vp10_global_motion_types_tree, probs,
-                   &global_motion_types_encodings[gmtype]);
+  av1_write_token(w, av1_global_motion_types_tree, probs,
+                  &global_motion_types_encodings[gmtype]);
   switch (gmtype) {
     case GLOBAL_ZERO: break;
     case GLOBAL_AFFINE:
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, params->motion_params.wmmat[4] >> GM_ALPHA_PREC_DIFF,
           GM_ABS_ALPHA_BITS);
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, (params->motion_params.wmmat[5] >> GM_ALPHA_PREC_DIFF) -
                  (1 << GM_ALPHA_PREC_BITS),
           GM_ABS_ALPHA_BITS);
     // fallthrough intended
     case GLOBAL_ROTZOOM:
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, (params->motion_params.wmmat[2] >> GM_ALPHA_PREC_DIFF) -
                  (1 << GM_ALPHA_PREC_BITS),
           GM_ABS_ALPHA_BITS);
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, params->motion_params.wmmat[3] >> GM_ALPHA_PREC_DIFF,
           GM_ABS_ALPHA_BITS);
     // fallthrough intended
     case GLOBAL_TRANSLATION:
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, params->motion_params.wmmat[0] >> GM_TRANS_PREC_DIFF,
           GM_ABS_TRANS_BITS);
-      vp10_write_primitive_symmetric(
+      av1_write_primitive_symmetric(
           w, params->motion_params.wmmat[1] >> GM_TRANS_PREC_DIFF,
           GM_ABS_TRANS_BITS);
       break;
@@ -3231,8 +3224,8 @@
   }
 }
 
-static void write_global_motion(VP10_COMP *cpi, vp10_writer *w) {
-  VP10_COMMON *const cm = &cpi->common;
+static void write_global_motion(AV1_COMP *cpi, aom_writer *w) {
+  AV1_COMMON *const cm = &cpi->common;
   int frame;
   for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
     if (!cpi->global_motion_used[frame]) {
@@ -3244,14 +3237,14 @@
 }
 #endif
 
-static uint32_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
-  VP10_COMMON *const cm = &cpi->common;
+static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
+  AV1_COMMON *const cm = &cpi->common;
 #if CONFIG_SUPERTX
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 #endif  // CONFIG_SUPERTX
   FRAME_CONTEXT *const fc = cm->fc;
   FRAME_COUNTS *counts = cpi->td.counts;
-  vp10_writer *header_bc;
+  aom_writer *header_bc;
   int i, j;
 
 #if CONFIG_ANS
@@ -3260,9 +3253,9 @@
   header_bc = &cpi->buf_ans;
   buf_ans_write_reset(header_bc);
 #else
-  vp10_writer real_header_bc;
+  aom_writer real_header_bc;
   header_bc = &real_header_bc;
-  vpx_start_encode(header_bc, data);
+  aom_start_encode(header_bc, data);
 #endif
   update_txfm_probs(cm, header_bc, counts);
   update_coef_probs(cpi, header_bc);
@@ -3275,39 +3268,39 @@
   update_seg_probs(cpi, header_bc);
 
   for (i = 0; i < INTRA_MODES; ++i)
-    prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+    prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
                      counts->uv_mode[i], INTRA_MODES, header_bc);
 
 #if CONFIG_EXT_PARTITION_TYPES
-  prob_diff_update(vp10_partition_tree, fc->partition_prob[0],
+  prob_diff_update(av1_partition_tree, fc->partition_prob[0],
                    counts->partition[0], PARTITION_TYPES, header_bc);
   for (i = 1; i < PARTITION_CONTEXTS; ++i)
-    prob_diff_update(vp10_ext_partition_tree, fc->partition_prob[i],
+    prob_diff_update(av1_ext_partition_tree, fc->partition_prob[i],
                      counts->partition[i], EXT_PARTITION_TYPES, header_bc);
 #else
   for (i = 0; i < PARTITION_CONTEXTS; ++i)
-    prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+    prob_diff_update(av1_partition_tree, fc->partition_prob[i],
                      counts->partition[i], PARTITION_TYPES, header_bc);
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_EXT_INTRA
   for (i = 0; i < INTRA_FILTERS + 1; ++i)
-    prob_diff_update(vp10_intra_filter_tree, fc->intra_filter_probs[i],
+    prob_diff_update(av1_intra_filter_tree, fc->intra_filter_probs[i],
                      counts->intra_filter[i], INTRA_FILTERS, header_bc);
 #endif  // CONFIG_EXT_INTRA
 
   if (frame_is_intra_only(cm)) {
-    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+    av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
     for (i = 0; i < INTRA_MODES; ++i)
       for (j = 0; j < INTRA_MODES; ++j)
-        prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+        prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
                          counts->kf_y_mode[i][j], INTRA_MODES, header_bc);
   } else {
 #if CONFIG_REF_MV
     update_inter_mode_probs(cm, header_bc, counts);
 #else
     for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
-      prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+      prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
                        counts->inter_mode[i], INTER_MODES, header_bc);
 #endif
 
@@ -3317,32 +3310,32 @@
     if (cm->reference_mode != COMPOUND_REFERENCE) {
       for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
         if (is_interintra_allowed_bsize_group(i)) {
-          vp10_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
-                                     cm->counts.interintra[i]);
+          av1_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
+                                    cm->counts.interintra[i]);
         }
       }
       for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
         prob_diff_update(
-            vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
+            av1_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
             counts->interintra_mode[i], INTERINTRA_MODES, header_bc);
       }
       for (i = 0; i < BLOCK_SIZES; i++) {
         if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
-          vp10_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
-                                     cm->counts.wedge_interintra[i]);
+          av1_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
+                                    cm->counts.wedge_interintra[i]);
       }
     }
     if (cm->reference_mode != SINGLE_REFERENCE) {
       for (i = 0; i < BLOCK_SIZES; i++)
         if (is_interinter_wedge_used(i))
-          vp10_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
-                                     cm->counts.wedge_interinter[i]);
+          av1_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
+                                    cm->counts.wedge_interinter[i]);
     }
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
     for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
-      prob_diff_update(vp10_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
+      prob_diff_update(av1_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
                        MOTION_VARIATIONS, header_bc);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
@@ -3350,22 +3343,22 @@
       update_switchable_interp_probs(cm, header_bc, counts);
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-      vp10_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
-                                 counts->intra_inter[i]);
+      av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
+                                counts->intra_inter[i]);
 
     if (cpi->allow_comp_inter_inter) {
       const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
       if (use_hybrid_pred)
         for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-          vp10_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
-                                     counts->comp_inter[i]);
+          av1_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
+                                    counts->comp_inter[i]);
     }
 
     if (cm->reference_mode != COMPOUND_REFERENCE) {
       for (i = 0; i < REF_CONTEXTS; i++) {
         for (j = 0; j < (SINGLE_REFS - 1); j++) {
-          vp10_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
-                                     counts->single_ref[i][j]);
+          av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
+                                    counts->single_ref[i][j]);
         }
       }
     }
@@ -3374,31 +3367,31 @@
       for (i = 0; i < REF_CONTEXTS; i++) {
 #if CONFIG_EXT_REFS
         for (j = 0; j < (FWD_REFS - 1); j++) {
-          vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
-                                     counts->comp_ref[i][j]);
+          av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+                                    counts->comp_ref[i][j]);
         }
         for (j = 0; j < (BWD_REFS - 1); j++) {
-          vp10_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
-                                     counts->comp_bwdref[i][j]);
+          av1_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
+                                    counts->comp_bwdref[i][j]);
         }
 #else
         for (j = 0; j < (COMP_REFS - 1); j++) {
-          vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
-                                     counts->comp_ref[i][j]);
+          av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+                                    counts->comp_ref[i][j]);
         }
 #endif  // CONFIG_EXT_REFS
       }
     }
 
     for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
-      prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+      prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
                        counts->y_mode[i], INTRA_MODES, header_bc);
 
-    vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
+    av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
 #if CONFIG_REF_MV
-                         counts->mv);
+                        counts->mv);
 #else
-                         &counts->mv);
+                        &counts->mv);
 #endif
     update_ext_tx_probs(cm, header_bc);
 #if CONFIG_SUPERTX
@@ -3415,7 +3408,7 @@
   assert(header_size <= 0xffff);
   return header_size;
 #else
-  vpx_stop_encode(header_bc);
+  aom_stop_encode(header_bc);
   assert(header_bc->pos <= 0xffff);
   return header_bc->pos;
 #endif  // CONFIG_ANS
@@ -3451,7 +3444,7 @@
   }
 }
 
-static int remux_tiles(const VP10_COMMON *const cm, uint8_t *dst,
+static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                        const uint32_t data_size, const uint32_t max_tile_size,
                        const uint32_t max_tile_col_size,
                        int *const tile_size_bytes,
@@ -3549,19 +3542,19 @@
   }
 }
 
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
   uint8_t *data = dst;
   uint32_t compressed_header_size;
   uint32_t uncompressed_header_size;
   uint32_t data_size;
-  struct vpx_write_bit_buffer wb = { data, 0 };
-  struct vpx_write_bit_buffer saved_wb;
+  struct aom_write_bit_buffer wb = { data, 0 };
+  struct aom_write_bit_buffer saved_wb;
   unsigned int max_tile_size;
   unsigned int max_tile_col_size;
   int tile_size_bytes;
   int tile_col_size_bytes;
 
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
 
 #if CONFIG_BITSTREAM_DEBUG
@@ -3573,7 +3566,7 @@
 
 #if CONFIG_EXT_REFS
   if (cm->show_existing_frame) {
-    *size = vpx_wb_bytes_written(&wb);
+    *size = aom_wb_bytes_written(&wb);
     return;
   }
 #endif  // CONFIG_EXT_REFS
@@ -3586,18 +3579,18 @@
 // describing tile configuration.
 #if CONFIG_EXT_TILE
     // Number of bytes in tile column size - 1
-    vpx_wb_write_literal(&wb, 0, 2);
+    aom_wb_write_literal(&wb, 0, 2);
 #endif  // CONFIG_EXT_TILE
     // Number of bytes in tile size - 1
-    vpx_wb_write_literal(&wb, 0, 2);
+    aom_wb_write_literal(&wb, 0, 2);
   }
   // Size of compressed header
-  vpx_wb_write_literal(&wb, 0, 16);
+  aom_wb_write_literal(&wb, 0, 16);
 
-  uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
+  uncompressed_header_size = (uint32_t)aom_wb_bytes_written(&wb);
   data += uncompressed_header_size;
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   // Write the compressed header
   compressed_header_size = write_compressed_header(cpi, data);
@@ -3618,14 +3611,14 @@
   if (have_tiles) {
 #if CONFIG_EXT_TILE
     assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
-    vpx_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
+    aom_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
 #endif  // CONFIG_EXT_TILE
     assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
-    vpx_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
+    aom_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
   }
   // TODO(jbb): Figure out what to do if compressed_header_size > 16 bits.
   assert(compressed_header_size <= 0xffff);
-  vpx_wb_write_literal(&saved_wb, compressed_header_size, 16);
+  aom_wb_write_literal(&saved_wb, compressed_header_size, 16);
 
   *size = data - dst;
 }
diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h
index 01d2c8d..5a4fb19 100644
--- a/av1/encoder/bitstream.h
+++ b/av1/encoder/bitstream.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_BITSTREAM_H_
-#define VP10_ENCODER_BITSTREAM_H_
+#ifndef AV1_ENCODER_BITSTREAM_H_
+#define AV1_ENCODER_BITSTREAM_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -17,11 +17,11 @@
 
 #include "av1/encoder/encoder.h"
 
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size);
 
-void vp10_encode_token_init(void);
+void av1_encode_token_init(void);
 
-static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
+static INLINE int av1_preserve_existing_gf(AV1_COMP *cpi) {
 #if CONFIG_EXT_REFS
   // Do not swap gf and arf indices for internal overlay frames
   return !cpi->multi_arf_allowed && cpi->rc.is_src_frame_alt_ref &&
@@ -36,4 +36,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_BITSTREAM_H_
+#endif  // AV1_ENCODER_BITSTREAM_H_
diff --git a/av1/encoder/bitwriter.h b/av1/encoder/bitwriter.h
index 8cc674b..2deffeb 100644
--- a/av1/encoder/bitwriter.h
+++ b/av1/encoder/bitwriter.h
@@ -11,25 +11,25 @@
 /* The purpose of this header is to provide compile time pluggable bit writer
  * implementations with a common interface. */
 
-#ifndef VPX10_ENCODER_BITWRITER_H_
-#define VPX10_ENCODER_BITWRITER_H_
+#ifndef AV1_ENCODER_BITWRITER_H_
+#define AV1_ENCODER_BITWRITER_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/prob.h"
 
 #if CONFIG_ANS
 typedef struct BufAnsCoder BufAnsCoder;
 #include "av1/encoder/buf_ans.h"
-#define vp10_writer BufAnsCoder
-#define vp10_write buf_uabs_write
-#define vp10_write_bit buf_uabs_write_bit
-#define vp10_write_literal buf_uabs_write_literal
+#define aom_writer BufAnsCoder
+#define aom_write buf_uabs_write
+#define aom_write_bit buf_uabs_write_bit
+#define aom_write_literal buf_uabs_write_literal
 #else
 #include "aom_dsp/bitwriter.h"
-#define vp10_writer vpx_writer
-#define vp10_write vpx_write
-#define vp10_write_bit vpx_write_bit
-#define vp10_write_literal vpx_write_literal
+#define aom_writer aom_writer
+#define aom_write aom_write
+#define aom_write_bit aom_write_bit
+#define aom_write_literal aom_write_literal
 #endif
 
-#endif  // VPX10_ENCODER_BITWRITER_H_
+#endif  // AV1_ENCODER_BITWRITER_H_
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 65bb1e2..5daa436 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_BLOCK_H_
-#define VP10_ENCODER_BLOCK_H_
+#ifndef AV1_ENCODER_BLOCK_H_
+#define AV1_ENCODER_BLOCK_H_
 
 #include "av1/common/entropymv.h"
 #include "av1/common/entropy.h"
@@ -50,8 +50,8 @@
 
 /* The [2] dimension is for whether we skip the EOB node (i.e. if previous
  * coefficient in this block was zero) or not. */
-typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
-                                    [COEFF_CONTEXTS][ENTROPY_TOKENS];
+typedef unsigned int av1_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
+                                   [COEFF_CONTEXTS][ENTROPY_TOKENS];
 
 typedef struct {
   int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
@@ -150,7 +150,7 @@
   int encode_breakout;
 
   // note that token_costs is the cost when eob node is skipped
-  vp10_coeff_cost token_costs[TX_SIZES];
+  av1_coeff_cost token_costs[TX_SIZES];
 
   int optimize;
 
@@ -179,4 +179,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_BLOCK_H_
+#endif  // AV1_ENCODER_BLOCK_H_
diff --git a/av1/encoder/blockiness.c b/av1/encoder/blockiness.c
index 97e201a..487ffe3 100644
--- a/av1/encoder/blockiness.c
+++ b/av1/encoder/blockiness.c
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/filter.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
 
@@ -119,12 +119,12 @@
 
 // This function returns the blockiness for the entire frame currently by
 // looking at all borders in steps of 4.
-double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
-                           const unsigned char *img2, int img2_pitch, int width,
-                           int height) {
+double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
+                          const unsigned char *img2, int img2_pitch, int width,
+                          int height) {
   double blockiness = 0;
   int i, j;
-  vpx_clear_system_state();
+  aom_clear_system_state();
   for (i = 0; i < height;
        i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
     for (j = 0; j < width; j += 4) {
diff --git a/av1/encoder/buf_ans.c b/av1/encoder/buf_ans.c
index f87c1e1..d20edc3 100644
--- a/av1/encoder/buf_ans.c
+++ b/av1/encoder/buf_ans.c
@@ -13,29 +13,29 @@
 #include "av1/common/common.h"
 #include "av1/encoder/buf_ans.h"
 #include "av1/encoder/encoder.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
-void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
-                        int size_hint) {
+void av1_buf_ans_alloc(struct BufAnsCoder *c, struct AV1Common *cm,
+                       int size_hint) {
   c->cm = cm;
   c->size = size_hint;
-  CHECK_MEM_ERROR(cm, c->buf, vpx_malloc(c->size * sizeof(*c->buf)));
+  CHECK_MEM_ERROR(cm, c->buf, aom_malloc(c->size * sizeof(*c->buf)));
   // Initialize to overfull to trigger the assert in write.
   c->offset = c->size + 1;
 }
 
-void vp10_buf_ans_free(struct BufAnsCoder *c) {
-  vpx_free(c->buf);
+void av1_buf_ans_free(struct BufAnsCoder *c) {
+  aom_free(c->buf);
   c->buf = NULL;
   c->size = 0;
 }
 
-void vp10_buf_ans_grow(struct BufAnsCoder *c) {
+void av1_buf_ans_grow(struct BufAnsCoder *c) {
   struct buffered_ans_symbol *new_buf = NULL;
   int new_size = c->size * 2;
-  CHECK_MEM_ERROR(c->cm, new_buf, vpx_malloc(new_size * sizeof(*new_buf)));
+  CHECK_MEM_ERROR(c->cm, new_buf, aom_malloc(new_size * sizeof(*new_buf)));
   memcpy(new_buf, c->buf, c->size * sizeof(*c->buf));
-  vpx_free(c->buf);
+  aom_free(c->buf);
   c->buf = new_buf;
   c->size = new_size;
 }
diff --git a/av1/encoder/buf_ans.h b/av1/encoder/buf_ans.h
index 8a88c32..1ba6e6c 100644
--- a/av1/encoder/buf_ans.h
+++ b/av1/encoder/buf_ans.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_BUF_ANS_H_
-#define VP10_ENCODER_BUF_ANS_H_
+#ifndef AV1_ENCODER_BUF_ANS_H_
+#define AV1_ENCODER_BUF_ANS_H_
 // Buffered forward ANS writer.
 // Symbols are written to the writer in forward (decode) order and serialzed
 // backwards due to ANS's stack like behavior.
 
 #include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "av1/common/ans.h"
 
 #ifdef __cplusplus
@@ -34,18 +34,18 @@
 };
 
 struct BufAnsCoder {
-  struct VP10Common *cm;
+  struct AV1Common *cm;
   struct buffered_ans_symbol *buf;
   int size;
   int offset;
 };
 
-void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
-                        int size_hint);
+void av1_buf_ans_alloc(struct BufAnsCoder *c, struct AV1Common *cm,
+                       int size_hint);
 
-void vp10_buf_ans_free(struct BufAnsCoder *c);
+void av1_buf_ans_free(struct BufAnsCoder *c);
 
-void vp10_buf_ans_grow(struct BufAnsCoder *c);
+void av1_buf_ans_grow(struct BufAnsCoder *c);
 
 static INLINE void buf_ans_write_reset(struct BufAnsCoder *const c) {
   c->offset = 0;
@@ -55,7 +55,7 @@
                                   AnsP8 prob) {
   assert(c->offset <= c->size);
   if (c->offset == c->size) {
-    vp10_buf_ans_grow(c);
+    av1_buf_ans_grow(c);
   }
   c->buf[c->offset].method = ANS_METHOD_UABS;
   c->buf[c->offset].val_start = val;
@@ -67,7 +67,7 @@
                                   const struct rans_sym *const sym) {
   assert(c->offset <= c->size);
   if (c->offset == c->size) {
-    vp10_buf_ans_grow(c);
+    av1_buf_ans_grow(c);
   }
   c->buf[c->offset].method = ANS_METHOD_RANS;
   c->buf[c->offset].val_start = sym->cum_prob;
@@ -106,4 +106,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
-#endif  // VP10_ENCODER_BUF_ANS_H_
+#endif  // AV1_ENCODER_BUF_ANS_H_
diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c
index 9346e1c..2a105fc 100644
--- a/av1/encoder/context_tree.c
+++ b/av1/encoder/context_tree.c
@@ -18,7 +18,7 @@
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
+static void alloc_mode_context(AV1_COMMON *cm, int num_4x4_blk,
 #if CONFIG_EXT_PARTITION_TYPES
                                PARTITION_TYPE partition,
 #endif
@@ -33,17 +33,17 @@
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
 #if CONFIG_VAR_TX
-    CHECK_MEM_ERROR(cm, ctx->blk_skip[i], vpx_calloc(num_blk, sizeof(uint8_t)));
+    CHECK_MEM_ERROR(cm, ctx->blk_skip[i], aom_calloc(num_blk, sizeof(uint8_t)));
 #endif
     for (k = 0; k < 3; ++k) {
       CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
-                      vpx_memalign(32, num_pix * sizeof(*ctx->coeff[i][k])));
+                      aom_memalign(32, num_pix * sizeof(*ctx->coeff[i][k])));
       CHECK_MEM_ERROR(cm, ctx->qcoeff[i][k],
-                      vpx_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k])));
+                      aom_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k])));
       CHECK_MEM_ERROR(cm, ctx->dqcoeff[i][k],
-                      vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
+                      aom_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
       CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
-                      vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
+                      aom_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
     }
   }
 
@@ -51,7 +51,7 @@
     for (i = 0; i < 2; ++i) {
       CHECK_MEM_ERROR(
           cm, ctx->color_index_map[i],
-          vpx_memalign(32, num_pix * sizeof(*ctx->color_index_map[i])));
+          aom_memalign(32, num_pix * sizeof(*ctx->color_index_map[i])));
     }
   }
 }
@@ -60,28 +60,28 @@
   int i, k;
   for (i = 0; i < MAX_MB_PLANE; ++i) {
 #if CONFIG_VAR_TX
-    vpx_free(ctx->blk_skip[i]);
+    aom_free(ctx->blk_skip[i]);
     ctx->blk_skip[i] = 0;
 #endif
     for (k = 0; k < 3; ++k) {
-      vpx_free(ctx->coeff[i][k]);
+      aom_free(ctx->coeff[i][k]);
       ctx->coeff[i][k] = 0;
-      vpx_free(ctx->qcoeff[i][k]);
+      aom_free(ctx->qcoeff[i][k]);
       ctx->qcoeff[i][k] = 0;
-      vpx_free(ctx->dqcoeff[i][k]);
+      aom_free(ctx->dqcoeff[i][k]);
       ctx->dqcoeff[i][k] = 0;
-      vpx_free(ctx->eobs[i][k]);
+      aom_free(ctx->eobs[i][k]);
       ctx->eobs[i][k] = 0;
     }
   }
 
   for (i = 0; i < 2; ++i) {
-    vpx_free(ctx->color_index_map[i]);
+    aom_free(ctx->color_index_map[i]);
     ctx->color_index_map[i] = 0;
   }
 }
 
-static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
+static void alloc_tree_contexts(AV1_COMMON *cm, PC_TREE *tree,
                                 int num_4x4_blk) {
 #if CONFIG_EXT_PARTITION_TYPES
   alloc_mode_context(cm, num_4x4_blk, PARTITION_NONE, &tree->none);
@@ -180,7 +180,7 @@
 // partition level. There are contexts for none, horizontal, vertical, and
 // split.  Along with a block_size value and a selected block_size which
 // represents the state of our search.
-void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
+void av1_setup_pc_tree(AV1_COMMON *cm, ThreadData *td) {
   int i, j;
 #if CONFIG_EXT_PARTITION
   const int leaf_nodes = 256;
@@ -195,12 +195,12 @@
   int square_index = 1;
   int nodes;
 
-  vpx_free(td->leaf_tree);
+  aom_free(td->leaf_tree);
   CHECK_MEM_ERROR(cm, td->leaf_tree,
-                  vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
-  vpx_free(td->pc_tree);
+                  aom_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
+  aom_free(td->pc_tree);
   CHECK_MEM_ERROR(cm, td->pc_tree,
-                  vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
+                  aom_calloc(tree_nodes, sizeof(*td->pc_tree)));
 
   this_pc = &td->pc_tree[0];
   this_leaf = &td->leaf_tree[0];
@@ -248,7 +248,7 @@
   }
 }
 
-void vp10_free_pc_tree(ThreadData *td) {
+void av1_free_pc_tree(ThreadData *td) {
 #if CONFIG_EXT_PARTITION
   const int leaf_nodes = 256;
   const int tree_nodes = 256 + 64 + 16 + 4 + 1;
@@ -264,8 +264,8 @@
   // Sets up all the leaf nodes in the tree.
   for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
 
-  vpx_free(td->pc_tree);
+  aom_free(td->pc_tree);
   td->pc_tree = NULL;
-  vpx_free(td->leaf_tree);
+  aom_free(td->leaf_tree);
   td->leaf_tree = NULL;
 }
diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h
index 18f00bb..e121543 100644
--- a/av1/encoder/context_tree.h
+++ b/av1/encoder/context_tree.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_CONTEXT_TREE_H_
-#define VP10_ENCODER_CONTEXT_TREE_H_
+#ifndef AV1_ENCODER_CONTEXT_TREE_H_
+#define AV1_ENCODER_CONTEXT_TREE_H_
 
 #include "av1/common/blockd.h"
 #include "av1/encoder/block.h"
@@ -18,8 +18,8 @@
 extern "C" {
 #endif
 
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
 struct ThreadData;
 
 // Structure to hold snapshot of coding context during the mode picking process
@@ -93,11 +93,11 @@
 #endif
 } PC_TREE;
 
-void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_pc_tree(struct ThreadData *td);
+void av1_setup_pc_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_pc_tree(struct ThreadData *td);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */
+#endif /* AV1_ENCODER_CONTEXT_TREE_H_ */
diff --git a/av1/encoder/corner_detect.c b/av1/encoder/corner_detect.c
index 2b2d82d..a0500e3 100644
--- a/av1/encoder/corner_detect.c
+++ b/av1/encoder/corner_detect.c
@@ -14,7 +14,7 @@
 #include <math.h>
 #include <assert.h>
 
-#include "vp10/encoder/corner_detect.h"
+#include "av1/encoder/corner_detect.h"
 #include "third_party/fastfeat/fast.h"
 
 // Fast_9 wrapper
diff --git a/av1/encoder/corner_detect.h b/av1/encoder/corner_detect.h
index 8db713e..f658a6b 100644
--- a/av1/encoder/corner_detect.h
+++ b/av1/encoder/corner_detect.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_CORNER_DETECT_H_
-#define VP10_ENCODER_CORNER_DETECT_H_
+#ifndef AV1_ENCODER_CORNER_DETECT_H_
+#define AV1_ENCODER_CORNER_DETECT_H_
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -18,4 +18,4 @@
 int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
                      int *points, int max_points);
 
-#endif  // VP10_ENCODER_CORNER_DETECT_H
+#endif  // AV1_ENCODER_CORNER_DETECT_H_
diff --git a/av1/encoder/corner_match.c b/av1/encoder/corner_match.c
index 6b19d5b..02e8212 100644
--- a/av1/encoder/corner_match.c
+++ b/av1/encoder/corner_match.c
@@ -13,7 +13,7 @@
 #include <memory.h>
 #include <math.h>
 
-#include "vp10/encoder/corner_match.h"
+#include "av1/encoder/corner_match.h"
 
 #define MATCH_SZ 15
 #define MATCH_SZ_BY2 ((MATCH_SZ - 1) / 2)
diff --git a/av1/encoder/corner_match.h b/av1/encoder/corner_match.h
index 3bc8cb9..01c0ea4 100644
--- a/av1/encoder/corner_match.h
+++ b/av1/encoder/corner_match.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_CORNER_MATCH_H_
-#define VP10_ENCODER_CORNER_MATCH_H_
+#ifndef AV1_ENCODER_CORNER_MATCH_H_
+#define AV1_ENCODER_CORNER_MATCH_H_
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -26,4 +26,4 @@
                              int height, int frm_stride, int ref_stride,
                              double *correspondence_pts);
 
-#endif  // VP10_ENCODER_CORNER_MATCH_H
+#endif  // AV1_ENCODER_CORNER_MATCH_H_
diff --git a/av1/encoder/cost.c b/av1/encoder/cost.c
index 4542638..8a87f8f 100644
--- a/av1/encoder/cost.c
+++ b/av1/encoder/cost.c
@@ -15,9 +15,9 @@
 #endif  // CONFIG_ANS
 #include "av1/common/entropy.h"
 
-/* round(-log2(i/256.) * (1 << VP10_PROB_COST_SHIFT))
+/* round(-log2(i/256.) * (1 << AV1_PROB_COST_SHIFT))
    Begins with a bogus entry for simpler addressing. */
-const uint16_t vp10_prob_cost[256] = {
+const uint16_t av1_prob_cost[256] = {
   4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
   2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
   1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
@@ -41,8 +41,8 @@
 };
 
 #if CONFIG_ANS
-// round(-log2(i/1024.) * (1 << VP10_PROB_COST_SHIFT))
-static const uint16_t vp10_prob_cost10[1024] = {
+// round(-log2(i/1024.) * (1 << AV1_PROB_COST_SHIFT))
+static const uint16_t av1_prob_cost10[1024] = {
   5120, 5120, 4608, 4308, 4096, 3931, 3796, 3683, 3584, 3497, 3419, 3349, 3284,
   3225, 3171, 3120, 3072, 3027, 2985, 2945, 2907, 2871, 2837, 2804, 2772, 2742,
   2713, 2685, 2659, 2633, 2608, 2583, 2560, 2537, 2515, 2494, 2473, 2453, 2433,
@@ -125,15 +125,15 @@
 };
 #endif  // CONFIG_ANS
 
-static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+static void cost(int *costs, aom_tree tree, const aom_prob *probs, int i,
                  int c) {
-  const vpx_prob prob = probs[i / 2];
+  const aom_prob prob = probs[i / 2];
   int b;
 
   assert(prob != 0);
   for (b = 0; b <= 1; ++b) {
-    const int cc = c + vp10_cost_bit(prob, b);
-    const vpx_tree_index ii = tree[i + b];
+    const int cc = c + av1_cost_bit(prob, b);
+    const aom_tree_index ii = tree[i + b];
 
     if (ii <= 0)
       costs[-ii] = cc;
@@ -143,26 +143,26 @@
 }
 
 #if CONFIG_ANS
-void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
-                          const rans_dec_lut token_cdf, int skip_eob) {
+void av1_cost_tokens_ans(int *costs, const aom_prob *tree_probs,
+                         const rans_dec_lut token_cdf, int skip_eob) {
   int c_tree = 0;  // Cost of the "tree" nodes EOB and ZERO.
   int i;
-  costs[EOB_TOKEN] = vp10_cost_bit(tree_probs[0], 0);
-  if (!skip_eob) c_tree = vp10_cost_bit(tree_probs[0], 1);
+  costs[EOB_TOKEN] = av1_cost_bit(tree_probs[0], 0);
+  if (!skip_eob) c_tree = av1_cost_bit(tree_probs[0], 1);
   for (i = ZERO_TOKEN; i <= CATEGORY6_TOKEN; ++i) {
     const int p = token_cdf[i + 1] - token_cdf[i];
-    costs[i] = c_tree + vp10_prob_cost10[p];
+    costs[i] = c_tree + av1_prob_cost10[p];
   }
 }
 #endif  // CONFIG_ANS
 
-void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree) {
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
   cost(costs, tree, probs, 0, 0);
 }
 
-void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree) {
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
   assert(tree[0] <= 0 && tree[1] > 0);
 
-  costs[-tree[0]] = vp10_cost_bit(probs[0], 0);
+  costs[-tree[0]] = av1_cost_bit(probs[0], 0);
   cost(costs, tree, probs, 2, 0);
 }
diff --git a/av1/encoder/cost.h b/av1/encoder/cost.h
index 5ae2a79..4e4d9bb 100644
--- a/av1/encoder/cost.h
+++ b/av1/encoder/cost.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_COST_H_
-#define VP10_ENCODER_COST_H_
+#ifndef AV1_ENCODER_COST_H_
+#define AV1_ENCODER_COST_H_
 
 #include "aom_dsp/prob.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #if CONFIG_ANS
 #include "av1/common/ans.h"
 #endif  // CONFIG_ANS
@@ -21,50 +21,50 @@
 extern "C" {
 #endif
 
-extern const uint16_t vp10_prob_cost[256];
+extern const uint16_t av1_prob_cost[256];
 
-// The factor to scale from cost in bits to cost in vp10_prob_cost units.
-#define VP10_PROB_COST_SHIFT 9
+// The factor to scale from cost in bits to cost in av1_prob_cost units.
+#define AV1_PROB_COST_SHIFT 9
 
-#define vp10_cost_zero(prob) (vp10_prob_cost[prob])
+#define av1_cost_zero(prob) (av1_prob_cost[prob])
 
-#define vp10_cost_one(prob) vp10_cost_zero(256 - (prob))
+#define av1_cost_one(prob) av1_cost_zero(256 - (prob))
 
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) : (prob))
+#define av1_cost_bit(prob, bit) av1_cost_zero((bit) ? 256 - (prob) : (prob))
 
 // Cost of coding an n bit literal, using 128 (i.e. 50%) probability
 // for each bit.
-#define vp10_cost_literal(n) ((n) * (1 << VP10_PROB_COST_SHIFT))
+#define av1_cost_literal(n) ((n) * (1 << AV1_PROB_COST_SHIFT))
 
 static INLINE unsigned int cost_branch256(const unsigned int ct[2],
-                                          vpx_prob p) {
-  return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
+                                          aom_prob p) {
+  return ct[0] * av1_cost_zero(p) + ct[1] * av1_cost_one(p);
 }
 
-static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+static INLINE int treed_cost(aom_tree tree, const aom_prob *probs, int bits,
                              int len) {
   int cost = 0;
-  vpx_tree_index i = 0;
+  aom_tree_index i = 0;
 
   do {
     const int bit = (bits >> --len) & 1;
-    cost += vp10_cost_bit(probs[i >> 1], bit);
+    cost += av1_cost_bit(probs[i >> 1], bit);
     i = tree[i + bit];
   } while (len);
 
   return cost;
 }
 
-void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree);
-void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree);
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
 
 #if CONFIG_ANS
-void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
-                          const rans_dec_lut token_cdf, int skip_eob);
+void av1_cost_tokens_ans(int *costs, const aom_prob *tree_probs,
+                         const rans_dec_lut token_cdf, int skip_eob);
 #endif
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_COST_H_
+#endif  // AV1_ENCODER_COST_H_
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index 8f7812e..fc56436 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -11,9 +11,9 @@
 #include <assert.h>
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/blockd.h"
 #include "av1/common/idct.h"
 #include "aom_dsp/fwd_txfm.h"
@@ -1329,10 +1329,10 @@
 };
 #endif  // CONFIG_EXT_TX
 
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
-                   int tx_type) {
+void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                  int tx_type) {
   if (tx_type == DCT_DCT) {
-    vpx_fdct4x4_c(input, output, stride);
+    aom_fdct4x4_c(input, output, stride);
   } else {
     tran_low_t out[4 * 4];
     int i, j;
@@ -1362,8 +1362,8 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
-                   int tx_type) {
+void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+                  int tx_type) {
   const int n = 4;
   const int n2 = 8;
   tran_low_t out[8 * 4];
@@ -1391,8 +1391,8 @@
   // Note: overall scale factor of transform is 8 times unitary
 }
 
-void vp10_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
-                   int tx_type) {
+void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+                  int tx_type) {
   const int n = 4;
   const int n2 = 8;
   tran_low_t out[8 * 4];
@@ -1420,8 +1420,8 @@
   // Note: overall scale factor of transform is 8 times unitary
 }
 
-void vp10_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
-                    int tx_type) {
+void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
   const int n = 8;
   const int n2 = 16;
   tran_low_t out[16 * 8];
@@ -1449,8 +1449,8 @@
   // Note: overall scale factor of transform is 8 times unitary
 }
 
-void vp10_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
-                    int tx_type) {
+void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
   const int n = 8;
   const int n2 = 16;
   tran_low_t out[16 * 8];
@@ -1478,8 +1478,8 @@
   // Note: overall scale factor of transform is 8 times unitary
 }
 
-void vp10_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
-                     int tx_type) {
+void av1_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
   const int n = 16;
   const int n2 = 32;
   tran_low_t out[32 * 16];
@@ -1508,8 +1508,8 @@
   // Note: overall scale factor of transform is 4 times unitary
 }
 
-void vp10_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
-                     int tx_type) {
+void av1_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
   const int n = 16;
   const int n2 = 32;
   tran_low_t out[32 * 16];
@@ -1540,19 +1540,19 @@
 
 #endif  // CONFIG_EXT_TX
 
-void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
-                          tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                          int skip_block, const int16_t *zbin_ptr,
-                          const int16_t *round_ptr, const int16_t *quant_ptr,
-                          const int16_t *quant_shift_ptr,
-                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
-                          const int16_t *scan, const int16_t *iscan
+void av1_fdct8x8_quant_c(const int16_t *input, int stride,
+                         tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                         int skip_block, const int16_t *zbin_ptr,
+                         const int16_t *round_ptr, const int16_t *quant_ptr,
+                         const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                         tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                         uint16_t *eob_ptr, const int16_t *scan,
+                         const int16_t *iscan
 #if CONFIG_AOM_QM
-                          ,
-                          const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+                         ,
+                         const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
 #endif
-                          ) {
+                         ) {
   int eob = -1;
 
   int i, j;
@@ -1666,10 +1666,10 @@
   *eob_ptr = eob + 1;
 }
 
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
-                   int tx_type) {
+void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                  int tx_type) {
   if (tx_type == DCT_DCT) {
-    vpx_fdct8x8_c(input, output, stride);
+    aom_fdct8x8_c(input, output, stride);
   } else {
     tran_low_t out[64];
     int i, j;
@@ -1700,7 +1700,7 @@
 
 /* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
    pixel. */
-void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   int i;
   tran_high_t a1, b1, c1, d1, e1;
   const int16_t *ip_pass0 = input;
@@ -1754,10 +1754,10 @@
   }
 }
 
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
-                     int tx_type) {
+void av1_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
   if (tx_type == DCT_DCT) {
-    vpx_fdct16x16_c(input, output, stride);
+    aom_fdct16x16_c(input, output, stride);
   } else {
     tran_low_t out[256];
     int i, j;
@@ -1786,65 +1786,65 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
-                          int tx_type) {
-  vp10_fht4x4_c(input, output, stride, tx_type);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                         int tx_type) {
+  av1_fht4x4_c(input, output, stride, tx_type);
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+                         int tx_type) {
+  av1_fht4x8_c(input, output, stride, tx_type);
+}
+
+void av1_highbd_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+                         int tx_type) {
+  av1_fht8x4_c(input, output, stride, tx_type);
+}
+
+void av1_highbd_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
                           int tx_type) {
-  vp10_fht4x8_c(input, output, stride, tx_type);
+  av1_fht8x16_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
                           int tx_type) {
-  vp10_fht8x4_c(input, output, stride, tx_type);
+  av1_fht16x8_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
                            int tx_type) {
-  vp10_fht8x16_c(input, output, stride, tx_type);
+  av1_fht16x32_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
                            int tx_type) {
-  vp10_fht16x8_c(input, output, stride, tx_type);
-}
-
-void vp10_highbd_fht16x32_c(const int16_t *input, tran_low_t *output,
-                            int stride, int tx_type) {
-  vp10_fht16x32_c(input, output, stride, tx_type);
-}
-
-void vp10_highbd_fht32x16_c(const int16_t *input, tran_low_t *output,
-                            int stride, int tx_type) {
-  vp10_fht32x16_c(input, output, stride, tx_type);
+  av1_fht32x16_c(input, output, stride, tx_type);
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
-                          int tx_type) {
-  vp10_fht8x8_c(input, output, stride, tx_type);
+void av1_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                         int tx_type) {
+  av1_fht8x8_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
-                           int stride) {
-  vp10_fwht4x4_c(input, output, stride);
+void av1_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
+                          int stride) {
+  av1_fwht4x4_c(input, output, stride);
 }
 
-void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
-                            int stride, int tx_type) {
-  vp10_fht16x16_c(input, output, stride, tx_type);
+void av1_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+                           int tx_type) {
+  av1_fht16x16_c(input, output, stride, tx_type);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EXT_TX
-void vp10_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
-                     int tx_type) {
+void av1_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
   if (tx_type == DCT_DCT) {
-    vpx_fdct32x32_c(input, output, stride);
+    aom_fdct32x32_c(input, output, stride);
   } else {
     tran_low_t out[1024];
     int i, j;
@@ -1874,8 +1874,8 @@
 }
 
 // Forward identity transform.
-void vp10_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
-                     int bs, int tx_type) {
+void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
+                    int bs, int tx_type) {
   int r, c;
   const int shift = bs < 32 ? 3 : 2;
   if (tx_type == IDTX) {
@@ -1887,10 +1887,10 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fht32x32_c(const int16_t *input, tran_low_t *output,
-                            int stride, int tx_type) {
-  vp10_fht32x32_c(input, output, stride, tx_type);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
+                           int tx_type) {
+  av1_fht32x32_c(input, output, stride, tx_type);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_TX
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index b2635b4..a7183f9 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -12,13 +12,13 @@
 #include <math.h>
 #include <stdio.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 #include "aom_ports/system_state.h"
 
 #include "av1/common/common.h"
@@ -52,23 +52,22 @@
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/tokenize.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define IF_HBD(...) __VA_ARGS__
 #else
 #define IF_HBD(...)
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                               int output_enabled, int mi_row, int mi_col,
                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
 
 #if CONFIG_SUPERTX
 static int check_intra_b(PICK_MODE_CONTEXT *ctx);
 
-static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
-                          int mi_row, int mi_col, BLOCK_SIZE bsize,
-                          PC_TREE *pc_tree);
-static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
+                          int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree);
+static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
 #if CONFIG_EXT_INTER
                                int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
@@ -76,17 +75,17 @@
                                BLOCK_SIZE bsize_pred, int b_sub8x8, int block);
 static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
                             PC_TREE *pc_tree);
-static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
                                const TileInfo *const tile, int mi_row,
                                int mi_col, int mi_row_ori, int mi_col_ori,
                                int output_enabled, BLOCK_SIZE bsize,
                                BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
                                int dst_stride[3], PC_TREE *pc_tree);
-static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
                                     const TileInfo *const tile, int mi_row,
                                     int mi_col, BLOCK_SIZE bsize,
                                     int output_enabled, PC_TREE *pc_tree);
-static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
                           const TileInfo *const tile, int mi_row, int mi_col,
                           BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
                           TX_TYPE *best_tx, PC_TREE *pc_tree);
@@ -96,7 +95,7 @@
 //  purposes of activity masking.
 // Eventually this should be replaced by custom no-reference routines,
 //  which will be faster.
-static const uint8_t VP10_VAR_OFFS[MAX_SB_SIZE] = {
+static const uint8_t AV1_VAR_OFFS[MAX_SB_SIZE] = {
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -111,8 +110,8 @@
 #endif  // CONFIG_EXT_PARTITION
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static const uint16_t VP10_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+static const uint16_t AV1_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -127,7 +126,7 @@
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static const uint16_t VP10_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
+static const uint16_t AV1_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
   128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
   128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
   128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
@@ -148,7 +147,7 @@
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static const uint16_t VP10_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
+static const uint16_t AV1_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
   128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
   128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
   128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
@@ -172,45 +171,45 @@
   128 * 16
 #endif  // CONFIG_EXT_PARTITION
 };
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                            const struct buf_2d *ref,
-                                            BLOCK_SIZE bs) {
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+                                           const struct buf_2d *ref,
+                                           BLOCK_SIZE bs) {
   unsigned int sse;
   const unsigned int var =
-      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP10_VAR_OFFS, 0, &sse);
+      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, AV1_VAR_OFFS, 0, &sse);
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                                 const struct buf_2d *ref,
-                                                 BLOCK_SIZE bs, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+                                                const struct buf_2d *ref,
+                                                BLOCK_SIZE bs, int bd) {
   unsigned int var, sse;
   switch (bd) {
     case 10:
-      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                               CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10), 0,
-                               &sse);
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_10), 0, &sse);
       break;
     case 12:
-      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                               CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12), 0,
-                               &sse);
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_12), 0, &sse);
       break;
     case 8:
     default:
       var =
           cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                             CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8), 0, &sse);
+                             CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_8), 0, &sse);
       break;
   }
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
                                                    const struct buf_2d *ref,
                                                    int mi_row, int mi_col,
                                                    BLOCK_SIZE bs) {
@@ -225,9 +224,8 @@
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
 
-static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
-                                                   MACROBLOCK *x, int mi_row,
-                                                   int mi_col) {
+static BLOCK_SIZE get_rd_var_based_fixed_partition(AV1_COMP *cpi, MACROBLOCK *x,
+                                                   int mi_row, int mi_col) {
   unsigned int var = get_sby_perpixel_diff_variance(
       cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
   if (var < 8)
@@ -242,21 +240,21 @@
 
 // Lighter version of set_offsets that only sets the mode info
 // pointers.
-static void set_mode_info_offsets(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_mode_info_offsets(AV1_COMP *const cpi, MACROBLOCK *const x,
                                   MACROBLOCKD *const xd, int mi_row,
                                   int mi_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int idx_str = xd->mi_stride * mi_row + mi_col;
   xd->mi = cm->mi_grid_visible + idx_str;
   xd->mi[0] = cm->mi + idx_str;
   x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
 }
 
-static void set_offsets_without_segment_id(VP10_COMP *cpi,
+static void set_offsets_without_segment_id(AV1_COMP *cpi,
                                            const TileInfo *const tile,
                                            MACROBLOCK *const x, int mi_row,
                                            int mi_col, BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
   const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -273,14 +271,14 @@
 #endif
 
   // Set up destination pointers.
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
 
   // Set up limit values for MV components.
   // Mv beyond the range do not produce new/different prediction block.
-  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
-  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
-  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VPX_INTERP_EXTEND;
-  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VPX_INTERP_EXTEND;
+  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + AOM_INTERP_EXTEND);
+  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + AOM_INTERP_EXTEND);
+  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + AOM_INTERP_EXTEND;
+  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + AOM_INTERP_EXTEND;
 
   // Set up distance of MB to edge of frame in 1/8th pel units.
   assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
@@ -288,20 +286,20 @@
                  cm->mi_cols);
 
   // Set up source buffers.
-  vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+  av1_setup_src_planes(x, cpi->Source, mi_row, mi_col);
 
   // R/D setup.
   x->rddiv = cpi->rd.RDDIV;
   x->rdmult = cpi->rd.RDMULT;
 
-  // required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs()
+  // required by av1_append_sub8x8_mvs_for_idx() and av1_find_best_ref_mvs()
   xd->tile = *tile;
 }
 
-static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
                         MACROBLOCK *const x, int mi_row, int mi_col,
                         BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
   const struct segmentation *const seg = &cm->seg;
@@ -317,7 +315,7 @@
           seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
       mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
     }
-    vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+    av1_init_plane_quantizers(cpi, x, mbmi->segment_id);
 
     x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
   } else {
@@ -331,11 +329,11 @@
 }
 
 #if CONFIG_SUPERTX
-static void set_offsets_supertx(VP10_COMP *cpi, ThreadData *td,
+static void set_offsets_supertx(AV1_COMP *cpi, ThreadData *td,
                                 const TileInfo *const tile, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize) {
   MACROBLOCK *const x = &td->mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
   const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -348,7 +346,7 @@
                  cm->mi_cols);
 }
 
-static void set_offsets_extend(VP10_COMP *cpi, ThreadData *td,
+static void set_offsets_extend(AV1_COMP *cpi, ThreadData *td,
                                const TileInfo *const tile, int mi_row_pred,
                                int mi_col_pred, int mi_row_ori, int mi_col_ori,
                                BLOCK_SIZE bsize_pred) {
@@ -356,7 +354,7 @@
   // (mi_row_ori, mi_col_ori, bsize_ori): region for mv
   // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
   MACROBLOCK *const x = &td->mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize_pred];
   const int mi_height = num_8x8_blocks_high_lookup[bsize_pred];
@@ -365,10 +363,10 @@
 
   // Set up limit values for MV components.
   // Mv beyond the range do not produce new/different prediction block.
-  x->mv_row_min = -(((mi_row_pred + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
-  x->mv_col_min = -(((mi_col_pred + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
-  x->mv_row_max = (cm->mi_rows - mi_row_pred) * MI_SIZE + VPX_INTERP_EXTEND;
-  x->mv_col_max = (cm->mi_cols - mi_col_pred) * MI_SIZE + VPX_INTERP_EXTEND;
+  x->mv_row_min = -(((mi_row_pred + mi_height) * MI_SIZE) + AOM_INTERP_EXTEND);
+  x->mv_col_min = -(((mi_col_pred + mi_width) * MI_SIZE) + AOM_INTERP_EXTEND);
+  x->mv_row_max = (cm->mi_rows - mi_row_pred) * MI_SIZE + AOM_INTERP_EXTEND;
+  x->mv_col_max = (cm->mi_cols - mi_col_pred) * MI_SIZE + AOM_INTERP_EXTEND;
 
   // Set up distance of MB to edge of frame in 1/8th pel units.
   assert(!(mi_col_pred & (mi_width - 1)) && !(mi_row_pred & (mi_height - 1)));
@@ -382,15 +380,15 @@
   x->rdmult = cpi->rd.RDMULT;
 }
 
-static void set_segment_id_supertx(const VP10_COMP *const cpi,
+static void set_segment_id_supertx(const AV1_COMP *const cpi,
                                    MACROBLOCK *const x, const int mi_row,
                                    const int mi_col, const BLOCK_SIZE bsize) {
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   const struct segmentation *seg = &cm->seg;
   const int miw =
-      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+      AOMMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
   const int mih =
-      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+      AOMMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
   const int mi_offset = mi_row * cm->mi_stride + mi_col;
   MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
   int r, c;
@@ -404,11 +402,11 @@
     for (r = 0; r < mih; r++)
       for (c = 0; c < miw; c++)
         seg_id_supertx =
-            VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+            AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
     assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
 
     // Initialize plane quantisers
-    vp10_init_plane_quantizers(cpi, x, seg_id_supertx);
+    av1_init_plane_quantizers(cpi, x, seg_id_supertx);
     x->encode_breakout = cpi->segment_encode_breakout[seg_id_supertx];
   }
 
@@ -419,7 +417,7 @@
 }
 #endif  // CONFIG_SUPERTX
 
-static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_block_size(AV1_COMP *const cpi, MACROBLOCK *const x,
                            MACROBLOCKD *const xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
   if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
@@ -428,11 +426,11 @@
   }
 }
 
-static void set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+static void set_vt_partitioning(AV1_COMP *cpi, MACROBLOCK *const x,
                                 MACROBLOCKD *const xd, VAR_TREE *vt, int mi_row,
                                 int mi_col, const int64_t *const threshold,
                                 const BLOCK_SIZE *const bsize_min) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int hbw = num_8x8_blocks_wide_lookup[vt->bsize] / 2;
   const int hbh = num_8x8_blocks_high_lookup[vt->bsize] / 2;
   const int has_cols = mi_col + hbw < cm->mi_cols;
@@ -522,8 +520,8 @@
 // 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
 // 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
 // currently only used on key frame.
-static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[], int q) {
+  AV1_COMMON *const cm = &cpi->common;
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
   const int threshold_multiplier = is_key_frame ? 20 : 1;
   const int64_t threshold_base =
@@ -549,8 +547,8 @@
   thresholds[0] = INT64_MIN;
 }
 
-void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q) {
+  AV1_COMMON *const cm = &cpi->common;
   SPEED_FEATURES *const sf = &cpi->sf;
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
   if (sf->partition_search_type != VAR_BASED_PARTITION &&
@@ -578,7 +576,7 @@
 // Compute the minmax over the 8x8 subblocks.
 static int compute_minmax_8x8(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                               int highbd,
 #endif
                               int pixels_wide, int pixels_high) {
@@ -594,16 +592,16 @@
     if (x8_idx < pixels_wide && y8_idx < pixels_high) {
       const int src_offset = y8_idx * src_stride + x8_idx;
       const int ref_offset = y8_idx * ref_stride + x8_idx;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (highbd) {
-        vpx_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+        aom_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
                               ref_stride, &min, &max);
       } else {
-        vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+        aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
                        ref_stride, &min, &max);
       }
 #else
-      vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
+      aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
                      &min, &max);
 #endif
       if ((max - min) > minmax_max) minmax_max = (max - min);
@@ -613,38 +611,38 @@
   return (minmax_max - minmax_min);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int avg_4x4(const uint8_t *const src, const int stride,
                           const int highbd) {
   if (highbd) {
-    return vpx_highbd_avg_4x4(src, stride);
+    return aom_highbd_avg_4x4(src, stride);
   } else {
-    return vpx_avg_4x4(src, stride);
+    return aom_avg_4x4(src, stride);
   }
 }
 #else
 static INLINE int avg_4x4(const uint8_t *const src, const int stride) {
-  return vpx_avg_4x4(src, stride);
+  return aom_avg_4x4(src, stride);
 }
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int avg_8x8(const uint8_t *const src, const int stride,
                           const int highbd) {
   if (highbd) {
-    return vpx_highbd_avg_8x8(src, stride);
+    return aom_highbd_avg_8x8(src, stride);
   } else {
-    return vpx_avg_8x8(src, stride);
+    return aom_avg_8x8(src, stride);
   }
 }
 #else
 static INLINE int avg_8x8(const uint8_t *const src, const int stride) {
-  return vpx_avg_8x8(src, stride);
+  return aom_avg_8x8(src, stride);
 }
 #endif
 
 static void init_variance_tree(VAR_TREE *const vt,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                const int highbd,
 #endif
                                BLOCK_SIZE bsize, BLOCK_SIZE leaf_size,
@@ -665,37 +663,37 @@
   vt->width = width;
   vt->height = height;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   vt->highbd = highbd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (bsize > leaf_size) {
     const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
     const int px = num_4x4_blocks_wide_lookup[subsize] * 4;
 
     init_variance_tree(vt->split[0],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                        highbd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                       subsize, leaf_size, VPXMIN(px, width),
-                       VPXMIN(px, height), src, src_stride, ref, ref_stride);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                       subsize, leaf_size, AOMMIN(px, width),
+                       AOMMIN(px, height), src, src_stride, ref, ref_stride);
     init_variance_tree(vt->split[1],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                        highbd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                       subsize, leaf_size, width - px, VPXMIN(px, height),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                       subsize, leaf_size, width - px, AOMMIN(px, height),
                        src + px, src_stride, ref + px, ref_stride);
     init_variance_tree(vt->split[2],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                        highbd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                       subsize, leaf_size, VPXMIN(px, width), height - px,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                       subsize, leaf_size, AOMMIN(px, width), height - px,
                        src + px * src_stride, src_stride, ref + px * ref_stride,
                        ref_stride);
     init_variance_tree(vt->split[3],
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                        highbd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
                        subsize, leaf_size, width - px, height - px,
                        src + px * src_stride + px, src_stride,
                        ref + px * ref_stride + px, ref_stride);
@@ -771,7 +769,7 @@
   return vt->force_split;
 }
 
-static int check_split(VP10_COMP *const cpi, VAR_TREE *const vt,
+static int check_split(AV1_COMP *const cpi, VAR_TREE *const vt,
                        const int segment_id, const int64_t *const thresholds) {
   if (vt->bsize == BLOCK_16X16) {
     vt->force_split = vt->variances.none.variance > thresholds[0];
@@ -782,7 +780,7 @@
       // force split to 8x8 block for this 16x16 block.
       int minmax =
           compute_minmax_8x8(vt->src, vt->src_stride, vt->ref, vt->ref_stride,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              vt->highbd,
 #endif
                              vt->width, vt->height);
@@ -809,10 +807,10 @@
 // This function chooses partitioning based on the variance between source and
 // reconstructed last (or golden), where variance is computed for down-sampled
 // inputs.
-static void choose_partitioning(VP10_COMP *const cpi, ThreadData *const td,
+static void choose_partitioning(AV1_COMP *const cpi, ThreadData *const td,
                                 const TileInfo *const tile, MACROBLOCK *const x,
                                 const int mi_row, const int mi_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   VAR_TREE *const vt = td->var_root[cm->mib_size_log2 - MIN_MIB_SIZE_LOG2];
   int i;
@@ -843,7 +841,7 @@
     segment_id = get_segment_id(cm, map, cm->sb_size, mi_row, mi_col);
 
     if (cyclic_refresh_segment_id_boosted(segment_id)) {
-      int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+      int q = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
       set_vbp_thresholds(cpi, thresholds, q);
     }
   }
@@ -880,8 +878,8 @@
     assert(yv12 != NULL);
 
     if (yv12_g && yv12_g != yv12) {
-      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
       y_sad_g = cpi->fn_ptr[bsize].sdf(
           x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
           xd->plane[0].pre[0].stride);
@@ -889,8 +887,8 @@
       y_sad_g = UINT_MAX;
     }
 
-    vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
-                          &cm->frame_refs[LAST_FRAME - 1].sf);
+    av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
+                         &cm->frame_refs[LAST_FRAME - 1].sf);
     mbmi->ref_frame[0] = LAST_FRAME;
     mbmi->ref_frame[1] = NONE;
     mbmi->sb_type = cm->sb_size;
@@ -901,11 +899,11 @@
     mbmi->interp_filter = BILINEAR;
 #endif
 
-    y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
+    y_sad = av1_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
 
     if (y_sad_g < y_sad) {
-      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
       mbmi->ref_frame[0] = GOLDEN_FRAME;
       mbmi->mv[0].as_int = 0;
       y_sad = y_sad_g;
@@ -913,7 +911,7 @@
       x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
     }
 
-    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, cm->sb_size);
+    av1_build_inter_predictors_sb(xd, mi_row, mi_col, cm->sb_size);
 
     for (i = 1; i < MAX_MB_PLANE; ++i) {
       struct macroblock_plane *p = &x->plane[i];
@@ -941,25 +939,25 @@
       }
     }
   } else {
-    ref = VP10_VAR_OFFS;
+    ref = AV1_VAR_OFFS;
     ref_stride = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       switch (xd->bd) {
-        case 10: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10); break;
-        case 12: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12); break;
+        case 10: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_10); break;
+        case 12: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_12); break;
         case 8:
-        default: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8); break;
+        default: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_8); break;
       }
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   init_variance_tree(
       vt,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       cm->sb_size, (is_key_frame || low_res) ? BLOCK_4X4 : BLOCK_8X8,
       pixels_wide, pixels_high, src, src_stride, ref, ref_stride);
 
@@ -984,7 +982,7 @@
 }
 
 #if CONFIG_DUAL_FILTER
-static void reset_intmv_filter_type(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void reset_intmv_filter_type(AV1_COMMON *cm, MACROBLOCKD *xd,
                                     MB_MODE_INFO *mbmi) {
   int dir;
   for (dir = 0; dir < 2; ++dir) {
@@ -1006,7 +1004,7 @@
     if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
         (mbmi->ref_frame[1] > INTRA_FRAME &&
          has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
-      const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+      const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
       ++counts->switchable_interp[ctx][mbmi->interp_filter[dir]];
     }
   }
@@ -1014,8 +1012,7 @@
 #endif
 #if CONFIG_GLOBAL_MOTION
 static void update_global_motion_used(PREDICTION_MODE mode,
-                                      const MB_MODE_INFO *mbmi,
-                                      VP10_COMP *cpi) {
+                                      const MB_MODE_INFO *mbmi, AV1_COMP *cpi) {
   if (mode == ZEROMV) {
     ++cpi->global_motion_used[mbmi->ref_frame[0]];
     if (has_second_ref(mbmi)) ++cpi->global_motion_used[mbmi->ref_frame[1]];
@@ -1023,11 +1020,11 @@
 }
 #endif  // CONFIG_GLOBAL_MOTION
 
-static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
+static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
                          int mi_row, int mi_col, BLOCK_SIZE bsize,
                          int output_enabled) {
   int i, x_idx, y;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RD_COUNTS *const rdc = &td->rd_counts;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1039,8 +1036,8 @@
   const struct segmentation *const seg = &cm->seg;
   const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
   const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
-  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
-  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+  const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+  const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
   MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
 
@@ -1065,7 +1062,7 @@
 #endif
 
 #if CONFIG_REF_MV
-  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  rf_type = av1_ref_frame_type(mbmi->ref_frame);
   if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
       mbmi->mode == NEWMV) {
     for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
@@ -1091,8 +1088,8 @@
     // Else for cyclic refresh mode update the segment map, set the segment id
     // and then update the quantizer.
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
-      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
-                                         bsize, ctx->rate, ctx->dist, x->skip);
+      av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+                                        bsize, ctx->rate, ctx->dist, x->skip);
     }
   }
 
@@ -1123,7 +1120,7 @@
       }
 
   if (cpi->oxcf.aq_mode)
-    vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+    av1_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
 
   if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
     mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -1157,7 +1154,7 @@
 #endif
   if (!frame_is_intra_only(cm)) {
     if (is_inter_block(mbmi)) {
-      vp10_update_mv_count(td);
+      av1_update_mv_count(td);
 #if CONFIG_GLOBAL_MOTION
       if (bsize >= BLOCK_8X8) {
         update_global_motion_used(mbmi->mode, mbmi, cpi);
@@ -1175,13 +1172,13 @@
 #endif  // CONFIG_GLOBAL_MOTION
       if (cm->interp_filter == SWITCHABLE
 #if CONFIG_EXT_INTERP
-          && vp10_is_interp_needed(xd)
+          && av1_is_interp_needed(xd)
 #endif
               ) {
 #if CONFIG_DUAL_FILTER
         update_filter_type_count(td->counts, xd, mbmi);
 #else
-        const int ctx = vp10_get_pred_context_switchable_interp(xd);
+        const int ctx = av1_get_pred_context_switchable_interp(xd);
         ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
 #endif
       }
@@ -1205,14 +1202,14 @@
 }
 
 #if CONFIG_SUPERTX
-static void update_state_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_supertx(AV1_COMP *cpi, ThreadData *td,
                                  PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
                                  BLOCK_SIZE bsize, int output_enabled) {
   int y, x_idx;
 #if CONFIG_VAR_TX || CONFIG_REF_MV
   int i;
 #endif
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RD_COUNTS *const rdc = &td->rd_counts;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1223,8 +1220,8 @@
   const int mis = cm->mi_stride;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
   const int mi_height = num_8x8_blocks_high_lookup[bsize];
-  const int x_mis = VPXMIN(mi_width, cm->mi_cols - mi_col);
-  const int y_mis = VPXMIN(mi_height, cm->mi_rows - mi_row);
+  const int x_mis = AOMMIN(mi_width, cm->mi_cols - mi_col);
+  const int y_mis = AOMMIN(mi_height, cm->mi_rows - mi_row);
   MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
 
@@ -1242,7 +1239,7 @@
 #endif
 
 #if CONFIG_REF_MV
-  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  rf_type = av1_ref_frame_type(mbmi->ref_frame);
   if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
       mbmi->mode == NEWMV) {
     for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
@@ -1261,15 +1258,14 @@
   // If segmentation in use
   if (seg->enabled) {
     if (cpi->vaq_refresh) {
-      const int energy = bsize <= BLOCK_16X16
-                             ? x->mb_energy
-                             : vp10_block_energy(cpi, x, bsize);
-      mi_addr->mbmi.segment_id = vp10_vaq_segment_id(energy);
+      const int energy =
+          bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
+      mi_addr->mbmi.segment_id = av1_vaq_segment_id(energy);
     } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
       // For cyclic refresh mode, now update the segment map
       // and set the segment id.
-      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
-                                         bsize, ctx->rate, ctx->dist, 1);
+      av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+                                        bsize, ctx->rate, ctx->dist, 1);
     } else {
       // Otherwise just set the segment id based on the current segment map
       const uint8_t *const map =
@@ -1316,17 +1312,17 @@
   if (!output_enabled) return;
 
   if (!frame_is_intra_only(cm)) {
-    vp10_update_mv_count(td);
+    av1_update_mv_count(td);
 
     if (cm->interp_filter == SWITCHABLE
 #if CONFIG_EXT_INTERP
-        && vp10_is_interp_needed(xd)
+        && av1_is_interp_needed(xd)
 #endif
             ) {
 #if CONFIG_DUAL_FILTER
       update_filter_type_count(td->counts, xd, mbmi);
 #else
-      const int ctx = vp10_get_pred_context_switchable_interp(xd);
+      const int ctx = av1_get_pred_context_switchable_interp(xd);
       ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
 #endif
     }
@@ -1348,11 +1344,11 @@
   }
 }
 
-static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
                                     const TileInfo *const tile, int mi_row,
                                     int mi_col, BLOCK_SIZE bsize,
                                     int output_enabled, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *const p = x->plane;
@@ -1369,7 +1365,7 @@
   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
-    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+    x->mb_energy = av1_block_energy(cpi, x, bsize);
 
   switch (partition) {
     case PARTITION_NONE:
@@ -1504,10 +1500,10 @@
   ctx->mic.mbmi.tx_type = best_tx;
 }
 
-static void update_supertx_param_sb(VP10_COMP *cpi, ThreadData *td, int mi_row,
+static void update_supertx_param_sb(AV1_COMP *cpi, ThreadData *td, int mi_row,
                                     int mi_col, BLOCK_SIZE bsize, int best_tx,
                                     TX_SIZE supertx_size, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
   PARTITION_TYPE partition = pc_tree->partitioning;
   BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -1571,8 +1567,8 @@
 }
 #endif  // CONFIG_SUPERTX
 
-void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
-                           int mi_row, int mi_col) {
+void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+                          int mi_row, int mi_col) {
   uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
   const int widths[3] = { src->y_crop_width, src->uv_crop_width,
                           src->uv_crop_width };
@@ -1591,17 +1587,17 @@
                      x->e_mbd.plane[i].subsampling_y);
 }
 
-static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
                               int8_t segment_id) {
   int segment_qindex;
-  VP10_COMMON *const cm = &cpi->common;
-  vp10_init_plane_quantizers(cpi, x, segment_id);
-  vpx_clear_system_state();
-  segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
-  return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
+  AV1_COMMON *const cm = &cpi->common;
+  av1_init_plane_quantizers(cpi, x, segment_id);
+  aom_clear_system_state();
+  segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
 }
 
-static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
                              MACROBLOCK *const x, int mi_row, int mi_col,
                              RD_COST *rd_cost,
 #if CONFIG_SUPERTX
@@ -1612,7 +1608,7 @@
 #endif
                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                              int64_t best_rd) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
@@ -1621,7 +1617,7 @@
   const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
   int i, orig_rdmult;
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   // Use the lower precision, but faster, 32x32 fdct for mode selection.
   x->use_lp32x32fdct = 1;
@@ -1657,30 +1653,29 @@
   // Set to zero to make sure we do not use the previous encoded frame stats
   mbmi->skip = 0;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    x->source_variance = vp10_high_get_sby_perpixel_variance(
+    x->source_variance = av1_high_get_sby_perpixel_variance(
         cpi, &x->plane[0].src, bsize, xd->bd);
   } else {
     x->source_variance =
-        vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+        av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
   }
 #else
   x->source_variance =
-      vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Save rdmult before it might be changed, so it can be restored later.
   orig_rdmult = x->rdmult;
 
   if (aq_mode == VARIANCE_AQ) {
     if (cpi->vaq_refresh) {
-      const int energy = bsize <= BLOCK_16X16
-                             ? x->mb_energy
-                             : vp10_block_energy(cpi, x, bsize);
-      mbmi->segment_id = vp10_vaq_segment_id(energy);
+      const int energy =
+          bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
+      mbmi->segment_id = av1_vaq_segment_id(energy);
       // Re-initialise quantiser
-      vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+      av1_init_plane_quantizers(cpi, x, mbmi->segment_id);
       x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
     }
     x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
@@ -1689,30 +1684,30 @@
   } else if (aq_mode == CYCLIC_REFRESH_AQ) {
     // If segment is boosted, use rdmult for that segment.
     if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
-      x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
+      x->rdmult = av1_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
   }
 
   // Find best coding mode & reconstruct the MB so it is available
   // as a predictor for MBs that follow in the SB
   if (frame_is_intra_only(cm)) {
-    vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+    av1_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
 #if CONFIG_SUPERTX
     *totalrate_nocoef = 0;
 #endif  // CONFIG_SUPERTX
   } else {
     if (bsize >= BLOCK_8X8) {
       if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
-        vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
-                                            ctx, best_rd);
+        av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
+                                           ctx, best_rd);
 #if CONFIG_SUPERTX
         *totalrate_nocoef = rd_cost->rate;
 #endif  // CONFIG_SUPERTX
       } else {
-        vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+        av1_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
 #if CONFIG_SUPERTX
-                                   totalrate_nocoef,
+                                  totalrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                   bsize, ctx, best_rd);
+                                  bsize, ctx, best_rd);
 #if CONFIG_SUPERTX
         assert(*totalrate_nocoef >= 0);
 #endif  // CONFIG_SUPERTX
@@ -1722,12 +1717,12 @@
         // The decoder rejects sub8x8 partitions when SEG_LVL_SKIP is set.
         rd_cost->rate = INT_MAX;
       } else {
-        vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
-                                       rd_cost,
+        av1_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
+                                      rd_cost,
 #if CONFIG_SUPERTX
-                                       totalrate_nocoef,
+                                      totalrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                       bsize, ctx, best_rd);
+                                      bsize, ctx, best_rd);
 #if CONFIG_SUPERTX
         assert(*totalrate_nocoef >= 0);
 #endif  // CONFIG_SUPERTX
@@ -1740,7 +1735,7 @@
       (bsize >= BLOCK_16X16) &&
       (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
-    vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
+    av1_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
   }
 
   x->rdmult = orig_rdmult;
@@ -1793,7 +1788,7 @@
 }
 #endif
 
-static void update_stats(VP10_COMMON *cm, ThreadData *td
+static void update_stats(AV1_COMMON *cm, ThreadData *td
 #if CONFIG_SUPERTX
                          ,
                          int supertx_enabled
@@ -1815,7 +1810,7 @@
 #if CONFIG_SUPERTX
       if (!supertx_enabled)
 #endif
-        counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
+        counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++;
       // If the segment reference feature is enabled we have only a single
       // reference frame allowed for the segment so exclude it from
       // the reference frame counts used to work out probabilities.
@@ -1826,53 +1821,53 @@
 #endif  // CONFIG_EXT_REFS
 
         if (cm->reference_mode == REFERENCE_MODE_SELECT)
-          counts->comp_inter[vp10_get_reference_mode_context(
+          counts->comp_inter[av1_get_reference_mode_context(
               cm, xd)][has_second_ref(mbmi)]++;
 
         if (has_second_ref(mbmi)) {
 #if CONFIG_EXT_REFS
           const int bit = (ref0 == GOLDEN_FRAME || ref0 == LAST3_FRAME);
 
-          counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)][0][bit]++;
+          counts->comp_ref[av1_get_pred_context_comp_ref_p(cm, xd)][0][bit]++;
           if (!bit) {
-            counts->comp_ref[vp10_get_pred_context_comp_ref_p1(
+            counts->comp_ref[av1_get_pred_context_comp_ref_p1(
                 cm, xd)][1][ref0 == LAST_FRAME]++;
           } else {
-            counts->comp_ref[vp10_get_pred_context_comp_ref_p2(
+            counts->comp_ref[av1_get_pred_context_comp_ref_p2(
                 cm, xd)][2][ref0 == GOLDEN_FRAME]++;
           }
 
-          counts->comp_bwdref[vp10_get_pred_context_comp_bwdref_p(
+          counts->comp_bwdref[av1_get_pred_context_comp_bwdref_p(
               cm, xd)][0][ref1 == ALTREF_FRAME]++;
 #else
-          counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+          counts->comp_ref[av1_get_pred_context_comp_ref_p(
               cm, xd)][0][ref0 == GOLDEN_FRAME]++;
 #endif  // CONFIG_EXT_REFS
         } else {
 #if CONFIG_EXT_REFS
           const int bit = (ref0 == ALTREF_FRAME || ref0 == BWDREF_FRAME);
 
-          counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0][bit]++;
+          counts->single_ref[av1_get_pred_context_single_ref_p1(xd)][0][bit]++;
           if (bit) {
-            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+            counts->single_ref[av1_get_pred_context_single_ref_p2(
                 xd)][1][ref0 != BWDREF_FRAME]++;
           } else {
             const int bit1 = !(ref0 == LAST2_FRAME || ref0 == LAST_FRAME);
-            counts->single_ref[vp10_get_pred_context_single_ref_p3(
-                xd)][2][bit1]++;
+            counts
+                ->single_ref[av1_get_pred_context_single_ref_p3(xd)][2][bit1]++;
             if (!bit1) {
-              counts->single_ref[vp10_get_pred_context_single_ref_p4(
+              counts->single_ref[av1_get_pred_context_single_ref_p4(
                   xd)][3][ref0 != LAST_FRAME]++;
             } else {
-              counts->single_ref[vp10_get_pred_context_single_ref_p5(
+              counts->single_ref[av1_get_pred_context_single_ref_p5(
                   xd)][4][ref0 != LAST3_FRAME]++;
             }
           }
 #else
-          counts->single_ref[vp10_get_pred_context_single_ref_p1(
+          counts->single_ref[av1_get_pred_context_single_ref_p1(
               xd)][0][ref0 != LAST_FRAME]++;
           if (ref0 != LAST_FRAME) {
-            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+            counts->single_ref[av1_get_pred_context_single_ref_p2(
                 xd)][1][ref0 != GOLDEN_FRAME]++;
           }
 #endif  // CONFIG_EXT_REFS
@@ -1933,8 +1928,8 @@
           ++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(mode)];
         } else {
 #endif  // CONFIG_EXT_INTER
-          mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                                mbmi->ref_frame, bsize, -1);
+          mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                               mbmi->ref_frame, bsize, -1);
           update_inter_mode_stats(counts, mode,
 #if CONFIG_EXT_INTER
                                   has_second_ref(mbmi),
@@ -1942,13 +1937,13 @@
                                   mode_ctx);
 
           if (mode == NEWMV) {
-            uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+            uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
             int idx;
 
             for (idx = 0; idx < 2; ++idx) {
               if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
                 uint8_t drl_ctx =
-                    vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+                    av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
                 ++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx];
 
                 if (mbmi->ref_mv_idx == idx) break;
@@ -1957,13 +1952,13 @@
           }
 
           if (mode == NEARMV) {
-            uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+            uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
             int idx;
 
             for (idx = 1; idx < 3; ++idx) {
               if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
                 uint8_t drl_ctx =
-                    vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+                    av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
                 ++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx - 1];
 
                 if (mbmi->ref_mv_idx == idx - 1) break;
@@ -1997,8 +1992,8 @@
                   b_mode)];
             } else {
 #endif  // CONFIG_EXT_INTER
-              mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                                    mbmi->ref_frame, bsize, j);
+              mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                                   mbmi->ref_frame, bsize, j);
               update_inter_mode_stats(counts, b_mode,
 #if CONFIG_EXT_INTER
                                       has_second_ref(mbmi),
@@ -2105,7 +2100,7 @@
 #endif
 }
 
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
+static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
 #if CONFIG_EXT_PARTITION_TYPES
@@ -2129,11 +2124,10 @@
   }
 }
 
-static void encode_sb(VP10_COMP *cpi, ThreadData *td,
-                      const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
-                      int mi_col, int output_enabled, BLOCK_SIZE bsize,
-                      PC_TREE *pc_tree) {
-  const VP10_COMMON *const cm = &cpi->common;
+static void encode_sb(AV1_COMP *cpi, ThreadData *td, const TileInfo *const tile,
+                      TOKENEXTRA **tp, int mi_row, int mi_col,
+                      int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
 
@@ -2168,8 +2162,7 @@
       update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize,
                               output_enabled, pc_tree);
 
-      vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
-                            mi_col);
+      av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
       for (i = 0; i < MAX_MB_PLANE; i++) {
         dst_buf[i] = xd->plane[i].dst.buf;
         dst_stride[i] = xd->plane[i].dst.stride;
@@ -2185,11 +2178,11 @@
         x->skip_optimize = 0;
         x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
 
-        vp10_encode_sb_supertx(x, bsize);
-        vp10_tokenize_sb_supertx(cpi, td, tp, !output_enabled, bsize);
+        av1_encode_sb_supertx(x, bsize);
+        av1_tokenize_sb_supertx(cpi, td, tp, !output_enabled, bsize);
       } else {
         xd->mi[0]->mbmi.skip = 1;
-        if (output_enabled) td->counts->skip[vp10_get_skip_context(xd)][1]++;
+        if (output_enabled) td->counts->skip[av1_get_skip_context(xd)][1]++;
         reset_skip_context(xd, bsize);
       }
       if (output_enabled) {
@@ -2349,7 +2342,7 @@
 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
                                       int cols_left, int *bh, int *bw) {
   if (rows_left <= 0 || cols_left <= 0) {
-    return VPXMIN(bsize, BLOCK_8X8);
+    return AOMMIN(bsize, BLOCK_8X8);
   } else {
     for (; bsize > 0; bsize -= 3) {
       *bh = num_8x8_blocks_high_lookup[bsize];
@@ -2362,7 +2355,7 @@
   return bsize;
 }
 
-static void set_partial_sb_partition(const VP10_COMMON *const cm, MODE_INFO *mi,
+static void set_partial_sb_partition(const AV1_COMMON *const cm, MODE_INFO *mi,
                                      int bh_in, int bw_in,
                                      int mi_rows_remaining,
                                      int mi_cols_remaining, BLOCK_SIZE bsize,
@@ -2385,10 +2378,10 @@
 // However, at the bottom and right borders of the image the requested size
 // may not be allowed in which case this code attempts to choose the largest
 // allowable partition.
-static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_fixed_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
                                    MODE_INFO **mib, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int mi_rows_remaining = tile->mi_row_end - mi_row;
   const int mi_cols_remaining = tile->mi_col_end - mi_col;
   int block_row, block_col;
@@ -2415,7 +2408,7 @@
   }
 }
 
-static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
                              TileDataEnc *tile_data, MODE_INFO **mib,
                              TOKENEXTRA **tp, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, int *rate, int64_t *dist,
@@ -2423,7 +2416,7 @@
                              int *rate_nocoef,
 #endif
                              int do_recon, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -2451,9 +2444,9 @@
   assert(num_4x4_blocks_wide_lookup[bsize] ==
          num_4x4_blocks_high_lookup[bsize]);
 
-  vp10_rd_cost_reset(&last_part_rdc);
-  vp10_rd_cost_reset(&none_rdc);
-  vp10_rd_cost_reset(&chosen_rdc);
+  av1_rd_cost_reset(&last_part_rdc);
+  av1_rd_cost_reset(&none_rdc);
+  av1_rd_cost_reset(&chosen_rdc);
 
   pc_tree->partitioning = partition;
 
@@ -2467,7 +2460,7 @@
 
   if (bsize == BLOCK_16X16 && cpi->vaq_refresh) {
     set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
-    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+    x->mb_energy = av1_block_energy(cpi, x, bsize);
   }
 
   if (do_partition_search &&
@@ -2543,7 +2536,7 @@
         int rt_nocoef = 0;
 #endif
         PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
         rd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, &tmp_rdc,
@@ -2555,7 +2548,7 @@
 #endif
                          subsize, &pc_tree->horizontal[1], INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
 #if CONFIG_SUPERTX
           last_part_rate_nocoef = INT_MAX;
 #endif
@@ -2585,7 +2578,7 @@
         int rt_nocoef = 0;
 #endif
         PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
         rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, &tmp_rdc,
@@ -2598,7 +2591,7 @@
                          subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
                          INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
 #if CONFIG_SUPERTX
           last_part_rate_nocoef = INT_MAX;
 #endif
@@ -2641,7 +2634,7 @@
         if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
           continue;
 
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         rd_use_partition(cpi, td, tile_data,
                          mib + jj * hbs * cm->mi_stride + ii * hbs, tp,
                          mi_row + y_idx, mi_col + x_idx, subsize, &tmp_rdc.rate,
@@ -2651,7 +2644,7 @@
 #endif
                          i != 3, pc_tree->split[i]);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
 #if CONFIG_SUPERTX
           last_part_rate_nocoef = INT_MAX;
 #endif
@@ -2726,7 +2719,7 @@
       restore_context(x, &x_ctx, mi_row, mi_col, bsize);
 
       if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-        vp10_rd_cost_reset(&chosen_rdc);
+        av1_rd_cost_reset(&chosen_rdc);
 #if CONFIG_SUPERTX
         chosen_rate_nocoef = INT_MAX;
 #endif
@@ -2840,7 +2833,7 @@
 // The min and max are assumed to have been initialized prior to calling this
 // function so repeat calls can accumulate a min and max of more than one
 // superblock.
-static void get_sb_partition_size_range(const VP10_COMMON *const cm,
+static void get_sb_partition_size_range(const AV1_COMMON *const cm,
                                         MACROBLOCKD *xd, MODE_INFO **mib,
                                         BLOCK_SIZE *min_block_size,
                                         BLOCK_SIZE *max_block_size) {
@@ -2852,8 +2845,8 @@
     for (j = 0; j < cm->mib_size; ++j) {
       MODE_INFO *mi = mib[index + j];
       BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : BLOCK_4X4;
-      *min_block_size = VPXMIN(*min_block_size, sb_type);
-      *max_block_size = VPXMAX(*max_block_size, sb_type);
+      *min_block_size = AOMMIN(*min_block_size, sb_type);
+      *max_block_size = AOMMAX(*max_block_size, sb_type);
     }
     index += xd->mi_stride;
   }
@@ -2861,11 +2854,11 @@
 
 // Look at neighboring blocks and set a min and max partition size based on
 // what they chose.
-static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
+static void rd_auto_partition_range(AV1_COMP *cpi, const TileInfo *const tile,
                                     MACROBLOCKD *const xd, int mi_row,
                                     int mi_col, BLOCK_SIZE *min_block_size,
                                     BLOCK_SIZE *max_block_size) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MODE_INFO **mi = xd->mi;
   const int left_in_image = xd->left_available && mi[-1];
   const int above_in_image = xd->up_available && mi[-xd->mi_stride];
@@ -2910,30 +2903,30 @@
   // Check border cases where max and min from neighbors may not be legal.
   max_size = find_partition_size(max_size, mi_rows_remaining, mi_cols_remaining,
                                  &bh, &bw);
-  min_size = VPXMIN(min_size, max_size);
+  min_size = AOMMIN(min_size, max_size);
 
   // Test for blocks at the edge of the active image.
   // This may be the actual edge of the image or where there are formatting
   // bars.
-  if (vp10_active_edge_sb(cpi, mi_row, mi_col)) {
+  if (av1_active_edge_sb(cpi, mi_row, mi_col)) {
     min_size = BLOCK_4X4;
   } else {
-    min_size = VPXMIN(cpi->sf.rd_auto_partition_min_limit, min_size);
+    min_size = AOMMIN(cpi->sf.rd_auto_partition_min_limit, min_size);
   }
 
   // When use_square_partition_only is true, make sure at least one square
   // partition is allowed by selecting the next smaller square size as
   // *min_block_size.
   if (cpi->sf.use_square_partition_only) {
-    min_size = VPXMIN(min_size, next_square_size[max_size]);
+    min_size = AOMMIN(min_size, next_square_size[max_size]);
   }
 
-  *min_block_size = VPXMIN(min_size, cm->sb_size);
-  *max_block_size = VPXMIN(max_size, cm->sb_size);
+  *min_block_size = AOMMIN(min_size, cm->sb_size);
+  *max_block_size = AOMMIN(max_size, cm->sb_size);
 }
 
 // TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize,
                                 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
   int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -2953,8 +2946,8 @@
       for (idx = 0; idx < mi_width; ++idx) {
         mi = prev_mi[idy * cm->mi_stride + idx];
         bs = mi ? mi->mbmi.sb_type : bsize;
-        min_size = VPXMIN(min_size, bs);
-        max_size = VPXMAX(max_size, bs);
+        min_size = AOMMIN(min_size, bs);
+        max_size = AOMMAX(max_size, bs);
       }
     }
   }
@@ -2963,8 +2956,8 @@
     for (idy = 0; idy < mi_height; ++idy) {
       mi = xd->mi[idy * cm->mi_stride - 1];
       bs = mi ? mi->mbmi.sb_type : bsize;
-      min_size = VPXMIN(min_size, bs);
-      max_size = VPXMAX(max_size, bs);
+      min_size = AOMMIN(min_size, bs);
+      max_size = AOMMAX(max_size, bs);
     }
   }
 
@@ -2972,8 +2965,8 @@
     for (idx = 0; idx < mi_width; ++idx) {
       mi = xd->mi[idx - cm->mi_stride];
       bs = mi ? mi->mbmi.sb_type : bsize;
-      min_size = VPXMIN(min_size, bs);
-      max_size = VPXMAX(max_size, bs);
+      min_size = AOMMIN(min_size, bs);
+      max_size = AOMMAX(max_size, bs);
     }
   }
 
@@ -2982,8 +2975,8 @@
     max_size = max_partition_size[max_size];
   }
 
-  *min_bs = VPXMIN(min_size, cm->sb_size);
-  *max_bs = VPXMIN(max_size, cm->sb_size);
+  *min_bs = AOMMIN(min_size, cm->sb_size);
+  *max_bs = AOMMIN(max_size, cm->sb_size);
 }
 
 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
@@ -3094,7 +3087,7 @@
 
 #if CONFIG_EXT_PARTITION_TYPES
 static void rd_test_partition3(
-    VP10_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
+    AV1_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
     PC_TREE *pc_tree, RD_COST *best_rdc, PICK_MODE_CONTEXT ctxs[3],
     PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, BLOCK_SIZE bsize,
     PARTITION_TYPE partition,
@@ -3107,7 +3100,7 @@
   MACROBLOCKD *const xd = &x->e_mbd;
   RD_COST this_rdc, sum_rdc;
 #if CONFIG_SUPERTX
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   int this_rate_nocoef, sum_rate_nocoef;
   int abort_flag;
@@ -3214,7 +3207,7 @@
         TX_SIZE supertx_size = max_txsize_lookup[bsize];
         const PARTITION_TYPE best_partition = pc_tree->partitioning;
         pc_tree->partitioning = partition;
-        sum_rdc.rate += vp10_cost_bit(
+        sum_rdc.rate += av1_cost_bit(
             cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
                                 [supertx_size],
             0);
@@ -3230,7 +3223,7 @@
           rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
                         &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
 
-          tmp_rdc.rate += vp10_cost_bit(
+          tmp_rdc.rate += av1_cost_bit(
               cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
                                   [supertx_size],
               1);
@@ -3272,7 +3265,7 @@
 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
 // unlikely to be selected depending on previous rate-distortion optimization
 // results, for encoding speed-up.
-static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
                               TileDataEnc *tile_data, TOKENEXTRA **tp,
                               int mi_row, int mi_col, BLOCK_SIZE bsize,
                               RD_COST *rd_cost,
@@ -3280,7 +3273,7 @@
                               int *rate_nocoef,
 #endif
                               int64_t best_rd, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -3334,15 +3327,15 @@
     if (!force_vert_split) {  // force_horz_split only
       tmp_partition_cost[PARTITION_VERT] = INT_MAX;
       tmp_partition_cost[PARTITION_HORZ] =
-          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 0);
+          av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 0);
       tmp_partition_cost[PARTITION_SPLIT] =
-          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 1);
+          av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 1);
     } else if (!force_horz_split) {  // force_vert_split only
       tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
       tmp_partition_cost[PARTITION_VERT] =
-          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 0);
+          av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 0);
       tmp_partition_cost[PARTITION_SPLIT] =
-          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 1);
+          av1_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 1);
     } else {  // force_ horz_split && force_vert_split horz_split
       tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
       tmp_partition_cost[PARTITION_VERT] = INT_MAX;
@@ -3364,15 +3357,15 @@
   assert(num_8x8_blocks_wide_lookup[bsize] ==
          num_8x8_blocks_high_lookup[bsize]);
 
-  vp10_rd_cost_init(&this_rdc);
-  vp10_rd_cost_init(&sum_rdc);
-  vp10_rd_cost_reset(&best_rdc);
+  av1_rd_cost_init(&this_rdc);
+  av1_rd_cost_init(&sum_rdc);
+  av1_rd_cost_reset(&best_rdc);
   best_rdc.rdcost = best_rd;
 
   set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
 
   if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
-    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+    x->mb_energy = av1_block_energy(cpi, x, bsize);
 
   if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
     int cb_partition_search_ctrl =
@@ -3424,9 +3417,9 @@
     int mb_row = mi_row >> 1;
     int mb_col = mi_col >> 1;
     int mb_row_end =
-        VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+        AOMMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
     int mb_col_end =
-        VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+        AOMMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
     int r, c;
 
     // compute a complexity measure, basically measure inconsistency of motion
@@ -3527,9 +3520,9 @@
           int mb_row = mi_row >> 1;
           int mb_col = mi_col >> 1;
           int mb_row_end =
-              VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+              AOMMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
           int mb_col_end =
-              VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+              AOMMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
           int r, c;
 
           int skip = 1;
@@ -3613,7 +3606,7 @@
 
         pc_tree->partitioning = PARTITION_SPLIT;
 
-        sum_rdc.rate += vp10_cost_bit(
+        sum_rdc.rate += av1_cost_bit(
             cm->fc->supertx_prob[partition_supertx_context_lookup
                                      [PARTITION_SPLIT]][supertx_size],
             0);
@@ -3629,7 +3622,7 @@
           rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
                         &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
 
-          tmp_rdc.rate += vp10_cost_bit(
+          tmp_rdc.rate += av1_cost_bit(
               cm->fc->supertx_prob[partition_supertx_context_lookup
                                        [PARTITION_SPLIT]][supertx_size],
               1);
@@ -3692,7 +3685,7 @@
 
         pc_tree->partitioning = PARTITION_SPLIT;
 
-        sum_rdc.rate += vp10_cost_bit(
+        sum_rdc.rate += av1_cost_bit(
             cm->fc->supertx_prob[partition_supertx_context_lookup
                                      [PARTITION_SPLIT]][supertx_size],
             0);
@@ -3708,7 +3701,7 @@
           rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
                         &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
 
-          tmp_rdc.rate += vp10_cost_bit(
+          tmp_rdc.rate += av1_cost_bit(
               cm->fc->supertx_prob[partition_supertx_context_lookup
                                        [PARTITION_SPLIT]][supertx_size],
               1);
@@ -3752,7 +3745,7 @@
 
   // PARTITION_HORZ
   if (partition_horz_allowed &&
-      (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
+      (do_rect || av1_active_h_edge(cpi, mi_row, mi_step))) {
     subsize = get_subsize(bsize, PARTITION_HORZ);
     if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
 #if CONFIG_DUAL_FILTER
@@ -3835,7 +3828,7 @@
 
       pc_tree->partitioning = PARTITION_HORZ;
 
-      sum_rdc.rate += vp10_cost_bit(
+      sum_rdc.rate += av1_cost_bit(
           cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
                               [supertx_size],
           0);
@@ -3850,7 +3843,7 @@
         rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
                       &tmp_rdc.dist, &best_tx, pc_tree);
 
-        tmp_rdc.rate += vp10_cost_bit(
+        tmp_rdc.rate += av1_cost_bit(
             cm->fc
                 ->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
                               [supertx_size],
@@ -3889,7 +3882,7 @@
 
   // PARTITION_VERT
   if (partition_vert_allowed &&
-      (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
+      (do_rect || av1_active_v_edge(cpi, mi_col, mi_step))) {
     subsize = get_subsize(bsize, PARTITION_VERT);
 
     if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
@@ -3972,7 +3965,7 @@
 
       pc_tree->partitioning = PARTITION_VERT;
 
-      sum_rdc.rate += vp10_cost_bit(
+      sum_rdc.rate += av1_cost_bit(
           cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
                               [supertx_size],
           0);
@@ -3987,7 +3980,7 @@
         rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
                       &tmp_rdc.dist, &best_tx, pc_tree);
 
-        tmp_rdc.rate += vp10_cost_bit(
+        tmp_rdc.rate += av1_cost_bit(
             cm->fc
                 ->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
                               [supertx_size],
@@ -4108,10 +4101,10 @@
   }
 }
 
-static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td,
                              TileDataEnc *tile_data, int mi_row,
                              TOKENEXTRA **tp) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -4124,7 +4117,7 @@
 #endif  // CONFIG_EXT_PARTITION
 
   // Initialize the left context for the new SB row
-  vp10_zero_left_context(xd);
+  av1_zero_left_context(xd);
 
   // Code each SB in the row
   for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -4155,7 +4148,7 @@
       }
     }
 
-    vp10_zero(x->pred_mv);
+    av1_zero(x->pred_mv);
     pc_root->index = 0;
 
     if (seg->enabled) {
@@ -4216,7 +4209,7 @@
       cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
     if ((mi_row + MI_SIZE) %
                 (MI_SIZE *
-                 VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+                 AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
             0 &&
         mi_row + MI_SIZE < cm->mi_rows &&
         cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
@@ -4224,54 +4217,54 @@
       SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
 
       for (t = TX_4X4; t <= TX_32X32; ++t)
-        vp10_full_to_model_counts(cpi->td.counts->coef[t],
-                                  cpi->td.rd_counts.coef_counts[t]);
-      vp10_partial_adapt_probs(cm, mi_row, mi_col);
+        av1_full_to_model_counts(cpi->td.counts->coef[t],
+                                 cpi->td.rd_counts.coef_counts[t]);
+      av1_partial_adapt_probs(cm, mi_row, mi_col);
       ++cm->coef_probs_update_idx;
-      vp10_copy(subframe_stats->coef_probs_buf[cm->coef_probs_update_idx],
-                cm->fc->coef_probs);
-      vp10_copy(subframe_stats->coef_counts_buf[cm->coef_probs_update_idx],
-                cpi->td.rd_counts.coef_counts);
-      vp10_copy(subframe_stats->eob_counts_buf[cm->coef_probs_update_idx],
-                cm->counts.eob_branch);
-      vp10_fill_token_costs(x->token_costs,
+      av1_copy(subframe_stats->coef_probs_buf[cm->coef_probs_update_idx],
+               cm->fc->coef_probs);
+      av1_copy(subframe_stats->coef_counts_buf[cm->coef_probs_update_idx],
+               cpi->td.rd_counts.coef_counts);
+      av1_copy(subframe_stats->eob_counts_buf[cm->coef_probs_update_idx],
+               cm->counts.eob_branch);
+      av1_fill_token_costs(x->token_costs,
 #if CONFIG_ANS
-                            cm->fc->coef_cdfs,
+                           cm->fc->coef_cdfs,
 #endif  // CONFIG_ANS
-                            cm->fc->coef_probs);
+                           cm->fc->coef_probs);
     }
   }
 #endif  // CONFIG_ENTROPY
 }
 
-static void init_encode_frame_mb_context(VP10_COMP *cpi) {
+static void init_encode_frame_mb_context(AV1_COMP *cpi) {
   MACROBLOCK *const x = &cpi->td.mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
 
   // Copy data over into macro block data structures.
-  vp10_setup_src_planes(x, cpi->Source, 0, 0);
+  av1_setup_src_planes(x, cpi->Source, 0, 0);
 
-  vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
 }
 
-static int check_dual_ref_flags(VP10_COMP *cpi) {
+static int check_dual_ref_flags(AV1_COMP *cpi) {
   const int ref_flags = cpi->ref_frame_flags;
 
   if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
     return 0;
   } else {
-    return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG) +
+    return (!!(ref_flags & AOM_GOLD_FLAG) + !!(ref_flags & AOM_LAST_FLAG) +
 #if CONFIG_EXT_REFS
-            !!(ref_flags & VPX_LAST2_FLAG) + !!(ref_flags & VPX_LAST3_FLAG) +
-            !!(ref_flags & VPX_BWD_FLAG) +
+            !!(ref_flags & AOM_LAST2_FLAG) + !!(ref_flags & AOM_LAST3_FLAG) +
+            !!(ref_flags & AOM_BWD_FLAG) +
 #endif  // CONFIG_EXT_REFS
-            !!(ref_flags & VPX_ALT_FLAG)) >= 2;
+            !!(ref_flags & AOM_ALT_FLAG)) >= 2;
   }
 }
 
 #if !CONFIG_VAR_TX
-static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
+static void reset_skip_tx_size(AV1_COMMON *cm, TX_SIZE max_tx_size) {
   int mi_row, mi_col;
   const int mis = cm->mi_stride;
   MODE_INFO **mi_ptr = cm->mi_grid_visible;
@@ -4285,7 +4278,7 @@
 }
 #endif
 
-static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
+static MV_REFERENCE_FRAME get_frame_type(const AV1_COMP *cpi) {
   if (frame_is_intra_only(&cpi->common)) return INTRA_FRAME;
 #if CONFIG_EXT_REFS
   // We will not update the golden frame with an internal overlay frame
@@ -4303,7 +4296,7 @@
     return LAST_FRAME;
 }
 
-static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
+static TX_MODE select_tx_mode(const AV1_COMP *cpi, MACROBLOCKD *const xd) {
   if (xd->lossless[0]) return ONLY_4X4;
   if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
     return ALLOW_32X32;
@@ -4314,8 +4307,8 @@
     return cpi->common.tx_mode;
 }
 
-void vp10_init_tile_data(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_init_tile_data(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
   int tile_col, tile_row;
@@ -4323,8 +4316,8 @@
   unsigned int tile_tok = 0;
 
   if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
-    if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
-    CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+    if (cpi->tile_data != NULL) aom_free(cpi->tile_data);
+    CHECK_MEM_ERROR(cm, cpi->tile_data, aom_malloc(tile_cols * tile_rows *
                                                    sizeof(*cpi->tile_data)));
     cpi->allocated_tiles = tile_cols * tile_rows;
 
@@ -4346,7 +4339,7 @@
     for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
       TileInfo *const tile_info =
           &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
-      vp10_tile_init(tile_info, cm, tile_row, tile_col);
+      av1_tile_init(tile_info, cm, tile_row, tile_col);
 
       cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
       pre_tok = cpi->tile_tok[tile_row][tile_col];
@@ -4355,16 +4348,16 @@
   }
 }
 
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
-                      int tile_col) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
+                     int tile_col) {
+  AV1_COMMON *const cm = &cpi->common;
   TileDataEnc *const this_tile =
       &cpi->tile_data[tile_row * cm->tile_cols + tile_col];
   const TileInfo *const tile_info = &this_tile->tile_info;
   TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
   int mi_row;
 
-  vp10_zero_above_context(cm, tile_info->mi_col_start, tile_info->mi_col_end);
+  av1_zero_above_context(cm, tile_info->mi_col_start, tile_info->mi_col_end);
 
   // Set up pointers to per thread motion search counters.
   td->mb.m_search_count_ptr = &td->rd_counts.m_search_count;
@@ -4380,20 +4373,20 @@
   assert(cpi->tok_count[tile_row][tile_col] <= allocated_tokens(*tile_info));
 }
 
-static void encode_tiles(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void encode_tiles(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int tile_col, tile_row;
 
-  vp10_init_tile_data(cpi);
+  av1_init_tile_data(cpi);
 
   for (tile_row = 0; tile_row < cm->tile_rows; ++tile_row)
     for (tile_col = 0; tile_col < cm->tile_cols; ++tile_col)
-      vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col);
+      av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
 }
 
 #if CONFIG_FP_MB_STATS
 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
-                            VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
+                            AV1_COMMON *cm, uint8_t **this_frame_mb_stats) {
   uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
                          cm->current_video_frame * cm->MBs * sizeof(uint8_t);
 
@@ -4452,29 +4445,29 @@
 }
 #endif  // CONFIG_GLOBAL_MOTION
 
-static void encode_frame_internal(VP10_COMP *cpi) {
+static void encode_frame_internal(AV1_COMP *cpi) {
   ThreadData *const td = &cpi->td;
   MACROBLOCK *const x = &td->mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   RD_COUNTS *const rdc = &cpi->td.rd_counts;
   int i;
 
-  x->min_partition_size = VPXMIN(x->min_partition_size, cm->sb_size);
-  x->max_partition_size = VPXMIN(x->max_partition_size, cm->sb_size);
+  x->min_partition_size = AOMMIN(x->min_partition_size, cm->sb_size);
+  x->max_partition_size = AOMMIN(x->max_partition_size, cm->sb_size);
 
   xd->mi = cm->mi_grid_visible;
   xd->mi[0] = cm->mi;
 
-  vp10_zero(*td->counts);
-  vp10_zero(rdc->coef_counts);
-  vp10_zero(rdc->comp_pred_diff);
+  av1_zero(*td->counts);
+  av1_zero(rdc->coef_counts);
+  av1_zero(rdc->comp_pred_diff);
   rdc->m_search_count = 0;   // Count of motion search hits.
   rdc->ex_search_count = 0;  // Exhaustive mesh search hits.
 
 #if CONFIG_GLOBAL_MOTION
-  vpx_clear_system_state();
-  vp10_zero(cpi->global_motion_used);
+  aom_clear_system_state();
+  av1_zero(cpi->global_motion_used);
   if (cpi->common.frame_type == INTER_FRAME && cpi->Source) {
     YV12_BUFFER_CONFIG *ref_buf;
     int frame;
@@ -4488,11 +4481,11 @@
                                   &cm->global_motion[frame]);
           if (get_gmtype(&cm->global_motion[frame]) > GLOBAL_ZERO) {
             // compute the advantage of using gm parameters over 0 motion
-            double erroradvantage = vp10_warp_erroradv(
+            double erroradvantage = av1_warp_erroradv(
                 &cm->global_motion[frame].motion_params,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                 xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
                 ref_buf->y_buffer, ref_buf->y_width, ref_buf->y_height,
                 ref_buf->y_stride, cpi->Source->y_buffer, 0, 0,
                 cpi->Source->y_width, cpi->Source->y_height,
@@ -4510,7 +4503,7 @@
 
   for (i = 0; i < MAX_SEGMENTS; ++i) {
     const int qindex = cm->seg.enabled
-                           ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                           ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
                            : cm->base_qindex;
     xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
                       cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -4519,10 +4512,10 @@
   if (!cm->seg.enabled && xd->lossless[0]) x->optimize = 0;
 
   cm->tx_mode = select_tx_mode(cpi, xd);
-  vp10_frame_init_quantizer(cpi);
+  av1_frame_init_quantizer(cpi);
 
-  vp10_initialize_rd_consts(cpi);
-  vp10_initialize_me_consts(cpi, x, cm->base_qindex);
+  av1_initialize_rd_consts(cpi);
+  av1_initialize_me_consts(cpi, x, cm->base_qindex);
   init_encode_frame_mb_context(cpi);
 
   cm->use_prev_frame_mvs =
@@ -4553,17 +4546,17 @@
 
 #if CONFIG_VAR_TX
 #if CONFIG_REF_MV
-  vp10_zero(x->blk_skip_drl);
+  av1_zero(x->blk_skip_drl);
 #endif
 #endif
 
   if (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
       cpi->td.var_root[0] == NULL)
-    vp10_setup_var_tree(&cpi->common, &cpi->td);
+    av1_setup_var_tree(&cpi->common, &cpi->td);
 
   {
-    struct vpx_usec_timer emr_timer;
-    vpx_usec_timer_start(&emr_timer);
+    struct aom_usec_timer emr_timer;
+    aom_usec_timer_start(&emr_timer);
 
 #if CONFIG_FP_MB_STATS
     if (cpi->use_fp_mb_stats) {
@@ -4576,13 +4569,13 @@
     // TODO(geza.lore): The multi-threaded encoder is not safe with more than
     // 1 tile rows, as it uses the single above_context et al arrays from
     // cpi->common
-    if (VPXMIN(cpi->oxcf.max_threads, cm->tile_cols) > 1 && cm->tile_rows == 1)
-      vp10_encode_tiles_mt(cpi);
+    if (AOMMIN(cpi->oxcf.max_threads, cm->tile_cols) > 1 && cm->tile_rows == 1)
+      av1_encode_tiles_mt(cpi);
     else
       encode_tiles(cpi);
 
-    vpx_usec_timer_mark(&emr_timer);
-    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
+    aom_usec_timer_mark(&emr_timer);
+    cpi->time_encode_sb_row += aom_usec_timer_elapsed(&emr_timer);
   }
 
 #if 0
@@ -4591,8 +4584,8 @@
 #endif
 }
 
-void vp10_encode_frame(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_encode_frame(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   // In the longer term the encoder should be generalized to match the
   // decoder such that we allow compound where one of the 3 buffers has a
@@ -4678,10 +4671,10 @@
 
       if (comp_count_zero == 0) {
         cm->reference_mode = SINGLE_REFERENCE;
-        vp10_zero(counts->comp_inter);
+        av1_zero(counts->comp_inter);
       } else if (single_count_zero == 0) {
         cm->reference_mode = COMPOUND_REFERENCE;
-        vp10_zero(counts->comp_inter);
+        av1_zero(counts->comp_inter);
       }
     }
 
@@ -4767,8 +4760,8 @@
         const int bidx = idy * 2 + idx;
         const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode;
         if (intraonly) {
-          const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx);
-          const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx);
+          const PREDICTION_MODE a = av1_above_block_mode(mi, above_mi, bidx);
+          const PREDICTION_MODE l = av1_left_block_mode(mi, left_mi, bidx);
           ++counts->kf_y_mode[a][l][bmode];
         } else {
           ++counts->y_mode[0][bmode];
@@ -4776,8 +4769,8 @@
       }
   } else {
     if (intraonly) {
-      const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, 0);
-      const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0);
+      const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, 0);
+      const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, 0);
       ++counts->kf_y_mode[above][left][y_mode];
     } else {
       ++counts->y_mode[size_group_lookup[bsize]][y_mode];
@@ -4832,7 +4825,7 @@
   }
 }
 
-static void tx_partition_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void tx_partition_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
                                       BLOCK_SIZE plane_bsize, int mi_row,
                                       int mi_col, FRAME_COUNTS *td_counts) {
   const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
@@ -4893,7 +4886,7 @@
   }
 }
 
-static void tx_partition_set_contexts(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void tx_partition_set_contexts(AV1_COMMON *cm, MACROBLOCKD *xd,
                                       BLOCK_SIZE plane_bsize, int mi_row,
                                       int mi_col) {
   const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
@@ -4913,10 +4906,10 @@
 }
 #endif
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                               int output_enabled, int mi_row, int mi_col,
                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO **mi_8x8 = xd->mi;
@@ -4936,7 +4929,7 @@
     int plane;
     mbmi->skip = 1;
     for (plane = 0; plane < MAX_MB_PLANE; ++plane)
-      vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
+      av1_encode_intra_block_plane(x, AOMMAX(bsize, BLOCK_8X8), plane, 1);
     if (output_enabled)
       sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
                       frame_is_intra_only(cm));
@@ -4951,10 +4944,10 @@
         ++counts->ext_intra[1][mbmi->ext_intra_mode_info.use_ext_intra_mode[1]];
       if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED) {
         int p_angle;
-        const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+        const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
         p_angle =
             mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
-        if (vp10_is_intra_filter_switchable(p_angle))
+        if (av1_is_intra_filter_switchable(p_angle))
           ++counts->intra_filter[intra_filter_ctx][mbmi->intra_filter];
       }
     }
@@ -4967,11 +4960,11 @@
               xd->plane[plane].color_index_map[0];
           // TODO(huisu): this increases the use of token buffer. Needs stretch
           // test to verify.
-          vp10_tokenize_palette_sb(td, bsize, plane, t);
+          av1_tokenize_palette_sb(td, bsize, plane, t);
         }
       }
     }
-    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+    av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
   } else {
     int ref;
     const int is_compound = has_second_ref(mbmi);
@@ -4980,25 +4973,25 @@
     for (ref = 0; ref < 1 + is_compound; ++ref) {
       YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
       assert(cfg != NULL);
-      vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
-                            &xd->block_refs[ref]->sf);
+      av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
+                           &xd->block_refs[ref]->sf);
     }
     if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
-      vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
-                                      VPXMAX(bsize, BLOCK_8X8));
+      av1_build_inter_predictors_sby(xd, mi_row, mi_col,
+                                     AOMMAX(bsize, BLOCK_8X8));
 
-    vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col,
-                                     VPXMAX(bsize, BLOCK_8X8));
+    av1_build_inter_predictors_sbuv(xd, mi_row, mi_col,
+                                    AOMMAX(bsize, BLOCK_8X8));
 
 #if CONFIG_OBMC
     if (mbmi->motion_variation == OBMC_CAUSAL) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
       DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
 #else
       DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
       DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
       int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
       int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
@@ -5009,7 +5002,7 @@
 
       assert(mbmi->sb_type >= BLOCK_8X8);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         int len = sizeof(uint16_t);
         dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -5019,39 +5012,37 @@
         dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
         dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
       } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         dst_buf1[0] = tmp_buf1;
         dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
         dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
         dst_buf2[0] = tmp_buf2;
         dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
         dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
-                                           dst_width1, dst_height1,
-                                           dst_stride1);
-      vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
-                                          dst_width2, dst_height2, dst_stride2);
-      vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
-                            mi_col);
-      vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
-                                       dst_stride1, dst_buf2, dst_stride2);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+                                          dst_width1, dst_height1, dst_stride1);
+      av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+                                         dst_width2, dst_height2, dst_stride2);
+      av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+      av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                      dst_stride1, dst_buf2, dst_stride2);
     }
 #endif  // CONFIG_OBMC
 
-    vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
+    av1_encode_sb(x, AOMMAX(bsize, BLOCK_8X8));
 #if CONFIG_VAR_TX
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
     if (mbmi->tx_size >= TX_SIZES)
-      vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+      av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
     else
 #endif
-      vp10_tokenize_sb_inter(cpi, td, t, !output_enabled, mi_row, mi_col,
-                             VPXMAX(bsize, BLOCK_8X8));
+      av1_tokenize_sb_inter(cpi, td, t, !output_enabled, mi_row, mi_col,
+                            AOMMAX(bsize, BLOCK_8X8));
 #else
-    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+    av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
 #endif
   }
 
@@ -5136,14 +5127,14 @@
     if (is_inter_block(mbmi))
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
     {
-      tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+      tx_size = AOMMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                        max_txsize_lookup[bsize]);
       if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
         tx_size = max_txsize_rect_lookup[bsize];
       if (xd->lossless[mbmi->segment_id]) tx_size = TX_4X4;
     }
 #else
-      tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+      tx_size = AOMMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                        max_txsize_lookup[bsize]);
 #endif
     else
@@ -5163,10 +5154,9 @@
   return 0;
 }
 
-static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
-                          int mi_row, int mi_col, BLOCK_SIZE bsize,
-                          PC_TREE *pc_tree) {
-  const VP10_COMMON *const cm = &cpi->common;
+static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
+                          int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
+  const AV1_COMMON *const cm = &cpi->common;
 
   const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
   const PARTITION_TYPE partition = pc_tree->partitioning;
@@ -5274,7 +5264,7 @@
   }
 }
 
-static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
 #if CONFIG_EXT_INTER
                                int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
@@ -5283,7 +5273,7 @@
   // Used in supertx
   // (mi_row_ori, mi_col_ori): location for mv
   // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO *mi_8x8 = xd->mi[0];
@@ -5296,26 +5286,26 @@
 
   for (ref = 0; ref < 1 + is_compound; ++ref) {
     YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
-    vp10_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred,
-                          &xd->block_refs[ref]->sf);
+    av1_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred,
+                         &xd->block_refs[ref]->sf);
   }
 
   if (!b_sub8x8)
-    vp10_build_inter_predictors_sb_extend(xd,
+    av1_build_inter_predictors_sb_extend(xd,
 #if CONFIG_EXT_INTER
-                                          mi_row_ori, mi_col_ori,
+                                         mi_row_ori, mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                          mi_row_pred, mi_col_pred, bsize_pred);
+                                         mi_row_pred, mi_col_pred, bsize_pred);
   else
-    vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+    av1_build_inter_predictors_sb_sub8x8_extend(xd,
 #if CONFIG_EXT_INTER
-                                                 mi_row_ori, mi_col_ori,
+                                                mi_row_ori, mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                                 mi_row_pred, mi_col_pred,
-                                                 bsize_pred, block);
+                                                mi_row_pred, mi_col_pred,
+                                                bsize_pred, block);
 }
 
-static void predict_b_extend(VP10_COMP *cpi, ThreadData *td,
+static void predict_b_extend(AV1_COMP *cpi, ThreadData *td,
                              const TileInfo *const tile, int block,
                              int mi_row_ori, int mi_col_ori, int mi_row_pred,
                              int mi_col_pred, int mi_row_top, int mi_col_top,
@@ -5331,7 +5321,7 @@
   // bextend: 1: region to predict is an extension of ori; 0: not
 
   MACROBLOCK *const x = &td->mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   int r = (mi_row_pred - mi_row_top) * MI_SIZE;
   int c = (mi_col_pred - mi_col_top) * MI_SIZE;
@@ -5368,7 +5358,7 @@
   if (output_enabled && !bextend) update_stats(&cpi->common, td, 1);
 }
 
-static void extend_dir(VP10_COMP *cpi, ThreadData *td,
+static void extend_dir(AV1_COMP *cpi, ThreadData *td,
                        const TileInfo *const tile, int block, BLOCK_SIZE bsize,
                        BLOCK_SIZE top_bsize, int mi_row, int mi_col,
                        int mi_row_top, int mi_col_top, int output_enabled,
@@ -5440,7 +5430,7 @@
   }
 }
 
-static void extend_all(VP10_COMP *cpi, ThreadData *td,
+static void extend_all(AV1_COMP *cpi, ThreadData *td,
                        const TileInfo *const tile, int block, BLOCK_SIZE bsize,
                        BLOCK_SIZE top_bsize, int mi_row, int mi_col,
                        int mi_row_top, int mi_col_top, int output_enabled,
@@ -5472,13 +5462,13 @@
 // then applied to the 2 masked prediction mentioned above in vertical direction
 // If the block is split into more than one level, at every stage, masked
 // prediction is stored in dst_buf[] passed from higher level.
-static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
                                const TileInfo *const tile, int mi_row,
                                int mi_col, int mi_row_top, int mi_col_top,
                                int output_enabled, BLOCK_SIZE bsize,
                                BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
                                int dst_stride[3], PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
 
@@ -5503,7 +5493,7 @@
 
   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     int len = sizeof(uint16_t);
     dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -5516,7 +5506,7 @@
     dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len);
     dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len);
   } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst_buf1[0] = tmp_buf1;
     dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE;
     dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE;
@@ -5526,9 +5516,9 @@
     dst_buf3[0] = tmp_buf3;
     dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
     dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (output_enabled && bsize < top_bsize)
     cm->counts.partition[ctx][partition]++;
@@ -5570,7 +5560,7 @@
         // Smooth
         xd->plane[0].dst.buf = dst_buf[0];
         xd->plane[0].dst.stride = dst_stride[0];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             0);
@@ -5607,7 +5597,7 @@
           for (i = 0; i < MAX_MB_PLANE; i++) {
             xd->plane[i].dst.buf = dst_buf[i];
             xd->plane[i].dst.stride = dst_stride[i];
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_HORZ, i);
@@ -5638,7 +5628,7 @@
         // Smooth
         xd->plane[0].dst.buf = dst_buf[0];
         xd->plane[0].dst.stride = dst_stride[0];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             0);
@@ -5673,7 +5663,7 @@
           for (i = 0; i < MAX_MB_PLANE; i++) {
             xd->plane[i].dst.buf = dst_buf[i];
             xd->plane[i].dst.stride = dst_stride[i];
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_VERT, i);
@@ -5732,22 +5722,22 @@
         if (bsize == BLOCK_8X8 && i != 0)
           continue;  // Skip <4x4 chroma smoothing
         if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
-          vp10_build_masked_inter_predictor_complex(
+          av1_build_masked_inter_predictor_complex(
               xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
               PARTITION_VERT, i);
           if (mi_row + hbs < cm->mi_rows) {
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_VERT, i);
-            vp10_build_masked_inter_predictor_complex(
+            av1_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
                 PARTITION_HORZ, i);
           }
         } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
-          vp10_build_masked_inter_predictor_complex(
+          av1_build_masked_inter_predictor_complex(
               xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
               PARTITION_HORZ, i);
@@ -5783,13 +5773,13 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
@@ -5825,13 +5815,13 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
@@ -5867,7 +5857,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
             PARTITION_VERT, i);
@@ -5875,7 +5865,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
             i);
@@ -5911,7 +5901,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
             PARTITION_HORZ, i);
@@ -5919,7 +5909,7 @@
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
-        vp10_build_masked_inter_predictor_complex(
+        av1_build_masked_inter_predictor_complex(
             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
             i);
@@ -5938,11 +5928,11 @@
 #endif  // CONFIG_EXT_PARTITION_TYPES
 }
 
-static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
                           const TileInfo *const tile, int mi_row, int mi_col,
                           BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
                           TX_TYPE *best_tx, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate,
@@ -5962,7 +5952,7 @@
   set_skip_context(xd, mi_row, mi_col);
   set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
   update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize, 0, pc_tree);
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
   for (plane = 0; plane < MAX_MB_PLANE; plane++) {
     dst_buf[plane] = xd->plane[plane].dst.buf;
     dst_stride[plane] = xd->plane[plane].dst.stride;
@@ -5998,20 +5988,20 @@
     tx_size = max_txsize_lookup[bsize];
     tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
                                   cm->subsampling_y);
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+    av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
     coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
 
-    vp10_subtract_plane(x, bsize, plane);
-    vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0,
-                       get_plane_block_size(bsize, pd), coeff_ctx, &this_rate,
-                       &this_dist, &pnsse, &pnskip);
+    av1_subtract_plane(x, bsize, plane);
+    av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0,
+                      get_plane_block_size(bsize, pd), coeff_ctx, &this_rate,
+                      &this_dist, &pnsse, &pnskip);
 #else
     tx_size = max_txsize_lookup[bsize];
     tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
                                   cm->subsampling_y);
-    vp10_subtract_plane(x, bsize, plane);
-    vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
-                                  &pnsse, INT64_MAX, plane, bsize, tx_size, 0);
+    av1_subtract_plane(x, bsize, plane);
+    av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+                                 &pnsse, INT64_MAX, plane, bsize, tx_size, 0);
 #endif  // CONFIG_VAR_TX
 
     rate_uv += this_rate;
@@ -6022,7 +6012,7 @@
 
   // luma
   tx_size = max_txsize_lookup[bsize];
-  vp10_subtract_plane(x, bsize, 0);
+  av1_subtract_plane(x, bsize, 0);
 #if CONFIG_EXT_TX
   ext_tx_set = get_ext_tx_set(tx_size, bsize, 1);
 #endif  // CONFIG_EXT_TX
@@ -6046,13 +6036,13 @@
     pnsse = 0;
     pnskip = 1;
 
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+    av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
     coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
-    vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, coeff_ctx,
-                       &this_rate, &this_dist, &pnsse, &pnskip);
+    av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, coeff_ctx, &this_rate,
+                      &this_dist, &pnsse, &pnskip);
 #else
-    vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
-                                  &pnsse, INT64_MAX, 0, bsize, tx_size, 0);
+    av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+                                 &pnsse, INT64_MAX, 0, bsize, tx_size, 0);
 #endif  // CONFIG_VAR_TX
 
 #if CONFIG_EXT_TX
@@ -6073,16 +6063,16 @@
     sse = sse_uv + pnsse;
     skippable = skippable_uv && pnskip;
     if (skippable) {
-      *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
       x->skip = 1;
     } else {
       if (RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist) <
           RDCOST(x->rdmult, x->rddiv, 0, sse)) {
-        *tmp_rate += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        *tmp_rate += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         x->skip = 0;
       } else {
         *tmp_dist = sse;
-        *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
         x->skip = 1;
       }
     }
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
index 338cb86..a0ae454 100644
--- a/av1/encoder/encodeframe.h
+++ b/av1/encoder/encodeframe.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_ENCODEFRAME_H_
-#define VP10_ENCODER_ENCODEFRAME_H_
+#ifndef AV1_ENCODER_ENCODEFRAME_H_
+#define AV1_ENCODER_ENCODEFRAME_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -19,7 +19,7 @@
 
 struct macroblock;
 struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
 // Constants used in SOURCE_VAR_BASED_PARTITION
@@ -29,20 +29,20 @@
 #define VAR_HIST_LARGE_CUT_OFF 75
 #define VAR_HIST_SMALL_CUT_OFF 45
 
-void vp10_setup_src_planes(struct macroblock *x,
-                           const struct yv12_buffer_config *src, int mi_row,
-                           int mi_col);
+void av1_setup_src_planes(struct macroblock *x,
+                          const struct yv12_buffer_config *src, int mi_row,
+                          int mi_col);
 
-void vp10_encode_frame(struct VP10_COMP *cpi);
+void av1_encode_frame(struct AV1_COMP *cpi);
 
-void vp10_init_tile_data(struct VP10_COMP *cpi);
-void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
-                      int tile_row, int tile_col);
+void av1_init_tile_data(struct AV1_COMP *cpi);
+void av1_encode_tile(struct AV1_COMP *cpi, struct ThreadData *td, int tile_row,
+                     int tile_col);
 
-void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
+void av1_set_variance_partition_thresholds(struct AV1_COMP *cpi, int q);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEFRAME_H_
+#endif  // AV1_ENCODER_ENCODEFRAME_H_
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index e72db2d..07a7748 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/idct.h"
@@ -27,33 +27,33 @@
 #include "av1/encoder/rd.h"
 #include "av1/encoder/tokenize.h"
 
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
   const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
   const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
+    aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
                               p->src.stride, pd->dst.buf, pd->dst.stride,
                               x->e_mbd.bd);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                      pd->dst.buf, pd->dst.stride);
 }
 
-typedef struct vp10_token_state {
+typedef struct av1_token_state {
   int rate;
   int64_t error;
   int next;
   int16_t token;
   tran_low_t qc;
   tran_low_t dqc;
-} vp10_token_state;
+} av1_token_state;
 
 // These numbers are empirically obtained.
 static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
@@ -66,13 +66,13 @@
     rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
   }
 
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
-                    int ctx) {
+int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                   int ctx) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   struct macroblock_plane *const p = &mb->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int ref = is_inter_block(&xd->mi[0]->mbmi);
-  vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
+  av1_token_state tokens[MAX_TX_SQUARE + 1][2];
   unsigned best_index[MAX_TX_SQUARE + 1][2];
   uint8_t token_cache[MAX_TX_SQUARE];
   const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -110,10 +110,10 @@
   int best, band = (eob < default_eob) ? band_translate[eob]
                                        : band_translate[eob - 1];
   int pt, i, final_eob;
-#if CONFIG_VP9_HIGHBITDEPTH
-  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+  const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
 #else
-  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+  const int *cat6_high_cost = av1_get_high_cost_table(8);
 #endif
   unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
       mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
@@ -138,9 +138,9 @@
 
   for (i = 0; i < eob; i++) {
     const int rc = scan[i];
-    tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
+    tokens[i][0].rate = av1_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
     tokens[i][0].token = t0;
-    token_cache[rc] = vp10_pt_energy_class[t0];
+    token_cache[rc] = av1_pt_energy_class[t0];
   }
 
   for (i = eob; i-- > 0;) {
@@ -180,11 +180,11 @@
       }
 
       dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         dx >>= xd->bd - 8;
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       d2 = (int64_t)dx * dx;
       tokens[i][0].rate += (best ? rate1 : rate0);
       tokens[i][0].error = d2 + (best ? error1 : error0);
@@ -202,11 +202,11 @@
         shortcut = 0;
       } else {
 #if CONFIG_NEW_QUANT
-        shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
-                                               dequant_val[band_translate[i]]) >
+        shortcut = ((av1_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
+                                              dequant_val[band_translate[i]]) >
                      (abs(coeff[rc]) << shift)) &&
-                    (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
-                                               dequant_val[band_translate[i]]) <
+                    (av1_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
+                                              dequant_val[band_translate[i]]) <
                      (abs(coeff[rc]) << shift)));
 #else  // CONFIG_NEW_QUANT
 #if CONFIG_AOM_QM
@@ -251,19 +251,19 @@
         t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
         base_bits = 0;
       } else {
-        base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
+        base_bits = av1_get_token_cost(x, &t0, cat6_high_cost);
         t1 = t0;
       }
 
       if (next_shortcut) {
         if (LIKELY(next < default_eob)) {
           if (t0 != EOB_TOKEN) {
-            token_cache[rc] = vp10_pt_energy_class[t0];
+            token_cache[rc] = av1_pt_energy_class[t0];
             pt = get_coef_context(nb, token_cache, i + 1);
             rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
           }
           if (t1 != EOB_TOKEN) {
-            token_cache[rc] = vp10_pt_energy_class[t1];
+            token_cache[rc] = av1_pt_energy_class[t1];
             pt = get_coef_context(nb, token_cache, i + 1);
             rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
           }
@@ -275,7 +275,7 @@
       } else {
         // The two states in next stage are identical.
         if (next < default_eob && t0 != EOB_TOKEN) {
-          token_cache[rc] = vp10_pt_energy_class[t0];
+          token_cache[rc] = av1_pt_energy_class[t0];
           pt = get_coef_context(nb, token_cache, i + 1);
           rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
         }
@@ -283,16 +283,16 @@
       }
 
 #if CONFIG_NEW_QUANT
-      dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
-                                  dequant_val[band_translate[i]]) -
+      dx = av1_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
+                                 dequant_val[band_translate[i]]) -
            (coeff[rc] << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         dx >>= xd->bd - 8;
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #else   // CONFIG_NEW_QUANT
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
       } else {
@@ -300,7 +300,7 @@
       }
 #else
       dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_NEW_QUANT
       d2 = (int64_t)dx * dx;
 
@@ -312,7 +312,7 @@
 
       if (x) {
 #if CONFIG_NEW_QUANT
-        tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
+        tokens[i][1].dqc = av1_dequant_abscoeff_nuq(
             abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
         tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
                                  : tokens[i][1].dqc;
@@ -402,20 +402,18 @@
   return final_eob;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef enum QUANT_FUNC {
   QUANT_FUNC_LOWBD = 0,
   QUANT_FUNC_HIGHBD = 1,
   QUANT_FUNC_LAST = 2
 } QUANT_FUNC;
 
-static VP10_QUANT_FACADE
-    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
-      { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
-      { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
-      { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
-      { NULL, NULL }
-    };
+static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST][QUANT_FUNC_LAST] =
+    { { av1_quantize_fp_facade, av1_highbd_quantize_fp_facade },
+      { av1_quantize_b_facade, av1_highbd_quantize_b_facade },
+      { av1_quantize_dc_facade, av1_highbd_quantize_dc_facade },
+      { NULL, NULL } };
 
 #else
 typedef enum QUANT_FUNC {
@@ -423,22 +421,20 @@
   QUANT_FUNC_LAST = 1
 } QUANT_FUNC;
 
-static VP10_QUANT_FACADE
-    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
-      { vp10_quantize_fp_facade },
-      { vp10_quantize_b_facade },
-      { vp10_quantize_dc_facade },
-      { NULL }
-    };
+static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST][QUANT_FUNC_LAST] =
+    { { av1_quantize_fp_facade },
+      { av1_quantize_b_facade },
+      { av1_quantize_dc_facade },
+      { NULL } };
 #endif
 
-static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
+static FWD_TXFM_OPT fwd_txfm_opt_list[AV1_XFORM_QUANT_LAST] = {
   FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
 };
 
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
-                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                      VP10_XFORM_QUANT xform_quant_idx) {
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                     int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                     AV1_XFORM_QUANT xform_quant_idx) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -472,11 +468,11 @@
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
   qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   fwd_txfm_param.bd = xd->bd;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+    if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
       if (LIKELY(!x->skip_block)) {
         quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
             coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
@@ -486,15 +482,15 @@
 #endif  // CONFIG_AOM_QM
             );
       } else {
-        vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+        av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
       }
     }
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+  if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
     if (LIKELY(!x->skip_block)) {
       quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
           coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
@@ -504,15 +500,15 @@
 #endif  // CONFIG_AOM_QM
           );
     } else {
-      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+      av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
     }
   }
 }
 
 #if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                          int ctx) {
+void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                         int ctx) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -533,7 +529,7 @@
 
   fwd_txfm_param.tx_type = tx_type;
   fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
   fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
   fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
@@ -541,7 +537,7 @@
 
 // TODO(sarahparker) add all of these new quant quantize functions
 // to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   fwd_txfm_param.bd = xd->bd;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -561,7 +557,7 @@
     }
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
   if (tx_size == TX_32X32) {
@@ -579,9 +575,9 @@
   }
 }
 
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx) {
+void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                            int blk_col, BLOCK_SIZE plane_bsize,
+                            TX_SIZE tx_size, int ctx) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -602,7 +598,7 @@
 
   fwd_txfm_param.tx_type = tx_type;
   fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
   fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
   fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
@@ -610,7 +606,7 @@
 
 // TODO(sarahparker) add all of these new quant quantize functions
 // to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   fwd_txfm_param.bd = xd->bd;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -629,7 +625,7 @@
     }
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
   if (tx_size == TX_32X32) {
@@ -647,9 +643,9 @@
   }
 }
 
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx) {
+void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                            int blk_col, BLOCK_SIZE plane_bsize,
+                            TX_SIZE tx_size, int ctx) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -667,7 +663,7 @@
 
   fwd_txfm_param.tx_type = tx_type;
   fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
   fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
   fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
@@ -675,7 +671,7 @@
 
 // TODO(sarahparker) add all of these new quant quantize functions
 // to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   fwd_txfm_param.bd = xd->bd;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -692,7 +688,7 @@
     }
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
   if (tx_size == TX_32X32) {
@@ -707,10 +703,9 @@
   }
 }
 
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
-                                int blk_row, int blk_col,
-                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                                int ctx) {
+void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                               int blk_col, BLOCK_SIZE plane_bsize,
+                               TX_SIZE tx_size, int ctx) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -728,7 +723,7 @@
 
   fwd_txfm_param.tx_type = tx_type;
   fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
   fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
   fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
@@ -736,7 +731,7 @@
 
 // TODO(sarahparker) add all of these new quant quantize functions
 // to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   fwd_txfm_param.bd = xd->bd;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
@@ -753,7 +748,7 @@
     }
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
   if (tx_size == TX_32X32) {
@@ -803,11 +798,11 @@
   {
 #endif
 #if CONFIG_NEW_QUANT
-    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, ctx);
+    av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                           tx_size, ctx);
 #else
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_FP);
+    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                    AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
   }
 #if CONFIG_VAR_TX
@@ -817,7 +812,7 @@
 #endif
 
   if (p->eobs[block]) {
-    *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+    *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
   } else {
     *a = *l = p->eobs[block] > 0;
   }
@@ -841,13 +836,13 @@
   inv_txfm_param.eob = p->eobs[block];
   inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     inv_txfm_param.bd = xd->bd;
     highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
 }
 
@@ -922,41 +917,41 @@
 
 #if CONFIG_NEW_QUANT
   ctx = 0;
-  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                          tx_size, ctx);
+  av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                         tx_size, ctx);
 #else
-  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                   VP10_XFORM_QUANT_B);
+  av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                  AV1_XFORM_QUANT_B);
 #endif  // CONFIG_NEW_QUANT
 
   if (p->eobs[block] > 0) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
-                                xd->bd);
+        av1_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                               xd->bd);
       } else {
-        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
-                                xd->bd);
+        av1_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                               xd->bd);
       }
       return;
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-      vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+      av1_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
     } else {
-      vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+      av1_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
     }
   }
 }
 
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  vp10_subtract_plane(x, bsize, 0);
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
-                                          encode_block_pass1, x);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  av1_subtract_plane(x, bsize, 0);
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+                                         encode_block_pass1, x);
 }
 
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct optimize_ctx ctx;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -983,21 +978,21 @@
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
     const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
 #endif
-    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
+    av1_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
 #else
     const struct macroblockd_plane *const pd = &xd->plane[plane];
     const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+    av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
 #endif
-    vp10_subtract_plane(x, bsize, plane);
+    av1_subtract_plane(x, bsize, plane);
     arg.ta = ctx.ta[plane];
     arg.tl = ctx.tl[plane];
 
 #if CONFIG_VAR_TX
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
     if (tx_size >= TX_SIZES) {
-      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                              &arg);
+      av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                             &arg);
     } else {
 #endif
       for (idy = 0; idy < mi_height; idy += bh) {
@@ -1011,14 +1006,14 @@
     }
 #endif
 #else
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                            &arg);
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                           &arg);
 #endif
   }
 }
 
 #if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct optimize_ctx ctx;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -1035,19 +1030,19 @@
 #else
     const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
 #endif
-    vp10_subtract_plane(x, bsize, plane);
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+    av1_subtract_plane(x, bsize, plane);
+    av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
     arg.ta = ctx.ta[plane];
     arg.tl = ctx.tl[plane];
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                            &arg);
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                           &arg);
   }
 }
 #endif  // CONFIG_SUPERTX
 
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
-                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                             void *arg) {
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                            void *arg) {
   struct encode_b_args *const args = arg;
   MACROBLOCK *const x = args->x;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1080,20 +1075,20 @@
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
   mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
-  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
-                           dst_stride, blk_col, blk_row, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+  av1_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+                          dst_stride, blk_col, blk_row, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
+    aom_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
                               src, src_stride, dst, dst_stride, xd->bd);
   } else {
-    vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+    aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
                        src_stride, dst, dst_stride);
   }
 #else
-  vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+  aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
                      src_stride, dst, dst_stride);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   a = &args->ta[blk_col];
   l = &args->tl[blk_row];
@@ -1101,20 +1096,20 @@
 
   if (args->enable_optimize_b) {
 #if CONFIG_NEW_QUANT
-    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, ctx);
+    av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                           tx_size, ctx);
 #else   // CONFIG_NEW_QUANT
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_FP);
+    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                    AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
     if (p->eobs[block]) {
-      *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+      *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
     } else {
       *a = *l = 0;
     }
   } else {
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_B);
+    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                    AV1_XFORM_QUANT_B);
     *a = *l = p->eobs[block] > 0;
   }
 
@@ -1124,7 +1119,7 @@
     inv_txfm_param.tx_size = tx_size;
     inv_txfm_param.eob = *eob;
     inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     inv_txfm_param.bd = xd->bd;
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
@@ -1133,14 +1128,14 @@
     }
 #else
     inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     *(args->skip) = 0;
   }
 }
 
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
-                                   int enable_optimize_b) {
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+                                  int enable_optimize_b) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
   ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
@@ -1151,8 +1146,8 @@
     const struct macroblockd_plane *const pd = &xd->plane[plane];
     const TX_SIZE tx_size =
         plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
+    av1_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
   }
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
-                                          vp10_encode_block_intra, &arg);
+  av1_foreach_transformed_block_in_plane(xd, bsize, plane,
+                                         av1_encode_block_intra, &arg);
 }
diff --git a/av1/encoder/encodemb.c.orig b/av1/encoder/encodemb.c.orig
deleted file mode 100644
index 4c94032..0000000
--- a/av1/encoder/encodemb.c.orig
+++ /dev/null
@@ -1,1158 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-
-#include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom_ports/mem.h"
-
-#include "av1/common/idct.h"
-#include "av1/common/reconinter.h"
-#include "av1/common/reconintra.h"
-#include "av1/common/scan.h"
-
-#include "av1/encoder/encodemb.h"
-#include "av1/encoder/hybrid_fwd_txfm.h"
-#include "av1/encoder/quantize.h"
-#include "av1/encoder/rd.h"
-#include "av1/encoder/tokenize.h"
-
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
-  struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
-  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
-  const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
-
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
-                              p->src.stride, pd->dst.buf, pd->dst.stride,
-                              x->e_mbd.bd);
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
-                     pd->dst.buf, pd->dst.stride);
-}
-
-typedef struct vp10_token_state {
-  int rate;
-  int64_t error;
-  int next;
-  int16_t token;
-  tran_low_t qc;
-  tran_low_t dqc;
-} vp10_token_state;
-
-// These numbers are empirically obtained.
-static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
-  { 10, 6 }, { 8, 5 },
-};
-
-#define UPDATE_RD_COST()                             \
-  {                                                  \
-    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
-    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
-  }
-
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
-                    int ctx) {
-  MACROBLOCKD *const xd = &mb->e_mbd;
-  struct macroblock_plane *const p = &mb->plane[plane];
-  struct macroblockd_plane *const pd = &xd->plane[plane];
-  const int ref = is_inter_block(&xd->mi[0]->mbmi);
-  vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
-  unsigned best_index[MAX_TX_SQUARE + 1][2];
-  uint8_t token_cache[MAX_TX_SQUARE];
-  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  const int eob = p->eobs[block];
-  const PLANE_TYPE type = pd->plane_type;
-  const int default_eob = get_tx2d_size(tx_size);
-  const int16_t *const dequant_ptr = pd->dequant;
-  const uint8_t *const band_translate = get_band_translate(tx_size);
-  TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
-  const scan_order *const so =
-      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
-  const int16_t *const scan = so->scan;
-  const int16_t *const nb = so->neighbors;
-<<<<<<< HEAD
-  const int shift = get_tx_scale(xd, tx_type, tx_size);
-=======
-#if CONFIG_AOM_QM
-  int seg_id = xd->mi[0]->mbmi.segment_id;
-  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
-  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
-#if CONFIG_AOM_QM
-  int seg_id = xd->mi[0]->mbmi.segment_id;
-  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
-  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
->>>>>>> 10d6f02... Port commits related to clpf and qm experiments
-#if CONFIG_NEW_QUANT
-  int dq = get_dq_profile_from_ctx(ctx);
-  const dequant_val_type_nuq *dequant_val = pd->dequant_val_nuq[dq];
-#else
-  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
-#endif  // CONFIG_NEW_QUANT
-  int next = eob, sz = 0;
-  const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1;
-  const int64_t rddiv = mb->rddiv;
-  int64_t rd_cost0, rd_cost1;
-  int rate0, rate1;
-  int64_t error0, error1;
-  int16_t t0, t1;
-  int best, band = (eob < default_eob) ? band_translate[eob]
-                                       : band_translate[eob - 1];
-  int pt, i, final_eob;
-#if CONFIG_VP9_HIGHBITDEPTH
-  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
-#else
-  const int *cat6_high_cost = vp10_get_high_cost_table(8);
-#endif
-  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
-      mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
-  const uint16_t *band_counts = &band_count_table[tx_size][band];
-  uint16_t band_left = eob - band_cum_count_table[tx_size][band] + 1;
-  int shortcut = 0;
-  int next_shortcut = 0;
-
-  token_costs += band;
-
-  assert((!type && !plane) || (type && plane));
-  assert(eob <= default_eob);
-
-  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
-  /* Initialize the sentinel node of the trellis. */
-  tokens[eob][0].rate = 0;
-  tokens[eob][0].error = 0;
-  tokens[eob][0].next = default_eob;
-  tokens[eob][0].token = EOB_TOKEN;
-  tokens[eob][0].qc = 0;
-  tokens[eob][1] = tokens[eob][0];
-
-  for (i = 0; i < eob; i++) {
-    const int rc = scan[i];
-    tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
-    tokens[i][0].token = t0;
-    token_cache[rc] = vp10_pt_energy_class[t0];
-  }
-
-  for (i = eob; i-- > 0;) {
-    int base_bits, dx;
-    int64_t d2;
-    const int rc = scan[i];
-#if CONFIG_AOM_QM
-    int iwt = iqmatrix[rc];
-#endif
-    int x = qcoeff[rc];
-    next_shortcut = shortcut;
-
-    /* Only add a trellis state for non-zero coefficients. */
-    if (UNLIKELY(x)) {
-      error0 = tokens[next][0].error;
-      error1 = tokens[next][1].error;
-      /* Evaluate the first possibility for this state. */
-      rate0 = tokens[next][0].rate;
-      rate1 = tokens[next][1].rate;
-
-      if (next_shortcut) {
-        /* Consider both possible successor states. */
-        if (next < default_eob) {
-          pt = get_coef_context(nb, token_cache, i + 1);
-          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
-          rate1 += (*token_costs)[0][pt][tokens[next][1].token];
-        }
-        UPDATE_RD_COST();
-        /* And pick the best. */
-        best = rd_cost1 < rd_cost0;
-      } else {
-        if (next < default_eob) {
-          pt = get_coef_context(nb, token_cache, i + 1);
-          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
-        }
-        best = 0;
-      }
-
-      dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
-      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        dx >>= xd->bd - 8;
-      }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      d2 = (int64_t)dx * dx;
-      tokens[i][0].rate += (best ? rate1 : rate0);
-      tokens[i][0].error = d2 + (best ? error1 : error0);
-      tokens[i][0].next = next;
-      tokens[i][0].qc = x;
-      tokens[i][0].dqc = dqcoeff[rc];
-      best_index[i][0] = best;
-
-      /* Evaluate the second possibility for this state. */
-      rate0 = tokens[next][0].rate;
-      rate1 = tokens[next][1].rate;
-
-      // The threshold of 3 is empirically obtained.
-      if (UNLIKELY(abs(x) > 3)) {
-        shortcut = 0;
-      } else {
-#if CONFIG_NEW_QUANT
-        shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
-                                               dequant_val[band_translate[i]]) >
-                     (abs(coeff[rc]) << shift)) &&
-                    (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
-                                               dequant_val[band_translate[i]]) <
-                     (abs(coeff[rc]) << shift)));
-#else   // CONFIG_NEW_QUANT
-#if CONFIG_AOM_QM
-        if ((abs(x) * dequant_ptr[rc != 0] * iwt >
-             ((abs(coeff[rc]) << shift) << AOM_QM_BITS)) &&
-            (abs(x) * dequant_ptr[rc != 0] * iwt <
-             (((abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]) << AOM_QM_BITS)))
-#else
-        if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
-            (abs(x) * dequant_ptr[rc != 0] <
-             (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
-#endif  // CONFIG_AOM_QM
-          shortcut = 1;
-        else
-          shortcut = 0;
-#endif  // CONFIG_NEW_QUANT
-      }
-
-      if (shortcut) {
-        sz = -(x < 0);
-        x -= 2 * sz + 1;
-      } else {
-        tokens[i][1] = tokens[i][0];
-        best_index[i][1] = best_index[i][0];
-        next = i;
-
-        if (UNLIKELY(!(--band_left))) {
-          --band_counts;
-          band_left = *band_counts;
-          --token_costs;
-        }
-        continue;
-      }
-
-      /* Consider both possible successor states. */
-      if (!x) {
-        /* If we reduced this coefficient to zero, check to see if
-         *  we need to move the EOB back here.
-         */
-        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
-        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
-        base_bits = 0;
-      } else {
-        base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
-        t1 = t0;
-      }
-
-      if (next_shortcut) {
-        if (LIKELY(next < default_eob)) {
-          if (t0 != EOB_TOKEN) {
-            token_cache[rc] = vp10_pt_energy_class[t0];
-            pt = get_coef_context(nb, token_cache, i + 1);
-            rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
-          }
-          if (t1 != EOB_TOKEN) {
-            token_cache[rc] = vp10_pt_energy_class[t1];
-            pt = get_coef_context(nb, token_cache, i + 1);
-            rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
-          }
-        }
-
-        UPDATE_RD_COST();
-        /* And pick the best. */
-        best = rd_cost1 < rd_cost0;
-      } else {
-        // The two states in next stage are identical.
-        if (next < default_eob && t0 != EOB_TOKEN) {
-          token_cache[rc] = vp10_pt_energy_class[t0];
-          pt = get_coef_context(nb, token_cache, i + 1);
-          rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
-        }
-        best = 0;
-      }
-
-#if CONFIG_NEW_QUANT
-      dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
-                                  dequant_val[band_translate[i]]) -
-           (coeff[rc] << shift);
-#if CONFIG_VP9_HIGHBITDEPTH
-      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        dx >>= xd->bd - 8;
-      }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#else   // CONFIG_NEW_QUANT
-#if CONFIG_VP9_HIGHBITDEPTH
-      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
-      } else {
-        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-      }
-#else
-      dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#endif  // CONFIG_NEW_QUANT
-      d2 = (int64_t)dx * dx;
-
-      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
-      tokens[i][1].error = d2 + (best ? error1 : error0);
-      tokens[i][1].next = next;
-      tokens[i][1].token = best ? t1 : t0;
-      tokens[i][1].qc = x;
-
-      if (x) {
-#if CONFIG_NEW_QUANT
-        tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
-            abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
-        tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
-                                 : tokens[i][1].dqc;
-        if (sz) tokens[i][1].dqc = -tokens[i][1].dqc;
-#else
-        tran_low_t offset = dq_step[rc != 0];
-        // The 32x32 transform coefficient uses half quantization step size.
-        // Account for the rounding difference in the dequantized coefficeint
-        // value when the quantization index is dropped from an even number
-        // to an odd number.
-        if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);
-
-        if (sz == 0)
-          tokens[i][1].dqc = dqcoeff[rc] - offset;
-        else
-          tokens[i][1].dqc = dqcoeff[rc] + offset;
-#endif  // CONFIG_NEW_QUANT
-      } else {
-        tokens[i][1].dqc = 0;
-      }
-
-      best_index[i][1] = best;
-      /* Finally, make this the new head of the trellis. */
-      next = i;
-    } else {
-      /* There's no choice to make for a zero coefficient, so we don't
-       *  add a new trellis node, but we do need to update the costs.
-       */
-      t0 = tokens[next][0].token;
-      t1 = tokens[next][1].token;
-      pt = get_coef_context(nb, token_cache, i + 1);
-      /* Update the cost of each path if we're past the EOB token. */
-      if (t0 != EOB_TOKEN) {
-        tokens[next][0].rate += (*token_costs)[1][pt][t0];
-        tokens[next][0].token = ZERO_TOKEN;
-      }
-      if (t1 != EOB_TOKEN) {
-        tokens[next][1].rate += (*token_costs)[1][pt][t1];
-        tokens[next][1].token = ZERO_TOKEN;
-      }
-      best_index[i][0] = best_index[i][1] = 0;
-      shortcut = (tokens[next][0].rate != tokens[next][1].rate);
-      /* Don't update next, because we didn't add a new node. */
-    }
-
-    if (UNLIKELY(!(--band_left))) {
-      --band_counts;
-      band_left = *band_counts;
-      --token_costs;
-    }
-  }
-
-  /* Now pick the best path through the whole trellis. */
-  rate0 = tokens[next][0].rate;
-  rate1 = tokens[next][1].rate;
-  error0 = tokens[next][0].error;
-  error1 = tokens[next][1].error;
-  t0 = tokens[next][0].token;
-  t1 = tokens[next][1].token;
-  rate0 += (*token_costs)[0][ctx][t0];
-  rate1 += (*token_costs)[0][ctx][t1];
-  UPDATE_RD_COST();
-  best = rd_cost1 < rd_cost0;
-
-  final_eob = -1;
-
-  for (i = next; i < eob; i = next) {
-    const int x = tokens[i][best].qc;
-    const int rc = scan[i];
-#if CONFIG_AOM_QM
-    const int iwt = iqmatrix[rc];
-    const int dequant =
-        (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >> AOM_QM_BITS;
-#endif
-
-    if (x) final_eob = i;
-    qcoeff[rc] = x;
-    dqcoeff[rc] = tokens[i][best].dqc;
-
-    next = tokens[i][best].next;
-    best = best_index[i][best];
-  }
-  final_eob++;
-
-  mb->plane[plane].eobs[block] = final_eob;
-  assert(final_eob <= default_eob);
-  return final_eob;
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef enum QUANT_FUNC {
-  QUANT_FUNC_LOWBD = 0,
-  QUANT_FUNC_HIGHBD = 1,
-  QUANT_FUNC_LAST = 2
-} QUANT_FUNC;
-
-static VP10_QUANT_FACADE
-    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
-      { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
-      { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
-      { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
-      { NULL, NULL }
-    };
-
-#else
-typedef enum QUANT_FUNC {
-  QUANT_FUNC_LOWBD = 0,
-  QUANT_FUNC_LAST = 1
-} QUANT_FUNC;
-
-static VP10_QUANT_FACADE
-    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
-      { vp10_quantize_fp_facade },
-      { vp10_quantize_b_facade },
-      { vp10_quantize_dc_facade },
-      { NULL }
-    };
-#endif
-
-static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
-  FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
-};
-
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
-                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                      VP10_XFORM_QUANT xform_quant_idx) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  const struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  const scan_order *const scan_order =
-      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
-  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint16_t *const eob = &p->eobs[block];
-  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-#if CONFIG_AOM_QM
-  int seg_id = xd->mi[0]->mbmi.segment_id;
-  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
-  const qm_val_t *qmatrix = pd->seg_qmatrix[seg_id][is_intra][tx_size];
-  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
-#endif
-  const int16_t *src_diff;
-  const int tx2d_size = get_tx2d_size(tx_size);
-
-  FWD_TXFM_PARAM fwd_txfm_param;
-  QUANT_PARAM qparam;
-
-  fwd_txfm_param.tx_type = tx_type;
-  fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[xform_quant_idx];
-  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
-  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-  qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
-#if CONFIG_VP9_HIGHBITDEPTH
-  fwd_txfm_param.bd = xd->bd;
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
-      if (LIKELY(!x->skip_block)) {
-        quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
-<<<<<<< HEAD
-            coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam);
-      } else {
-        vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
-=======
-            coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
-#if CONFIG_AOM_QM
-            , qmatrix, iqmatrix
-#endif  // CONFIG_AOM_QM
-            );
->>>>>>> 10d6f02... Port commits related to clpf and qm experiments
-      }
-    }
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
-    if (LIKELY(!x->skip_block)) {
-      quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
-<<<<<<< HEAD
-          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam);
-    } else {
-      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
-=======
-          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
-#if CONFIG_AOM_QM
-            , qmatrix, iqmatrix
-#endif  // CONFIG_AOM_QM
-          );
->>>>>>> 10d6f02... Port commits related to clpf and qm experiments
-    }
-  }
-}
-
-#if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                          int ctx) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  const struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  const scan_order *const scan_order =
-      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
-  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  int dq = get_dq_profile_from_ctx(ctx);
-  uint16_t *const eob = &p->eobs[block];
-  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  const int16_t *src_diff;
-  const uint8_t *band = get_band_translate(tx_size);
-
-  FWD_TXFM_PARAM fwd_txfm_param;
-
-  fwd_txfm_param.tx_type = tx_type;
-  fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
-  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
-  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
-  fwd_txfm_param.bd = xd->bd;
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (tx_size == TX_32X32) {
-      highbd_quantize_32x32_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
-          p->quant_shift, pd->dequant,
-          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
-          dqcoeff, eob, scan_order->scan, band);
-    } else {
-      highbd_quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                          p->quant, p->quant_shift, pd->dequant,
-                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
-                          qcoeff, dqcoeff, eob, scan_order->scan, band);
-    }
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (tx_size == TX_32X32) {
-    quantize_32x32_nuq(coeff, 1024, x->skip_block, p->quant, p->quant_shift,
-                       pd->dequant,
-                       (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-                       (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
-                       qcoeff, dqcoeff, eob, scan_order->scan, band);
-  } else {
-    quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
-                 p->quant_shift, pd->dequant,
-                 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-                 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
-                 dqcoeff, eob, scan_order->scan, band);
-  }
-}
-
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  const struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  int dq = get_dq_profile_from_ctx(ctx);
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  const scan_order *const scan_order =
-      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
-  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint16_t *const eob = &p->eobs[block];
-  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  const int16_t *src_diff;
-  const uint8_t *band = get_band_translate(tx_size);
-
-  FWD_TXFM_PARAM fwd_txfm_param;
-
-  fwd_txfm_param.tx_type = tx_type;
-  fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
-  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
-  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
-  fwd_txfm_param.bd = xd->bd;
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (tx_size == TX_32X32) {
-      highbd_quantize_32x32_fp_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
-          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
-          dqcoeff, eob, scan_order->scan, band);
-    } else {
-      highbd_quantize_fp_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
-          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
-          dqcoeff, eob, scan_order->scan, band);
-    }
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (tx_size == TX_32X32) {
-    quantize_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                          p->quant_fp, pd->dequant,
-                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
-                          qcoeff, dqcoeff, eob, scan_order->scan, band);
-  } else {
-    quantize_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
-                    pd->dequant,
-                    (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
-                    (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
-                    qcoeff, dqcoeff, eob, scan_order->scan, band);
-  }
-}
-
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  const struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint16_t *const eob = &p->eobs[block];
-  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  const int16_t *src_diff;
-  int dq = get_dq_profile_from_ctx(ctx);
-
-  FWD_TXFM_PARAM fwd_txfm_param;
-
-  fwd_txfm_param.tx_type = tx_type;
-  fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
-  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
-  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
-  fwd_txfm_param.bd = xd->bd;
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (tx_size == TX_32X32) {
-      highbd_quantize_dc_32x32_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
-          p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
-          pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
-    } else {
-      highbd_quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                             p->quant[0], p->quant_shift[0], pd->dequant[0],
-                             p->cuml_bins_nuq[dq][0],
-                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
-    }
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (tx_size == TX_32X32) {
-    quantize_dc_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                          p->quant[0], p->quant_shift[0], pd->dequant[0],
-                          p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
-                          qcoeff, dqcoeff, eob);
-  } else {
-    quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
-                    p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
-                    pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
-  }
-}
-
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
-                                int blk_row, int blk_col,
-                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                                int ctx) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  const struct macroblock_plane *const p = &x->plane[plane];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
-  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint16_t *const eob = &p->eobs[block];
-  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  const int16_t *src_diff;
-  int dq = get_dq_profile_from_ctx(ctx);
-
-  FWD_TXFM_PARAM fwd_txfm_param;
-
-  fwd_txfm_param.tx_type = tx_type;
-  fwd_txfm_param.tx_size = tx_size;
-  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
-  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
-  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-// TODO(sarahparker) add all of these new quant quantize functions
-// to quant_func_list, just trying to get this expr to work for now
-#if CONFIG_VP9_HIGHBITDEPTH
-  fwd_txfm_param.bd = xd->bd;
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-    if (tx_size == TX_32X32) {
-      highbd_quantize_dc_32x32_fp_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
-          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
-          qcoeff, dqcoeff, eob);
-    } else {
-      highbd_quantize_dc_fp_nuq(
-          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
-          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
-          qcoeff, dqcoeff, eob);
-    }
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
-  if (tx_size == TX_32X32) {
-    quantize_dc_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                             p->quant_fp[0], pd->dequant[0],
-                             p->cuml_bins_nuq[dq][0],
-                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
-  } else {
-    quantize_dc_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
-                       p->quant_fp[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
-                       pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
-  }
-}
-#endif  // CONFIG_NEW_QUANT
-
-static void encode_block(int plane, int block, int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
-  struct encode_b_args *const args = arg;
-  MACROBLOCK *const x = args->x;
-  MACROBLOCKD *const xd = &x->e_mbd;
-  int ctx;
-  struct macroblock_plane *const p = &x->plane[plane];
-  struct macroblockd_plane *const pd = &xd->plane[plane];
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint8_t *dst;
-  ENTROPY_CONTEXT *a, *l;
-  INV_TXFM_PARAM inv_txfm_param;
-#if CONFIG_VAR_TX
-  int i;
-  const int bwl = b_width_log2_lookup[plane_bsize];
-#endif
-  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
-  a = &args->ta[blk_col];
-  l = &args->tl[blk_row];
-#if CONFIG_VAR_TX
-  ctx = get_entropy_context(tx_size, a, l);
-#else
-  ctx = combine_entropy_contexts(*a, *l);
-#endif
-
-#if CONFIG_VAR_TX
-  // Assert not magic number (uninitialised).
-  assert(x->blk_skip[plane][(blk_row << bwl) + blk_col] != 234);
-
-  if (x->blk_skip[plane][(blk_row << bwl) + blk_col] == 0) {
-#else
-  {
-#endif
-#if CONFIG_NEW_QUANT
-    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, ctx);
-#else
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_FP);
-#endif  // CONFIG_NEW_QUANT
-  }
-#if CONFIG_VAR_TX
-  else {
-    p->eobs[block] = 0;
-  }
-#endif
-
-  if (p->eobs[block]) {
-    *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
-  } else {
-    *a = *l = p->eobs[block] > 0;
-  }
-
-#if CONFIG_VAR_TX
-  for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i) {
-    a[i] = a[0];
-  }
-  for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i) {
-    l[i] = l[0];
-  }
-#endif
-
-  if (p->eobs[block]) *(args->skip) = 0;
-
-  if (p->eobs[block] == 0) return;
-
-  // inverse transform parameters
-  inv_txfm_param.tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
-  inv_txfm_param.tx_size = tx_size;
-  inv_txfm_param.eob = p->eobs[block];
-  inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    inv_txfm_param.bd = xd->bd;
-    highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
-    return;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
-}
-
-#if CONFIG_VAR_TX
-static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
-                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                               void *arg) {
-  struct encode_b_args *const args = arg;
-  MACROBLOCK *const x = args->x;
-  MACROBLOCKD *const xd = &x->e_mbd;
-  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  const int tx_row = blk_row >> (1 - pd->subsampling_y);
-  const int tx_col = blk_col >> (1 - pd->subsampling_x);
-  const TX_SIZE plane_tx_size =
-      plane ? get_uv_tx_size_impl(mbmi->inter_tx_size[tx_row][tx_col], bsize, 0,
-                                  0)
-            : mbmi->inter_tx_size[tx_row][tx_col];
-
-  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
-  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
-
-  if (xd->mb_to_bottom_edge < 0)
-    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
-  if (xd->mb_to_right_edge < 0)
-    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
-
-  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
-
-  if (tx_size == plane_tx_size) {
-    encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
-  } else {
-    int bsl = b_width_log2_lookup[bsize];
-    int i;
-
-    assert(bsl > 0);
-    --bsl;
-
-#if CONFIG_EXT_TX
-    assert(tx_size < TX_SIZES);
-#endif  // CONFIG_EXT_TX
-
-    for (i = 0; i < 4; ++i) {
-      const int offsetr = blk_row + ((i >> 1) << bsl);
-      const int offsetc = blk_col + ((i & 0x01) << bsl);
-      int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
-
-      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
-
-      encode_block_inter(plane, block + i * step, offsetr, offsetc, plane_bsize,
-                         tx_size - 1, arg);
-    }
-  }
-}
-#endif
-
-static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
-                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                               void *arg) {
-  MACROBLOCK *const x = (MACROBLOCK *)arg;
-  MACROBLOCKD *const xd = &x->e_mbd;
-  struct macroblock_plane *const p = &x->plane[plane];
-  struct macroblockd_plane *const pd = &xd->plane[plane];
-  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  uint8_t *dst;
-#if CONFIG_NEW_QUANT
-  int ctx;
-#endif  // CONFIG_NEW_QUANT
-  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
-
-#if CONFIG_NEW_QUANT
-  ctx = 0;
-  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                          tx_size, ctx);
-#else
-  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                   VP10_XFORM_QUANT_B);
-#endif  // CONFIG_NEW_QUANT
-
-  if (p->eobs[block] > 0) {
-#if CONFIG_VP9_HIGHBITDEPTH
-    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
-                                xd->bd);
-      } else {
-        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
-                                xd->bd);
-      }
-      return;
-    }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-      vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
-    } else {
-      vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
-    }
-  }
-}
-
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  vp10_subtract_plane(x, bsize, 0);
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
-                                          encode_block_pass1, x);
-}
-
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  struct optimize_ctx ctx;
-  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
-  int plane;
-
-  mbmi->skip = 1;
-
-  if (x->skip) return;
-
-  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-#if CONFIG_VAR_TX
-    // TODO(jingning): Clean this up.
-    const struct macroblockd_plane *const pd = &xd->plane[plane];
-    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
-    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
-    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
-    const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
-    const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
-    const int bh = num_4x4_blocks_wide_lookup[txb_size];
-    int idx, idy;
-    int block = 0;
-    int step = num_4x4_blocks_txsize_lookup[max_tx_size];
-    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
-#else
-    const struct macroblockd_plane *const pd = &xd->plane[plane];
-    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
-#endif
-    vp10_subtract_plane(x, bsize, plane);
-    arg.ta = ctx.ta[plane];
-    arg.tl = ctx.tl[plane];
-
-#if CONFIG_VAR_TX
-    for (idy = 0; idy < mi_height; idy += bh) {
-      for (idx = 0; idx < mi_width; idx += bh) {
-        encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
-                           &arg);
-        block += step;
-      }
-    }
-#else
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                            &arg);
-#endif
-  }
-}
-
-#if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  MACROBLOCKD *const xd = &x->e_mbd;
-  struct optimize_ctx ctx;
-  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
-  int plane;
-
-  mbmi->skip = 1;
-  if (x->skip) return;
-
-  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-    const struct macroblockd_plane *const pd = &xd->plane[plane];
-#if CONFIG_VAR_TX
-    const TX_SIZE tx_size = TX_4X4;
-#else
-    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-#endif
-    vp10_subtract_plane(x, bsize, plane);
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
-    arg.ta = ctx.ta[plane];
-    arg.tl = ctx.tl[plane];
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                            &arg);
-  }
-}
-#endif  // CONFIG_SUPERTX
-
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
-                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                             void *arg) {
-  struct encode_b_args *const args = arg;
-  MACROBLOCK *const x = args->x;
-  MACROBLOCKD *const xd = &x->e_mbd;
-  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct macroblock_plane *const p = &x->plane[plane];
-  struct macroblockd_plane *const pd = &xd->plane[plane];
-  tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
-  const TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
-  PREDICTION_MODE mode;
-  const int bwl = b_width_log2_lookup[plane_bsize];
-  const int bhl = b_height_log2_lookup[plane_bsize];
-  const int diff_stride = 4 * (1 << bwl);
-  uint8_t *src, *dst;
-  int16_t *src_diff;
-  uint16_t *eob = &p->eobs[block];
-  const int src_stride = p->src.stride;
-  const int dst_stride = pd->dst.stride;
-  const int tx1d_width = num_4x4_blocks_wide_txsize_lookup[tx_size] << 2;
-  const int tx1d_height = num_4x4_blocks_high_txsize_lookup[tx_size] << 2;
-  ENTROPY_CONTEXT *a = NULL, *l = NULL;
-  int ctx;
-
-  INV_TXFM_PARAM inv_txfm_param;
-
-  assert(tx1d_width == tx1d_height);
-
-  dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
-  src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
-  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
-
-  mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
-  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
-                           dst_stride, blk_col, blk_row, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
-                              src, src_stride, dst, dst_stride, xd->bd);
-  } else {
-    vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
-                       src_stride, dst, dst_stride);
-  }
-#else
-  vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
-                     src_stride, dst, dst_stride);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-  a = &args->ta[blk_col];
-  l = &args->tl[blk_row];
-  ctx = combine_entropy_contexts(*a, *l);
-
-  if (args->enable_optimize_b) {
-#if CONFIG_NEW_QUANT
-    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, ctx);
-#else   // CONFIG_NEW_QUANT
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_FP);
-#endif  // CONFIG_NEW_QUANT
-    if (p->eobs[block]) {
-      *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
-    } else {
-      *a = *l = 0;
-    }
-  } else {
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_B);
-    *a = *l = p->eobs[block] > 0;
-  }
-
-  if (*eob) {
-    // inverse transform
-    inv_txfm_param.tx_type = tx_type;
-    inv_txfm_param.tx_size = tx_size;
-    inv_txfm_param.eob = *eob;
-    inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
-    inv_txfm_param.bd = xd->bd;
-    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-    } else {
-      inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-    }
-#else
-    inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-    *(args->skip) = 0;
-  }
-}
-
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
-                                   int enable_optimize_b) {
-  const MACROBLOCKD *const xd = &x->e_mbd;
-  ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
-  ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
-
-  struct encode_b_args arg = { x,  NULL, &xd->mi[0]->mbmi.skip,
-                               ta, tl,   enable_optimize_b };
-  if (enable_optimize_b) {
-    const struct macroblockd_plane *const pd = &xd->plane[plane];
-    const TX_SIZE tx_size =
-        plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
-    vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
-  }
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
-                                          vp10_encode_block_intra, &arg);
-}
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
index 4b88831..c9f9f6d 100644
--- a/av1/encoder/encodemb.h
+++ b/av1/encoder/encodemb.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_ENCODEMB_H_
-#define VP10_ENCODER_ENCODEMB_H_
+#ifndef AV1_ENCODER_ENCODEMB_H_
+#define AV1_ENCODER_ENCODEMB_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/encoder/block.h"
 
 #ifdef __cplusplus
@@ -32,52 +32,50 @@
   int8_t enable_optimize_b;
 };
 
-typedef enum VP10_XFORM_QUANT {
-  VP10_XFORM_QUANT_FP = 0,
-  VP10_XFORM_QUANT_B = 1,
-  VP10_XFORM_QUANT_DC = 2,
-  VP10_XFORM_QUANT_SKIP_QUANT = 3,
-  VP10_XFORM_QUANT_LAST = 4
-} VP10_XFORM_QUANT;
+typedef enum AV1_XFORM_QUANT {
+  AV1_XFORM_QUANT_FP = 0,
+  AV1_XFORM_QUANT_B = 1,
+  AV1_XFORM_QUANT_DC = 2,
+  AV1_XFORM_QUANT_SKIP_QUANT = 3,
+  AV1_XFORM_QUANT_LAST = 4
+} AV1_XFORM_QUANT;
 
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
 #if CONFIG_SUPERTX
-void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
 #endif  // CONFIG_SUPERTX
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
-                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                      VP10_XFORM_QUANT xform_quant_idx);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                     int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                     AV1_XFORM_QUANT xform_quant_idx);
 #if CONFIG_NEW_QUANT
-void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                          int ctx);
-void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx);
-void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
-                             int blk_col, BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, int ctx);
-void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
-                                int blk_row, int blk_col,
-                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                                int ctx);
+void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                         int ctx);
+void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                            int blk_col, BLOCK_SIZE plane_bsize,
+                            TX_SIZE tx_size, int ctx);
+void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                            int blk_col, BLOCK_SIZE plane_bsize,
+                            TX_SIZE tx_size, int ctx);
+void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                               int blk_col, BLOCK_SIZE plane_bsize,
+                               TX_SIZE tx_size, int ctx);
 #endif
 
-int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
-                    int ctx);
+int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                   int ctx);
 
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
-                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                             void *arg);
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg);
 
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
-                                   int enable_optimize_b);
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+                                  int enable_optimize_b);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEMB_H_
+#endif  // AV1_ENCODER_ENCODEMB_H_
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index 78da2b7..a5e06a4 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -17,26 +17,26 @@
 #include "av1/encoder/encodemv.h"
 #include "av1/encoder/subexp.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-static struct vp10_token mv_joint_encodings[MV_JOINTS];
-static struct vp10_token mv_class_encodings[MV_CLASSES];
-static struct vp10_token mv_fp_encodings[MV_FP_SIZE];
-static struct vp10_token mv_class0_encodings[CLASS0_SIZE];
+static struct av1_token mv_joint_encodings[MV_JOINTS];
+static struct av1_token mv_class_encodings[MV_CLASSES];
+static struct av1_token mv_fp_encodings[MV_FP_SIZE];
+static struct av1_token mv_class0_encodings[CLASS0_SIZE];
 
-void vp10_entropy_mv_init(void) {
-  vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree);
-  vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree);
-  vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree);
-  vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
+void av1_entropy_mv_init(void) {
+  av1_tokens_from_tree(mv_joint_encodings, av1_mv_joint_tree);
+  av1_tokens_from_tree(mv_class_encodings, av1_mv_class_tree);
+  av1_tokens_from_tree(mv_class0_encodings, av1_mv_class0_tree);
+  av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
 }
 
-static void encode_mv_component(vp10_writer *w, int comp,
+static void encode_mv_component(aom_writer *w, int comp,
                                 const nmv_component *mvcomp, int usehp) {
   int offset;
   const int sign = comp < 0;
   const int mag = sign ? -comp : comp;
-  const int mv_class = vp10_get_mv_class(mag - 1, &offset);
+  const int mv_class = av1_get_mv_class(mag - 1, &offset);
   const int d = offset >> 3;         // int mv data
   const int fr = (offset >> 1) & 3;  // fractional mv data
   const int hp = offset & 1;         // high precision mv data
@@ -44,30 +44,30 @@
   assert(comp != 0);
 
   // Sign
-  vp10_write(w, sign, mvcomp->sign);
+  aom_write(w, sign, mvcomp->sign);
 
   // Class
-  vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
-                   &mv_class_encodings[mv_class]);
+  av1_write_token(w, av1_mv_class_tree, mvcomp->classes,
+                  &mv_class_encodings[mv_class]);
 
   // Integer bits
   if (mv_class == MV_CLASS_0) {
-    vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
-                     &mv_class0_encodings[d]);
+    av1_write_token(w, av1_mv_class0_tree, mvcomp->class0,
+                    &mv_class0_encodings[d]);
   } else {
     int i;
     const int n = mv_class + CLASS0_BITS - 1;  // number of bits
-    for (i = 0; i < n; ++i) vp10_write(w, (d >> i) & 1, mvcomp->bits[i]);
+    for (i = 0; i < n; ++i) aom_write(w, (d >> i) & 1, mvcomp->bits[i]);
   }
 
   // Fractional bits
-  vp10_write_token(w, vp10_mv_fp_tree,
-                   mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
-                   &mv_fp_encodings[fr]);
+  av1_write_token(w, av1_mv_fp_tree,
+                  mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+                  &mv_fp_encodings[fr]);
 
   // High precision bit
   if (usehp)
-    vp10_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+    aom_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
 }
 
 static void build_nmv_component_cost_table(int *mvcost,
@@ -79,30 +79,30 @@
   int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
   int class0_hp_cost[2], hp_cost[2];
 
-  sign_cost[0] = vp10_cost_zero(mvcomp->sign);
-  sign_cost[1] = vp10_cost_one(mvcomp->sign);
-  vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree);
-  vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree);
+  sign_cost[0] = av1_cost_zero(mvcomp->sign);
+  sign_cost[1] = av1_cost_one(mvcomp->sign);
+  av1_cost_tokens(class_cost, mvcomp->classes, av1_mv_class_tree);
+  av1_cost_tokens(class0_cost, mvcomp->class0, av1_mv_class0_tree);
   for (i = 0; i < MV_OFFSET_BITS; ++i) {
-    bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]);
-    bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]);
+    bits_cost[i][0] = av1_cost_zero(mvcomp->bits[i]);
+    bits_cost[i][1] = av1_cost_one(mvcomp->bits[i]);
   }
 
   for (i = 0; i < CLASS0_SIZE; ++i)
-    vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree);
-  vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree);
+    av1_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], av1_mv_fp_tree);
+  av1_cost_tokens(fp_cost, mvcomp->fp, av1_mv_fp_tree);
 
   if (usehp) {
-    class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp);
-    class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp);
-    hp_cost[0] = vp10_cost_zero(mvcomp->hp);
-    hp_cost[1] = vp10_cost_one(mvcomp->hp);
+    class0_hp_cost[0] = av1_cost_zero(mvcomp->class0_hp);
+    class0_hp_cost[1] = av1_cost_one(mvcomp->class0_hp);
+    hp_cost[0] = av1_cost_zero(mvcomp->hp);
+    hp_cost[1] = av1_cost_one(mvcomp->hp);
   }
   mvcost[0] = 0;
   for (v = 1; v <= MV_MAX; ++v) {
     int z, c, o, d, e, f, cost = 0;
     z = v - 1;
-    c = vp10_get_mv_class(z, &o);
+    c = av1_get_mv_class(z, &o);
     cost += class_cost[c];
     d = (o >> 3);     /* int mv data */
     f = (o >> 1) & 3; /* fractional pel mv data */
@@ -131,48 +131,48 @@
   }
 }
 
-static void update_mv(vp10_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
-                      vpx_prob upd_p) {
+static void update_mv(aom_writer *w, const unsigned int ct[2], aom_prob *cur_p,
+                      aom_prob upd_p) {
   (void)upd_p;
-  vp10_cond_prob_diff_update(w, cur_p, ct);
+  av1_cond_prob_diff_update(w, cur_p, ct);
 }
 
-static void write_mv_update(const vpx_tree_index *tree,
-                            vpx_prob probs[/*n - 1*/],
+static void write_mv_update(const aom_tree_index *tree,
+                            aom_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/], int n,
-                            vp10_writer *w) {
+                            aom_writer *w) {
   int i;
   unsigned int branch_ct[32][2];
 
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
 
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i)
     update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
 }
 
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
-                          nmv_context_counts *const nmv_counts) {
+void aom_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
+                         nmv_context_counts *const nmv_counts) {
   int i, j;
 #if CONFIG_REF_MV
   int nmv_ctx = 0;
   for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
     nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
     nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
-    write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+    write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
                     w);
 
-    vp10_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
+    av1_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
 
     for (i = 0; i < 2; ++i) {
       nmv_component *comp = &mvc->comps[i];
       nmv_component_counts *comp_counts = &counts->comps[i];
 
       update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
-      write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+      write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
                       MV_CLASSES, w);
-      write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+      write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
                       CLASS0_SIZE, w);
       for (j = 0; j < MV_OFFSET_BITS; ++j)
         update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -180,10 +180,10 @@
 
     for (i = 0; i < 2; ++i) {
       for (j = 0; j < CLASS0_SIZE; ++j)
-        write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+        write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
                         counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
 
-      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+      write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
                       MV_FP_SIZE, w);
     }
 
@@ -199,17 +199,16 @@
   nmv_context *const mvc = &cm->fc->nmvc;
   nmv_context_counts *const counts = nmv_counts;
 
-  write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
-                  w);
+  write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
 
   for (i = 0; i < 2; ++i) {
     nmv_component *comp = &mvc->comps[i];
     nmv_component_counts *comp_counts = &counts->comps[i];
 
     update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
-    write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+    write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
                     MV_CLASSES, w);
-    write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+    write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
                     CLASS0_SIZE, w);
     for (j = 0; j < MV_OFFSET_BITS; ++j)
       update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -217,10 +216,10 @@
 
   for (i = 0; i < 2; ++i) {
     for (j = 0; j < CLASS0_SIZE; ++j)
-      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+      write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
                       counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
 
-    write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+    write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
                     MV_FP_SIZE, w);
   }
 
@@ -234,18 +233,18 @@
 #endif
 }
 
-void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
 #if CONFIG_REF_MV
-                    int is_compound,
+                   int is_compound,
 #endif
-                    const nmv_context *mvctx, int usehp) {
+                   const nmv_context *mvctx, int usehp) {
   const MV diff = { mv->row - ref->row, mv->col - ref->col };
-  const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
-  usehp = usehp && vp10_use_mv_hp(ref);
+  const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
+  usehp = usehp && av1_use_mv_hp(ref);
 
 #if CONFIG_REF_MV && !CONFIG_EXT_INTER
   if (is_compound) {
-    vp10_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
+    aom_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
     if (j == MV_JOINT_ZERO) return;
   } else {
     if (j == MV_JOINT_ZERO) assert(0);
@@ -256,8 +255,7 @@
   (void)is_compound;
 #endif
 
-  vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
-                   &mv_joint_encodings[j]);
+  av1_write_token(w, av1_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
   if (mv_joint_vertical(j))
     encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
 
@@ -267,14 +265,14 @@
   // If auto_mv_step_size is enabled then keep track of the largest
   // motion vector component used.
   if (cpi->sf.mv.auto_mv_step_size) {
-    unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3;
-    cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude);
+    unsigned int maxv = AOMMAX(abs(mv->row), abs(mv->col)) >> 3;
+    cpi->max_mv_magnitude = AOMMAX(maxv, cpi->max_mv_magnitude);
   }
 }
 
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
-                               const nmv_context *ctx, int usehp) {
-  vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+                              const nmv_context *ctx, int usehp) {
+  av1_cost_tokens(mvjoint, ctx->joints, av1_mv_joint_tree);
   build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
   build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
 }
@@ -299,33 +297,33 @@
       const MV diff = { mvs[i].as_mv.row - ref->row,
                         mvs[i].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
-                                 mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+      int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                                mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
       nmv_context_counts *counts = &nmv_counts[nmv_ctx];
       (void)pred_mvs;
 #endif
-      vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+      av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
     }
   } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
     const MV diff = { mvs[1].as_mv.row - ref->row,
                       mvs[1].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
 #endif
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
     const MV diff = { mvs[0].as_mv.row - ref->row,
                       mvs[0].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
 #endif
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   }
 }
 
@@ -348,32 +346,32 @@
       const MV diff = { mvs[i].as_mv.row - ref->row,
                         mvs[i].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-      int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
-                                 mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+      int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                                mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
       nmv_context_counts *counts = &nmv_counts[nmv_ctx];
 #endif
-      vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+      av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
     }
   } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
     const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
     const MV diff = { mvs[1].as_mv.row - ref->row,
                       mvs[1].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
 #endif
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
     const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
     const MV diff = { mvs[0].as_mv.row - ref->row,
                       mvs[0].as_mv.col - ref->col };
 #if CONFIG_REF_MV
-    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
 #endif
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   }
 }
 #else
@@ -390,8 +388,8 @@
 
   for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
 #if CONFIG_REF_MV
-    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
-                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+    int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                              mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
     const MV *ref = &pred_mvs[i].as_mv;
 #else
@@ -399,12 +397,12 @@
 #endif
     const MV diff = { mvs[i].as_mv.row - ref->row,
                       mvs[i].as_mv.col - ref->col };
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   }
 }
 #endif  // CONFIG_EXT_INTER
 
-void vp10_update_mv_count(ThreadData *td) {
+void av1_update_mv_count(ThreadData *td) {
   const MACROBLOCKD *xd = &td->mb.e_mbd;
   const MODE_INFO *mi = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
diff --git a/av1/encoder/encodemv.h b/av1/encoder/encodemv.h
index 6cb57c2..edd913e 100644
--- a/av1/encoder/encodemv.h
+++ b/av1/encoder/encodemv.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_ENCODEMV_H_
-#define VP10_ENCODER_ENCODEMV_H_
+#ifndef AV1_ENCODER_ENCODEMV_H_
+#define AV1_ENCODER_ENCODEMV_H_
 
 #include "av1/encoder/encoder.h"
 
@@ -17,24 +17,24 @@
 extern "C" {
 #endif
 
-void vp10_entropy_mv_init(void);
+void av1_entropy_mv_init(void);
 
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
-                          nmv_context_counts *const counts);
+void aom_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
+                         nmv_context_counts *const counts);
 
-void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
 #if CONFIG_REF_MV
-                    int is_compound,
+                   int is_compound,
 #endif
-                    const nmv_context *mvctx, int usehp);
+                   const nmv_context *mvctx, int usehp);
 
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
-                               const nmv_context *mvctx, int usehp);
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+                              const nmv_context *mvctx, int usehp);
 
-void vp10_update_mv_count(ThreadData *td);
+void av1_update_mv_count(ThreadData *td);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEMV_H_
+#endif  // AV1_ENCODER_ENCODEMV_H_
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 5196d9c..619204d 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -12,7 +12,7 @@
 #include <math.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/alloccommon.h"
 #if CONFIG_CLPF
@@ -52,19 +52,19 @@
 #include "av1/encoder/speed_features.h"
 #include "av1/encoder/temporal_filter.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
 #include "aom_dsp/psnr.h"
 #if CONFIG_INTERNAL_STATS
 #include "aom_dsp/ssim.h"
 #endif
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
 #include "aom_util/debug_util.h"
 
 #define AM_SEGMENT_ID_INACTIVE 7
@@ -96,7 +96,7 @@
 FILE *keyfile;
 #endif
 
-static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
+static INLINE void Scale2Ratio(AOM_SCALING mode, int *hr, int *hs) {
   switch (mode) {
     case NORMAL:
       *hr = 1;
@@ -124,7 +124,7 @@
 
 // Mark all inactive blocks as active. Other segmentation features may be set
 // so memset cannot be used, instead only inactive blocks should be reset.
-static void suppress_active_map(VP10_COMP *cpi) {
+static void suppress_active_map(AV1_COMP *cpi) {
   unsigned char *const seg_map = cpi->segmentation_map;
   int i;
   if (cpi->active_map.enabled || cpi->active_map.update)
@@ -133,7 +133,7 @@
         seg_map[i] = AM_SEGMENT_ID_ACTIVE;
 }
 
-static void apply_active_map(VP10_COMP *cpi) {
+static void apply_active_map(AV1_COMP *cpi) {
   struct segmentation *const seg = &cpi->common.seg;
   unsigned char *const seg_map = cpi->segmentation_map;
   const unsigned char *const active_map = cpi->active_map.map;
@@ -150,16 +150,16 @@
     if (cpi->active_map.enabled) {
       for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
         if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
-      vp10_enable_segmentation(seg);
-      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
-      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      av1_enable_segmentation(seg);
+      av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
       // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
       // filter level being zero regardless of the value of seg->abs_delta.
-      vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
-                       -MAX_LOOP_FILTER);
+      av1_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+                      -MAX_LOOP_FILTER);
     } else {
-      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
-      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
       if (seg->enabled) {
         seg->update_data = 1;
         seg->update_map = 1;
@@ -169,8 +169,8 @@
   }
 }
 
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
-                        int cols) {
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                       int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
     unsigned char *const active_map_8x8 = cpi->active_map.map;
     const int mi_rows = cpi->common.mi_rows;
@@ -196,8 +196,8 @@
   }
 }
 
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
-                        int cols) {
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                       int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
       new_map_16x16) {
     unsigned char *const seg_map_8x8 = cpi->segmentation_map;
@@ -221,7 +221,7 @@
   }
 }
 
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
   MACROBLOCK *const mb = &cpi->td.mb;
   cpi->common.allow_high_precision_mv = allow_high_precision_mv;
 
@@ -250,15 +250,15 @@
 #endif
 }
 
-static BLOCK_SIZE select_sb_size(const VP10_COMP *const cpi) {
+static BLOCK_SIZE select_sb_size(const AV1_COMP *const cpi) {
 #if CONFIG_EXT_PARTITION
-  if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_64X64)
+  if (cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_64X64)
     return BLOCK_64X64;
 
-  if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_128X128)
+  if (cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_128X128)
     return BLOCK_128X128;
 
-  assert(cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_DYNAMIC);
+  assert(cpi->oxcf.superblock_size == AOM_SUPERBLOCK_SIZE_DYNAMIC);
 
   assert(IMPLIES(cpi->common.tile_cols > 1,
                  cpi->common.tile_width % MAX_MIB_SIZE == 0));
@@ -273,15 +273,15 @@
 #endif  //  CONFIG_EXT_PARTITION
 }
 
-static void setup_frame(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void setup_frame(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   // Set up entropy context depending on frame type. The decoder mandates
   // the use of the default context, index 0, for keyframes and inter
   // frames where the error_resilient_mode or intra_only flag is set. For
   // other inter-frames the encoder currently uses only two contexts;
   // context 1 for ALTREF frames and context 0 for the others.
   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
-    vp10_setup_past_independence(cm);
+    av1_setup_past_independence(cm);
   } else {
 #if CONFIG_EXT_REFS
     const GF_GROUP *gf_group = &cpi->twopass.gf_group;
@@ -307,10 +307,10 @@
   if (cm->frame_type == KEY_FRAME) {
     cpi->refresh_golden_frame = 1;
     cpi->refresh_alt_ref_frame = 1;
-    vp10_zero(cpi->interp_filter_selected);
+    av1_zero(cpi->interp_filter_selected);
   } else {
     *cm->fc = cm->frame_contexts[cm->frame_context_idx];
-    vp10_zero(cpi->interp_filter_selected[0]);
+    av1_zero(cpi->interp_filter_selected[0]);
   }
 
   cpi->vaq_refresh = 0;
@@ -318,7 +318,7 @@
   set_sb_size(cm, select_sb_size(cpi));
 }
 
-static void vp10_enc_setup_mi(VP10_COMMON *cm) {
+static void av1_enc_setup_mi(AV1_COMMON *cm) {
   int i;
   cm->mi = cm->mip + cm->mi_stride + 1;
   memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
@@ -336,34 +336,34 @@
          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
-static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
-  cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+static int av1_enc_alloc_mi(AV1_COMMON *cm, int mi_size) {
+  cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
   if (!cm->mip) return 1;
-  cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
+  cm->prev_mip = aom_calloc(mi_size, sizeof(*cm->prev_mip));
   if (!cm->prev_mip) return 1;
   cm->mi_alloc_size = mi_size;
 
-  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  cm->mi_grid_base = (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
   if (!cm->mi_grid_base) return 1;
   cm->prev_mi_grid_base =
-      (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+      (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
   if (!cm->prev_mi_grid_base) return 1;
 
   return 0;
 }
 
-static void vp10_enc_free_mi(VP10_COMMON *cm) {
-  vpx_free(cm->mip);
+static void av1_enc_free_mi(AV1_COMMON *cm) {
+  aom_free(cm->mip);
   cm->mip = NULL;
-  vpx_free(cm->prev_mip);
+  aom_free(cm->prev_mip);
   cm->prev_mip = NULL;
-  vpx_free(cm->mi_grid_base);
+  aom_free(cm->mi_grid_base);
   cm->mi_grid_base = NULL;
-  vpx_free(cm->prev_mi_grid_base);
+  aom_free(cm->prev_mi_grid_base);
   cm->prev_mi_grid_base = NULL;
 }
 
-static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
+static void av1_swap_mi_and_prev_mi(AV1_COMMON *cm) {
   // Current mip will be the prev_mip for the next frame.
   MODE_INFO **temp_base = cm->prev_mi_grid_base;
   MODE_INFO *temp = cm->prev_mip;
@@ -380,47 +380,47 @@
   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
 }
 
-void vp10_initialize_enc(void) {
+void av1_initialize_enc(void) {
   static volatile int init_done = 0;
 
   if (!init_done) {
-    vp10_rtcd();
-    vpx_dsp_rtcd();
-    vpx_scale_rtcd();
-    vp10_init_intra_predictors();
-    vp10_init_me_luts();
-    vp10_rc_init_minq_luts();
-    vp10_entropy_mv_init();
-    vp10_encode_token_init();
+    av1_rtcd();
+    aom_dsp_rtcd();
+    aom_scale_rtcd();
+    av1_init_intra_predictors();
+    av1_init_me_luts();
+    av1_rc_init_minq_luts();
+    av1_entropy_mv_init();
+    av1_encode_token_init();
 #if CONFIG_EXT_INTER
-    vp10_init_wedge_masks();
+    av1_init_wedge_masks();
 #endif
     init_done = 1;
   }
 }
 
-static void dealloc_compressor_data(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void dealloc_compressor_data(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int i;
 
-  vpx_free(cpi->mbmi_ext_base);
+  aom_free(cpi->mbmi_ext_base);
   cpi->mbmi_ext_base = NULL;
 
-  vpx_free(cpi->tile_data);
+  aom_free(cpi->tile_data);
   cpi->tile_data = NULL;
 
   // Delete sementation map
-  vpx_free(cpi->segmentation_map);
+  aom_free(cpi->segmentation_map);
   cpi->segmentation_map = NULL;
-  vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+  aom_free(cpi->coding_context.last_frame_seg_map_copy);
   cpi->coding_context.last_frame_seg_map_copy = NULL;
 
 #if CONFIG_REF_MV
   for (i = 0; i < NMV_CONTEXTS; ++i) {
-    vpx_free(cpi->nmv_costs[i][0]);
-    vpx_free(cpi->nmv_costs[i][1]);
-    vpx_free(cpi->nmv_costs_hp[i][0]);
-    vpx_free(cpi->nmv_costs_hp[i][1]);
+    aom_free(cpi->nmv_costs[i][0]);
+    aom_free(cpi->nmv_costs[i][1]);
+    aom_free(cpi->nmv_costs_hp[i][0]);
+    aom_free(cpi->nmv_costs_hp[i][1]);
     cpi->nmv_costs[i][0] = NULL;
     cpi->nmv_costs[i][1] = NULL;
     cpi->nmv_costs_hp[i][0] = NULL;
@@ -428,81 +428,81 @@
   }
 #endif
 
-  vpx_free(cpi->nmvcosts[0]);
-  vpx_free(cpi->nmvcosts[1]);
+  aom_free(cpi->nmvcosts[0]);
+  aom_free(cpi->nmvcosts[1]);
   cpi->nmvcosts[0] = NULL;
   cpi->nmvcosts[1] = NULL;
 
-  vpx_free(cpi->nmvcosts_hp[0]);
-  vpx_free(cpi->nmvcosts_hp[1]);
+  aom_free(cpi->nmvcosts_hp[0]);
+  aom_free(cpi->nmvcosts_hp[1]);
   cpi->nmvcosts_hp[0] = NULL;
   cpi->nmvcosts_hp[1] = NULL;
 
-  vpx_free(cpi->nmvsadcosts[0]);
-  vpx_free(cpi->nmvsadcosts[1]);
+  aom_free(cpi->nmvsadcosts[0]);
+  aom_free(cpi->nmvsadcosts[1]);
   cpi->nmvsadcosts[0] = NULL;
   cpi->nmvsadcosts[1] = NULL;
 
-  vpx_free(cpi->nmvsadcosts_hp[0]);
-  vpx_free(cpi->nmvsadcosts_hp[1]);
+  aom_free(cpi->nmvsadcosts_hp[0]);
+  aom_free(cpi->nmvsadcosts_hp[1]);
   cpi->nmvsadcosts_hp[0] = NULL;
   cpi->nmvsadcosts_hp[1] = NULL;
 
-  vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  av1_cyclic_refresh_free(cpi->cyclic_refresh);
   cpi->cyclic_refresh = NULL;
 
-  vpx_free(cpi->active_map.map);
+  aom_free(cpi->active_map.map);
   cpi->active_map.map = NULL;
 
   // Free up-sampled reference buffers.
   for (i = 0; i < (REF_FRAMES + 1); i++)
-    vpx_free_frame_buffer(&cpi->upsampled_ref_bufs[i].buf);
+    aom_free_frame_buffer(&cpi->upsampled_ref_bufs[i].buf);
 
-  vp10_free_ref_frame_buffers(cm->buffer_pool);
-  vp10_free_context_buffers(cm);
+  av1_free_ref_frame_buffers(cm->buffer_pool);
+  av1_free_context_buffers(cm);
 
-  vpx_free_frame_buffer(&cpi->last_frame_uf);
+  aom_free_frame_buffer(&cpi->last_frame_uf);
 #if CONFIG_LOOP_RESTORATION
-  vpx_free_frame_buffer(&cpi->last_frame_db);
-  vp10_free_restoration_buffers(cm);
+  aom_free_frame_buffer(&cpi->last_frame_db);
+  av1_free_restoration_buffers(cm);
 #endif  // CONFIG_LOOP_RESTORATION
-  vpx_free_frame_buffer(&cpi->scaled_source);
-  vpx_free_frame_buffer(&cpi->scaled_last_source);
-  vpx_free_frame_buffer(&cpi->alt_ref_buffer);
-  vp10_lookahead_destroy(cpi->lookahead);
+  aom_free_frame_buffer(&cpi->scaled_source);
+  aom_free_frame_buffer(&cpi->scaled_last_source);
+  aom_free_frame_buffer(&cpi->alt_ref_buffer);
+  av1_lookahead_destroy(cpi->lookahead);
 
-  vpx_free(cpi->tile_tok[0][0]);
+  aom_free(cpi->tile_tok[0][0]);
   cpi->tile_tok[0][0] = 0;
 
-  vp10_free_pc_tree(&cpi->td);
-  vp10_free_var_tree(&cpi->td);
+  av1_free_pc_tree(&cpi->td);
+  av1_free_var_tree(&cpi->td);
 
   if (cpi->common.allow_screen_content_tools)
-    vpx_free(cpi->td.mb.palette_buffer);
+    aom_free(cpi->td.mb.palette_buffer);
 
   if (cpi->source_diff_var != NULL) {
-    vpx_free(cpi->source_diff_var);
+    aom_free(cpi->source_diff_var);
     cpi->source_diff_var = NULL;
   }
 #if CONFIG_ANS
-  vp10_buf_ans_free(&cpi->buf_ans);
+  av1_buf_ans_free(&cpi->buf_ans);
 #endif  // CONFIG_ANS
 }
 
-static void save_coding_context(VP10_COMP *cpi) {
+static void save_coding_context(AV1_COMP *cpi) {
   CODING_CONTEXT *const cc = &cpi->coding_context;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 #if CONFIG_REF_MV
   int i;
 #endif
 
 // Stores a snapshot of key state variables which can subsequently be
-// restored with a call to vp10_restore_coding_context. These functions are
-// intended for use in a re-code loop in vp10_compress_frame where the
+// restored with a call to av1_restore_coding_context. These functions are
+// intended for use in a re-code loop in av1_compress_frame where the
 // quantizer value is adjusted between loop iterations.
 #if CONFIG_REF_MV
   for (i = 0; i < NMV_CONTEXTS; ++i) {
-    vp10_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
+    av1_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
     memcpy(cc->nmv_costs[i][0], cpi->nmv_costs[i][0],
            MV_VALS * sizeof(*cpi->nmv_costs[i][0]));
     memcpy(cc->nmv_costs[i][1], cpi->nmv_costs[i][1],
@@ -513,7 +513,7 @@
            MV_VALS * sizeof(*cpi->nmv_costs_hp[i][1]));
   }
 #else
-  vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+  av1_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
 #endif
 
   memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
@@ -528,24 +528,24 @@
   memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
          (cm->mi_rows * cm->mi_cols));
 
-  vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
-  vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+  av1_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+  av1_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
 
   cc->fc = *cm->fc;
 }
 
-static void restore_coding_context(VP10_COMP *cpi) {
+static void restore_coding_context(AV1_COMP *cpi) {
   CODING_CONTEXT *const cc = &cpi->coding_context;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 #if CONFIG_REF_MV
   int i;
 #endif
 
 // Restore key state variables to the snapshot state stored in the
-// previous call to vp10_save_coding_context.
+// previous call to av1_save_coding_context.
 #if CONFIG_REF_MV
   for (i = 0; i < NMV_CONTEXTS; ++i) {
-    vp10_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
+    av1_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
     memcpy(cpi->nmv_costs[i][0], cc->nmv_costs[i][0],
            MV_VALS * sizeof(*cc->nmv_costs[i][0]));
     memcpy(cpi->nmv_costs[i][1], cc->nmv_costs[i][1],
@@ -556,7 +556,7 @@
            MV_VALS * sizeof(*cc->nmv_costs_hp[i][1]));
   }
 #else
-  vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+  av1_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
 #endif
 
   memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
@@ -569,14 +569,14 @@
   memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
          (cm->mi_rows * cm->mi_cols));
 
-  vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
-  vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+  av1_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+  av1_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
 
   *cm->fc = cc->fc;
 }
 
-static void configure_static_seg_features(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void configure_static_seg_features(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   struct segmentation *const seg = &cm->seg;
 
@@ -592,10 +592,10 @@
     cpi->static_mb_pct = 0;
 
     // Disable segmentation
-    vp10_disable_segmentation(seg);
+    av1_disable_segmentation(seg);
 
     // Clear down the segment features.
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
   } else if (cpi->refresh_alt_ref_frame) {
     // If this is an alt ref frame
     // Clear down the global segmentation map
@@ -605,12 +605,12 @@
     cpi->static_mb_pct = 0;
 
     // Disable segmentation and individual segment features by default
-    vp10_disable_segmentation(seg);
-    vp10_clearall_segfeatures(seg);
+    av1_disable_segmentation(seg);
+    av1_clearall_segfeatures(seg);
 
     // Scan frames from current to arf frame.
     // This function re-enables segmentation if appropriate.
-    vp10_update_mbgraph_stats(cpi);
+    av1_update_mbgraph_stats(cpi);
 
     // If segmentation was enabled set those features needed for the
     // arf itself.
@@ -619,12 +619,12 @@
       seg->update_data = 1;
 
       qi_delta =
-          vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
-      vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
-      vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+          av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
+      av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
+      av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
 
-      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
-      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+      av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+      av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
 
       // Where relevant assume segment data is delta data
       seg->abs_delta = SEGMENT_DELTADATA;
@@ -640,32 +640,32 @@
         seg->update_data = 1;
         seg->abs_delta = SEGMENT_DELTADATA;
 
-        qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
-                                       cm->bit_depth);
-        vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+        qi_delta =
+            av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
+        av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
+        av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
 
-        vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+        av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+        av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
 
         // Segment coding disabled for compred testing
         if (high_q || (cpi->static_mb_pct == 100)) {
-          vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
-          vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
-          vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+          av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+          av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+          av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
         }
       } else {
         // Disable segmentation and clear down features if alt ref
         // is not active for this group
 
-        vp10_disable_segmentation(seg);
+        av1_disable_segmentation(seg);
 
         memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
 
         seg->update_map = 0;
         seg->update_data = 0;
 
-        vp10_clearall_segfeatures(seg);
+        av1_clearall_segfeatures(seg);
       }
     } else if (rc->is_src_frame_alt_ref) {
       // Special case where we are coding over the top of a previous
@@ -673,19 +673,19 @@
       // Segment coding disabled for compred testing
 
       // Enable ref frame features for segment 0 as well
-      vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
-      vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+      av1_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+      av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
 
       // All mbs should use ALTREF_FRAME
-      vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
-      vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
-      vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
-      vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+      av1_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+      av1_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+      av1_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+      av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
 
       // Skip all MBs if high Q (0,0 mv and skip coeffs)
       if (high_q) {
-        vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+        av1_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+        av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
       }
       // Enable data update
       seg->update_data = 1;
@@ -699,8 +699,8 @@
   }
 }
 
-static void update_reference_segmentation_map(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void update_reference_segmentation_map(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
   uint8_t *cache_ptr = cm->last_frame_seg_map;
   int row, col;
@@ -715,120 +715,120 @@
   }
 }
 
-static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static void alloc_raw_frame_buffers(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
 
   if (!cpi->lookahead)
-    cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
-                                         cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
-                                         cm->use_highbitdepth,
+    cpi->lookahead = av1_lookahead_init(oxcf->width, oxcf->height,
+                                        cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+                                        cm->use_highbitdepth,
 #endif
-                                         oxcf->lag_in_frames);
+                                        oxcf->lag_in_frames);
   if (!cpi->lookahead)
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate lag buffers");
 
   // TODO(agrange) Check if ARF is enabled and skip allocation if not.
-  if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
+  if (aom_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate altref buffer");
 }
 
-static void alloc_util_frame_buffers(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
-  if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
+static void alloc_util_frame_buffers(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
+  if (aom_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate last frame buffer");
 
 #if CONFIG_LOOP_RESTORATION
-  if (vpx_realloc_frame_buffer(&cpi->last_frame_db, cm->width, cm->height,
+  if (aom_realloc_frame_buffer(&cpi->last_frame_db, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate last frame deblocked buffer");
 #endif  // CONFIG_LOOP_RESTORATION
 
-  if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
+  if (aom_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate scaled source buffer");
 
-  if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
+  if (aom_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate scaled last source buffer");
 }
 
-static int alloc_context_buffers_ext(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+static int alloc_context_buffers_ext(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   int mi_size = cm->mi_cols * cm->mi_rows;
 
-  cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
+  cpi->mbmi_ext_base = aom_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
   if (!cpi->mbmi_ext_base) return 1;
 
   return 0;
 }
 
-void vp10_alloc_compressor_data(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_alloc_compressor_data(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
 
-  vp10_alloc_context_buffers(cm, cm->width, cm->height);
+  av1_alloc_context_buffers(cm, cm->width, cm->height);
 
   alloc_context_buffers_ext(cpi);
 
-  vpx_free(cpi->tile_tok[0][0]);
+  aom_free(cpi->tile_tok[0][0]);
 
   {
     unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
     CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
-                    vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+                    aom_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
 #if CONFIG_ANS
-    vp10_buf_ans_alloc(&cpi->buf_ans, cm, tokens);
+    av1_buf_ans_alloc(&cpi->buf_ans, cm, tokens);
 #endif  // CONFIG_ANS
   }
 
-  vp10_setup_pc_tree(&cpi->common, &cpi->td);
+  av1_setup_pc_tree(&cpi->common, &cpi->td);
 }
 
-void vp10_new_framerate(VP10_COMP *cpi, double framerate) {
+void av1_new_framerate(AV1_COMP *cpi, double framerate) {
   cpi->framerate = framerate < 0.1 ? 30 : framerate;
-  vp10_rc_update_framerate(cpi);
+  av1_rc_update_framerate(cpi);
 }
 
-static void set_tile_info(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_tile_info(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
 #if CONFIG_EXT_TILE
 #if CONFIG_EXT_PARTITION
-  if (cpi->oxcf.superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
+  if (cpi->oxcf.superblock_size != AOM_SUPERBLOCK_SIZE_64X64) {
     cm->tile_width = clamp(cpi->oxcf.tile_columns, 1, 32);
     cm->tile_height = clamp(cpi->oxcf.tile_rows, 1, 32);
     cm->tile_width <<= MAX_MIB_SIZE_LOG2;
@@ -846,8 +846,8 @@
   cm->tile_height <<= MAX_MIB_SIZE_LOG2;
 #endif  // CONFIG_EXT_PARTITION
 
-  cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
-  cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+  cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
+  cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
 
   assert(cm->tile_width >> MAX_MIB_SIZE <= 32);
   assert(cm->tile_height >> MAX_MIB_SIZE <= 32);
@@ -860,7 +860,7 @@
   while (cm->tile_rows * cm->tile_height < cm->mi_rows) ++cm->tile_rows;
 #else
   int min_log2_tile_cols, max_log2_tile_cols;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   cm->log2_tile_cols =
       clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
@@ -880,20 +880,20 @@
 #endif  // CONFIG_EXT_TILE
 }
 
-static void update_frame_size(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void update_frame_size(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
-  vp10_set_mb_mi(cm, cm->width, cm->height);
-  vp10_init_context_buffers(cm);
-  vp10_init_macroblockd(cm, xd, NULL);
+  av1_set_mb_mi(cm, cm->width, cm->height);
+  av1_init_context_buffers(cm);
+  av1_init_macroblockd(cm, xd, NULL);
   memset(cpi->mbmi_ext_base, 0,
          cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
 
   set_tile_info(cpi);
 }
 
-static void init_buffer_indices(VP10_COMP *cpi) {
+static void init_buffer_indices(AV1_COMP *cpi) {
 #if CONFIG_EXT_REFS
   int fb_idx;
   for (fb_idx = 0; fb_idx < LAST_REF_FRAMES; ++fb_idx)
@@ -910,15 +910,15 @@
 #endif  // CONFIG_EXT_REFS
 }
 
-static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
-  VP10_COMMON *const cm = &cpi->common;
+static void init_config(struct AV1_COMP *cpi, AV1EncoderConfig *oxcf) {
+  AV1_COMMON *const cm = &cpi->common;
 
   cpi->oxcf = *oxcf;
   cpi->framerate = oxcf->init_framerate;
 
   cm->profile = oxcf->profile;
   cm->bit_depth = oxcf->bit_depth;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   cm->use_highbitdepth = oxcf->use_highbitdepth;
 #endif
   cm->color_space = oxcf->color_space;
@@ -926,13 +926,13 @@
 
   cm->width = oxcf->width;
   cm->height = oxcf->height;
-  vp10_alloc_compressor_data(cpi);
+  av1_alloc_compressor_data(cpi);
 
   // Single thread case: use counts in common.
   cpi->td.counts = &cm->counts;
 
   // change includes all joint functionality
-  vp10_change_config(cpi, oxcf);
+  av1_change_config(cpi, oxcf);
 
   cpi->static_mb_pct = 0;
   cpi->ref_frame_flags = 0;
@@ -941,7 +941,7 @@
 }
 
 static void set_rc_buffer_sizes(RATE_CONTROL *rc,
-                                const VP10EncoderConfig *oxcf) {
+                                const AV1EncoderConfig *oxcf) {
   const int64_t bandwidth = oxcf->target_bandwidth;
   const int64_t starting = oxcf->starting_buffer_level_ms;
   const int64_t optimal = oxcf->optimal_buffer_level_ms;
@@ -954,7 +954,7 @@
       (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
   cpi->fn_ptr[BT].sdf = SDF;                                           \
   cpi->fn_ptr[BT].sdaf = SDAF;                                         \
@@ -1064,73 +1064,73 @@
   }
 
 #if CONFIG_EXT_PARTITION
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x128)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x128_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad128x128x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad128x128x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x128x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x64_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x128)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x128_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x128x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad128x128)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad128x128_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad128x128x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad128x128x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad128x128x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad128x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad128x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad128x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x128)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x128_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x128x4d)
 #endif  // CONFIG_EXT_PARTITION
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad32x32x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad32x32x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad64x64x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad64x64x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x16x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x16x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x8x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x16x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x16x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x8x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x4x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x8x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
-MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
-MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
-MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
-MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x16_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad32x32)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad32x32_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad32x32x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad32x32x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad32x32x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad64x64)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad64x64_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad64x64x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad64x64x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad64x64x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x16_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad16x16x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad16x16x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad16x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad16x8_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad16x8x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad16x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad16x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x16)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x16_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad8x16x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x16x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x16x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x8_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad8x8x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad8x4)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad8x4_avg)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad8x4x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad8x4x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad4x8)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad4x8_avg)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad4x8x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x8x4d)
+MAKE_BFP_SAD_WRAPPER(aom_highbd_sad4x4)
+MAKE_BFP_SADAVG_WRAPPER(aom_highbd_sad4x4_avg)
+MAKE_BFP_SAD3_WRAPPER(aom_highbd_sad4x4x3)
+MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad4x4x8)
+MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x4x4d)
 
 #if CONFIG_EXT_INTER
 #define HIGHBD_MBFP(BT, MSDF, MVF, MSVF) \
@@ -1158,23 +1158,23 @@
   }
 
 #if CONFIG_EXT_PARTITION
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x128)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x128)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad128x128)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad128x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x128)
 #endif  // CONFIG_EXT_PARTITION
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x64)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x32)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x16)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x4)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x8)
-MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x4)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad64x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x64)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad32x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x32)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad16x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x16)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad8x4)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad4x8)
+MAKE_MBFP_SAD_WRAPPER(aom_highbd_masked_sad4x4)
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_OBMC
@@ -1201,704 +1201,704 @@
   }
 
 #if CONFIG_EXT_PARTITION
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x128)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x128)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad128x128)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad128x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x128)
 #endif  // CONFIG_EXT_PARTITION
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x64)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x32)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x16)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x4)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x8)
-MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x4)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad64x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x64)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad32x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x32)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad16x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x16)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad8x4)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad4x8)
+MAKE_OBFP_SAD_WRAPPER(aom_highbd_obmc_sad4x4)
 #endif  // CONFIG_OBMC
 
-static void highbd_set_var_fns(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void highbd_set_var_fns(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   if (cm->use_highbitdepth) {
     switch (cm->bit_depth) {
-      case VPX_BITS_8:
-        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
-                   vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
-                   vpx_highbd_8_sub_pixel_variance32x16,
-                   vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
-                   vpx_highbd_sad32x16x4d_bits8)
+      case AOM_BITS_8:
+        HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits8,
+                   aom_highbd_sad32x16_avg_bits8, aom_highbd_8_variance32x16,
+                   aom_highbd_8_sub_pixel_variance32x16,
+                   aom_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
+                   aom_highbd_sad32x16x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
-                   vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
-                   vpx_highbd_8_sub_pixel_variance16x32,
-                   vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
-                   vpx_highbd_sad16x32x4d_bits8)
+        HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits8,
+                   aom_highbd_sad16x32_avg_bits8, aom_highbd_8_variance16x32,
+                   aom_highbd_8_sub_pixel_variance16x32,
+                   aom_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
+                   aom_highbd_sad16x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
-                   vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
-                   vpx_highbd_8_sub_pixel_variance64x32,
-                   vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
-                   vpx_highbd_sad64x32x4d_bits8)
+        HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits8,
+                   aom_highbd_sad64x32_avg_bits8, aom_highbd_8_variance64x32,
+                   aom_highbd_8_sub_pixel_variance64x32,
+                   aom_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
+                   aom_highbd_sad64x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
-                   vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
-                   vpx_highbd_8_sub_pixel_variance32x64,
-                   vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
-                   vpx_highbd_sad32x64x4d_bits8)
+        HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits8,
+                   aom_highbd_sad32x64_avg_bits8, aom_highbd_8_variance32x64,
+                   aom_highbd_8_sub_pixel_variance32x64,
+                   aom_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
+                   aom_highbd_sad32x64x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
-                   vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
-                   vpx_highbd_8_sub_pixel_variance32x32,
-                   vpx_highbd_8_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
-                   vpx_highbd_sad32x32x4d_bits8)
+        HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits8,
+                   aom_highbd_sad32x32_avg_bits8, aom_highbd_8_variance32x32,
+                   aom_highbd_8_sub_pixel_variance32x32,
+                   aom_highbd_8_sub_pixel_avg_variance32x32,
+                   aom_highbd_sad32x32x3_bits8, aom_highbd_sad32x32x8_bits8,
+                   aom_highbd_sad32x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
-                   vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
-                   vpx_highbd_8_sub_pixel_variance64x64,
-                   vpx_highbd_8_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
-                   vpx_highbd_sad64x64x4d_bits8)
+        HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits8,
+                   aom_highbd_sad64x64_avg_bits8, aom_highbd_8_variance64x64,
+                   aom_highbd_8_sub_pixel_variance64x64,
+                   aom_highbd_8_sub_pixel_avg_variance64x64,
+                   aom_highbd_sad64x64x3_bits8, aom_highbd_sad64x64x8_bits8,
+                   aom_highbd_sad64x64x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
-                   vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
-                   vpx_highbd_8_sub_pixel_variance16x16,
-                   vpx_highbd_8_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
-                   vpx_highbd_sad16x16x4d_bits8)
+        HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits8,
+                   aom_highbd_sad16x16_avg_bits8, aom_highbd_8_variance16x16,
+                   aom_highbd_8_sub_pixel_variance16x16,
+                   aom_highbd_8_sub_pixel_avg_variance16x16,
+                   aom_highbd_sad16x16x3_bits8, aom_highbd_sad16x16x8_bits8,
+                   aom_highbd_sad16x16x4d_bits8)
 
         HIGHBD_BFP(
-            BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
-            vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
-            vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
-            vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
+            BLOCK_16X8, aom_highbd_sad16x8_bits8, aom_highbd_sad16x8_avg_bits8,
+            aom_highbd_8_variance16x8, aom_highbd_8_sub_pixel_variance16x8,
+            aom_highbd_8_sub_pixel_avg_variance16x8, aom_highbd_sad16x8x3_bits8,
+            aom_highbd_sad16x8x8_bits8, aom_highbd_sad16x8x4d_bits8)
 
         HIGHBD_BFP(
-            BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
-            vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
-            vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
-            vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
+            BLOCK_8X16, aom_highbd_sad8x16_bits8, aom_highbd_sad8x16_avg_bits8,
+            aom_highbd_8_variance8x16, aom_highbd_8_sub_pixel_variance8x16,
+            aom_highbd_8_sub_pixel_avg_variance8x16, aom_highbd_sad8x16x3_bits8,
+            aom_highbd_sad8x16x8_bits8, aom_highbd_sad8x16x4d_bits8)
 
         HIGHBD_BFP(
-            BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
-            vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
-            vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
-            vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
+            BLOCK_8X8, aom_highbd_sad8x8_bits8, aom_highbd_sad8x8_avg_bits8,
+            aom_highbd_8_variance8x8, aom_highbd_8_sub_pixel_variance8x8,
+            aom_highbd_8_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits8,
+            aom_highbd_sad8x8x8_bits8, aom_highbd_sad8x8x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
-                   vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
-                   vpx_highbd_8_sub_pixel_variance8x4,
-                   vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
-                   vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
+        HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits8,
+                   aom_highbd_sad8x4_avg_bits8, aom_highbd_8_variance8x4,
+                   aom_highbd_8_sub_pixel_variance8x4,
+                   aom_highbd_8_sub_pixel_avg_variance8x4, NULL,
+                   aom_highbd_sad8x4x8_bits8, aom_highbd_sad8x4x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
-                   vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
-                   vpx_highbd_8_sub_pixel_variance4x8,
-                   vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
-                   vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
+        HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits8,
+                   aom_highbd_sad4x8_avg_bits8, aom_highbd_8_variance4x8,
+                   aom_highbd_8_sub_pixel_variance4x8,
+                   aom_highbd_8_sub_pixel_avg_variance4x8, NULL,
+                   aom_highbd_sad4x8x8_bits8, aom_highbd_sad4x8x4d_bits8)
 
         HIGHBD_BFP(
-            BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
-            vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
-            vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
-            vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
+            BLOCK_4X4, aom_highbd_sad4x4_bits8, aom_highbd_sad4x4_avg_bits8,
+            aom_highbd_8_variance4x4, aom_highbd_8_sub_pixel_variance4x4,
+            aom_highbd_8_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits8,
+            aom_highbd_sad4x4x8_bits8, aom_highbd_sad4x4x4d_bits8)
 
 #if CONFIG_EXT_PARTITION
-        HIGHBD_BFP(BLOCK_128X128, vpx_highbd_sad128x128_bits8,
-                   vpx_highbd_sad128x128_avg_bits8,
-                   vpx_highbd_8_variance128x128,
-                   vpx_highbd_8_sub_pixel_variance128x128,
-                   vpx_highbd_8_sub_pixel_avg_variance128x128,
-                   vpx_highbd_sad128x128x3_bits8, vpx_highbd_sad128x128x8_bits8,
-                   vpx_highbd_sad128x128x4d_bits8)
+        HIGHBD_BFP(BLOCK_128X128, aom_highbd_sad128x128_bits8,
+                   aom_highbd_sad128x128_avg_bits8,
+                   aom_highbd_8_variance128x128,
+                   aom_highbd_8_sub_pixel_variance128x128,
+                   aom_highbd_8_sub_pixel_avg_variance128x128,
+                   aom_highbd_sad128x128x3_bits8, aom_highbd_sad128x128x8_bits8,
+                   aom_highbd_sad128x128x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits8,
-                   vpx_highbd_sad128x64_avg_bits8, vpx_highbd_8_variance128x64,
-                   vpx_highbd_8_sub_pixel_variance128x64,
-                   vpx_highbd_8_sub_pixel_avg_variance128x64, NULL, NULL,
-                   vpx_highbd_sad128x64x4d_bits8)
+        HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits8,
+                   aom_highbd_sad128x64_avg_bits8, aom_highbd_8_variance128x64,
+                   aom_highbd_8_sub_pixel_variance128x64,
+                   aom_highbd_8_sub_pixel_avg_variance128x64, NULL, NULL,
+                   aom_highbd_sad128x64x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits8,
-                   vpx_highbd_sad64x128_avg_bits8, vpx_highbd_8_variance64x128,
-                   vpx_highbd_8_sub_pixel_variance64x128,
-                   vpx_highbd_8_sub_pixel_avg_variance64x128, NULL, NULL,
-                   vpx_highbd_sad64x128x4d_bits8)
+        HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits8,
+                   aom_highbd_sad64x128_avg_bits8, aom_highbd_8_variance64x128,
+                   aom_highbd_8_sub_pixel_variance64x128,
+                   aom_highbd_8_sub_pixel_avg_variance64x128, NULL, NULL,
+                   aom_highbd_sad64x128x4d_bits8)
 #endif  // CONFIG_EXT_PARTITION
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits8,
-                    vpx_highbd_masked_variance128x128,
-                    vpx_highbd_masked_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits8,
-                    vpx_highbd_masked_variance128x64,
-                    vpx_highbd_masked_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits8,
-                    vpx_highbd_masked_variance64x128,
-                    vpx_highbd_masked_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits8,
+                    aom_highbd_masked_variance128x128,
+                    aom_highbd_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits8,
+                    aom_highbd_masked_variance128x64,
+                    aom_highbd_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits8,
+                    aom_highbd_masked_variance64x128,
+                    aom_highbd_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits8,
-                    vpx_highbd_masked_variance64x64,
-                    vpx_highbd_masked_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits8,
-                    vpx_highbd_masked_variance64x32,
-                    vpx_highbd_masked_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits8,
-                    vpx_highbd_masked_variance32x64,
-                    vpx_highbd_masked_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits8,
-                    vpx_highbd_masked_variance32x32,
-                    vpx_highbd_masked_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits8,
-                    vpx_highbd_masked_variance32x16,
-                    vpx_highbd_masked_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits8,
-                    vpx_highbd_masked_variance16x32,
-                    vpx_highbd_masked_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits8,
-                    vpx_highbd_masked_variance16x16,
-                    vpx_highbd_masked_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits8,
-                    vpx_highbd_masked_variance8x16,
-                    vpx_highbd_masked_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits8,
-                    vpx_highbd_masked_variance16x8,
-                    vpx_highbd_masked_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits8,
-                    vpx_highbd_masked_variance8x8,
-                    vpx_highbd_masked_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits8,
-                    vpx_highbd_masked_variance4x8,
-                    vpx_highbd_masked_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits8,
-                    vpx_highbd_masked_variance8x4,
-                    vpx_highbd_masked_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits8,
-                    vpx_highbd_masked_variance4x4,
-                    vpx_highbd_masked_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits8,
+                    aom_highbd_masked_variance64x64,
+                    aom_highbd_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits8,
+                    aom_highbd_masked_variance64x32,
+                    aom_highbd_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits8,
+                    aom_highbd_masked_variance32x64,
+                    aom_highbd_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits8,
+                    aom_highbd_masked_variance32x32,
+                    aom_highbd_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits8,
+                    aom_highbd_masked_variance32x16,
+                    aom_highbd_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits8,
+                    aom_highbd_masked_variance16x32,
+                    aom_highbd_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits8,
+                    aom_highbd_masked_variance16x16,
+                    aom_highbd_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits8,
+                    aom_highbd_masked_variance8x16,
+                    aom_highbd_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits8,
+                    aom_highbd_masked_variance16x8,
+                    aom_highbd_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits8,
+                    aom_highbd_masked_variance8x8,
+                    aom_highbd_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits8,
+                    aom_highbd_masked_variance4x8,
+                    aom_highbd_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits8,
+                    aom_highbd_masked_variance8x4,
+                    aom_highbd_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits8,
+                    aom_highbd_masked_variance4x4,
+                    aom_highbd_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC
 #if CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits8,
-                    vpx_highbd_obmc_variance128x128,
-                    vpx_highbd_obmc_sub_pixel_variance128x128)
-        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits8,
-                    vpx_highbd_obmc_variance128x64,
-                    vpx_highbd_obmc_sub_pixel_variance128x64)
-        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits8,
-                    vpx_highbd_obmc_variance64x128,
-                    vpx_highbd_obmc_sub_pixel_variance64x128)
+        HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits8,
+                    aom_highbd_obmc_variance128x128,
+                    aom_highbd_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits8,
+                    aom_highbd_obmc_variance128x64,
+                    aom_highbd_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits8,
+                    aom_highbd_obmc_variance64x128,
+                    aom_highbd_obmc_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits8,
-                    vpx_highbd_obmc_variance64x64,
-                    vpx_highbd_obmc_sub_pixel_variance64x64)
-        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits8,
-                    vpx_highbd_obmc_variance64x32,
-                    vpx_highbd_obmc_sub_pixel_variance64x32)
-        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits8,
-                    vpx_highbd_obmc_variance32x64,
-                    vpx_highbd_obmc_sub_pixel_variance32x64)
-        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits8,
-                    vpx_highbd_obmc_variance32x32,
-                    vpx_highbd_obmc_sub_pixel_variance32x32)
-        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits8,
-                    vpx_highbd_obmc_variance32x16,
-                    vpx_highbd_obmc_sub_pixel_variance32x16)
-        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits8,
-                    vpx_highbd_obmc_variance16x32,
-                    vpx_highbd_obmc_sub_pixel_variance16x32)
-        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits8,
-                    vpx_highbd_obmc_variance16x16,
-                    vpx_highbd_obmc_sub_pixel_variance16x16)
-        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits8,
-                    vpx_highbd_obmc_variance8x16,
-                    vpx_highbd_obmc_sub_pixel_variance8x16)
-        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits8,
-                    vpx_highbd_obmc_variance16x8,
-                    vpx_highbd_obmc_sub_pixel_variance16x8)
-        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits8,
-                    vpx_highbd_obmc_variance8x8,
-                    vpx_highbd_obmc_sub_pixel_variance8x8)
-        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits8,
-                    vpx_highbd_obmc_variance4x8,
-                    vpx_highbd_obmc_sub_pixel_variance4x8)
-        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits8,
-                    vpx_highbd_obmc_variance8x4,
-                    vpx_highbd_obmc_sub_pixel_variance8x4)
-        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits8,
-                    vpx_highbd_obmc_variance4x4,
-                    vpx_highbd_obmc_sub_pixel_variance4x4)
+        HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits8,
+                    aom_highbd_obmc_variance64x64,
+                    aom_highbd_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits8,
+                    aom_highbd_obmc_variance64x32,
+                    aom_highbd_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits8,
+                    aom_highbd_obmc_variance32x64,
+                    aom_highbd_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits8,
+                    aom_highbd_obmc_variance32x32,
+                    aom_highbd_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits8,
+                    aom_highbd_obmc_variance32x16,
+                    aom_highbd_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits8,
+                    aom_highbd_obmc_variance16x32,
+                    aom_highbd_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits8,
+                    aom_highbd_obmc_variance16x16,
+                    aom_highbd_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits8,
+                    aom_highbd_obmc_variance8x16,
+                    aom_highbd_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits8,
+                    aom_highbd_obmc_variance16x8,
+                    aom_highbd_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits8,
+                    aom_highbd_obmc_variance8x8,
+                    aom_highbd_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits8,
+                    aom_highbd_obmc_variance4x8,
+                    aom_highbd_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits8,
+                    aom_highbd_obmc_variance8x4,
+                    aom_highbd_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits8,
+                    aom_highbd_obmc_variance4x4,
+                    aom_highbd_obmc_sub_pixel_variance4x4)
 #endif  // CONFIG_OBMC
         break;
 
-      case VPX_BITS_10:
-        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
-                   vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
-                   vpx_highbd_10_sub_pixel_variance32x16,
-                   vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
-                   vpx_highbd_sad32x16x4d_bits10)
+      case AOM_BITS_10:
+        HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits10,
+                   aom_highbd_sad32x16_avg_bits10, aom_highbd_10_variance32x16,
+                   aom_highbd_10_sub_pixel_variance32x16,
+                   aom_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
+                   aom_highbd_sad32x16x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
-                   vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
-                   vpx_highbd_10_sub_pixel_variance16x32,
-                   vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
-                   vpx_highbd_sad16x32x4d_bits10)
+        HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits10,
+                   aom_highbd_sad16x32_avg_bits10, aom_highbd_10_variance16x32,
+                   aom_highbd_10_sub_pixel_variance16x32,
+                   aom_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
+                   aom_highbd_sad16x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
-                   vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
-                   vpx_highbd_10_sub_pixel_variance64x32,
-                   vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
-                   vpx_highbd_sad64x32x4d_bits10)
+        HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits10,
+                   aom_highbd_sad64x32_avg_bits10, aom_highbd_10_variance64x32,
+                   aom_highbd_10_sub_pixel_variance64x32,
+                   aom_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
+                   aom_highbd_sad64x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
-                   vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
-                   vpx_highbd_10_sub_pixel_variance32x64,
-                   vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
-                   vpx_highbd_sad32x64x4d_bits10)
+        HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits10,
+                   aom_highbd_sad32x64_avg_bits10, aom_highbd_10_variance32x64,
+                   aom_highbd_10_sub_pixel_variance32x64,
+                   aom_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
+                   aom_highbd_sad32x64x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
-                   vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
-                   vpx_highbd_10_sub_pixel_variance32x32,
-                   vpx_highbd_10_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
-                   vpx_highbd_sad32x32x4d_bits10)
+        HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits10,
+                   aom_highbd_sad32x32_avg_bits10, aom_highbd_10_variance32x32,
+                   aom_highbd_10_sub_pixel_variance32x32,
+                   aom_highbd_10_sub_pixel_avg_variance32x32,
+                   aom_highbd_sad32x32x3_bits10, aom_highbd_sad32x32x8_bits10,
+                   aom_highbd_sad32x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
-                   vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
-                   vpx_highbd_10_sub_pixel_variance64x64,
-                   vpx_highbd_10_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
-                   vpx_highbd_sad64x64x4d_bits10)
+        HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits10,
+                   aom_highbd_sad64x64_avg_bits10, aom_highbd_10_variance64x64,
+                   aom_highbd_10_sub_pixel_variance64x64,
+                   aom_highbd_10_sub_pixel_avg_variance64x64,
+                   aom_highbd_sad64x64x3_bits10, aom_highbd_sad64x64x8_bits10,
+                   aom_highbd_sad64x64x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
-                   vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
-                   vpx_highbd_10_sub_pixel_variance16x16,
-                   vpx_highbd_10_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
-                   vpx_highbd_sad16x16x4d_bits10)
+        HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits10,
+                   aom_highbd_sad16x16_avg_bits10, aom_highbd_10_variance16x16,
+                   aom_highbd_10_sub_pixel_variance16x16,
+                   aom_highbd_10_sub_pixel_avg_variance16x16,
+                   aom_highbd_sad16x16x3_bits10, aom_highbd_sad16x16x8_bits10,
+                   aom_highbd_sad16x16x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
-                   vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
-                   vpx_highbd_10_sub_pixel_variance16x8,
-                   vpx_highbd_10_sub_pixel_avg_variance16x8,
-                   vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
-                   vpx_highbd_sad16x8x4d_bits10)
+        HIGHBD_BFP(BLOCK_16X8, aom_highbd_sad16x8_bits10,
+                   aom_highbd_sad16x8_avg_bits10, aom_highbd_10_variance16x8,
+                   aom_highbd_10_sub_pixel_variance16x8,
+                   aom_highbd_10_sub_pixel_avg_variance16x8,
+                   aom_highbd_sad16x8x3_bits10, aom_highbd_sad16x8x8_bits10,
+                   aom_highbd_sad16x8x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
-                   vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
-                   vpx_highbd_10_sub_pixel_variance8x16,
-                   vpx_highbd_10_sub_pixel_avg_variance8x16,
-                   vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
-                   vpx_highbd_sad8x16x4d_bits10)
+        HIGHBD_BFP(BLOCK_8X16, aom_highbd_sad8x16_bits10,
+                   aom_highbd_sad8x16_avg_bits10, aom_highbd_10_variance8x16,
+                   aom_highbd_10_sub_pixel_variance8x16,
+                   aom_highbd_10_sub_pixel_avg_variance8x16,
+                   aom_highbd_sad8x16x3_bits10, aom_highbd_sad8x16x8_bits10,
+                   aom_highbd_sad8x16x4d_bits10)
 
         HIGHBD_BFP(
-            BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
-            vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
-            vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
-            vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
+            BLOCK_8X8, aom_highbd_sad8x8_bits10, aom_highbd_sad8x8_avg_bits10,
+            aom_highbd_10_variance8x8, aom_highbd_10_sub_pixel_variance8x8,
+            aom_highbd_10_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits10,
+            aom_highbd_sad8x8x8_bits10, aom_highbd_sad8x8x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
-                   vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
-                   vpx_highbd_10_sub_pixel_variance8x4,
-                   vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
-                   vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
+        HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits10,
+                   aom_highbd_sad8x4_avg_bits10, aom_highbd_10_variance8x4,
+                   aom_highbd_10_sub_pixel_variance8x4,
+                   aom_highbd_10_sub_pixel_avg_variance8x4, NULL,
+                   aom_highbd_sad8x4x8_bits10, aom_highbd_sad8x4x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
-                   vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
-                   vpx_highbd_10_sub_pixel_variance4x8,
-                   vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
-                   vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
+        HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits10,
+                   aom_highbd_sad4x8_avg_bits10, aom_highbd_10_variance4x8,
+                   aom_highbd_10_sub_pixel_variance4x8,
+                   aom_highbd_10_sub_pixel_avg_variance4x8, NULL,
+                   aom_highbd_sad4x8x8_bits10, aom_highbd_sad4x8x4d_bits10)
 
         HIGHBD_BFP(
-            BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
-            vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
-            vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
-            vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
+            BLOCK_4X4, aom_highbd_sad4x4_bits10, aom_highbd_sad4x4_avg_bits10,
+            aom_highbd_10_variance4x4, aom_highbd_10_sub_pixel_variance4x4,
+            aom_highbd_10_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits10,
+            aom_highbd_sad4x4x8_bits10, aom_highbd_sad4x4x4d_bits10)
 
 #if CONFIG_EXT_PARTITION
         HIGHBD_BFP(
-            BLOCK_128X128, vpx_highbd_sad128x128_bits10,
-            vpx_highbd_sad128x128_avg_bits10, vpx_highbd_10_variance128x128,
-            vpx_highbd_10_sub_pixel_variance128x128,
-            vpx_highbd_10_sub_pixel_avg_variance128x128,
-            vpx_highbd_sad128x128x3_bits10, vpx_highbd_sad128x128x8_bits10,
-            vpx_highbd_sad128x128x4d_bits10)
+            BLOCK_128X128, aom_highbd_sad128x128_bits10,
+            aom_highbd_sad128x128_avg_bits10, aom_highbd_10_variance128x128,
+            aom_highbd_10_sub_pixel_variance128x128,
+            aom_highbd_10_sub_pixel_avg_variance128x128,
+            aom_highbd_sad128x128x3_bits10, aom_highbd_sad128x128x8_bits10,
+            aom_highbd_sad128x128x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits10,
-                   vpx_highbd_sad128x64_avg_bits10,
-                   vpx_highbd_10_variance128x64,
-                   vpx_highbd_10_sub_pixel_variance128x64,
-                   vpx_highbd_10_sub_pixel_avg_variance128x64, NULL, NULL,
-                   vpx_highbd_sad128x64x4d_bits10)
+        HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits10,
+                   aom_highbd_sad128x64_avg_bits10,
+                   aom_highbd_10_variance128x64,
+                   aom_highbd_10_sub_pixel_variance128x64,
+                   aom_highbd_10_sub_pixel_avg_variance128x64, NULL, NULL,
+                   aom_highbd_sad128x64x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits10,
-                   vpx_highbd_sad64x128_avg_bits10,
-                   vpx_highbd_10_variance64x128,
-                   vpx_highbd_10_sub_pixel_variance64x128,
-                   vpx_highbd_10_sub_pixel_avg_variance64x128, NULL, NULL,
-                   vpx_highbd_sad64x128x4d_bits10)
+        HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits10,
+                   aom_highbd_sad64x128_avg_bits10,
+                   aom_highbd_10_variance64x128,
+                   aom_highbd_10_sub_pixel_variance64x128,
+                   aom_highbd_10_sub_pixel_avg_variance64x128, NULL, NULL,
+                   aom_highbd_sad64x128x4d_bits10)
 #endif  // CONFIG_EXT_PARTITION
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits10,
-                    vpx_highbd_10_masked_variance128x128,
-                    vpx_highbd_10_masked_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits10,
-                    vpx_highbd_10_masked_variance128x64,
-                    vpx_highbd_10_masked_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits10,
-                    vpx_highbd_10_masked_variance64x128,
-                    vpx_highbd_10_masked_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits10,
+                    aom_highbd_10_masked_variance128x128,
+                    aom_highbd_10_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits10,
+                    aom_highbd_10_masked_variance128x64,
+                    aom_highbd_10_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits10,
+                    aom_highbd_10_masked_variance64x128,
+                    aom_highbd_10_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits10,
-                    vpx_highbd_10_masked_variance64x64,
-                    vpx_highbd_10_masked_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits10,
-                    vpx_highbd_10_masked_variance64x32,
-                    vpx_highbd_10_masked_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits10,
-                    vpx_highbd_10_masked_variance32x64,
-                    vpx_highbd_10_masked_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits10,
-                    vpx_highbd_10_masked_variance32x32,
-                    vpx_highbd_10_masked_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits10,
-                    vpx_highbd_10_masked_variance32x16,
-                    vpx_highbd_10_masked_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits10,
-                    vpx_highbd_10_masked_variance16x32,
-                    vpx_highbd_10_masked_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits10,
-                    vpx_highbd_10_masked_variance16x16,
-                    vpx_highbd_10_masked_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits10,
-                    vpx_highbd_10_masked_variance8x16,
-                    vpx_highbd_10_masked_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits10,
-                    vpx_highbd_10_masked_variance16x8,
-                    vpx_highbd_10_masked_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits10,
-                    vpx_highbd_10_masked_variance8x8,
-                    vpx_highbd_10_masked_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits10,
-                    vpx_highbd_10_masked_variance4x8,
-                    vpx_highbd_10_masked_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits10,
-                    vpx_highbd_10_masked_variance8x4,
-                    vpx_highbd_10_masked_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits10,
-                    vpx_highbd_10_masked_variance4x4,
-                    vpx_highbd_10_masked_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits10,
+                    aom_highbd_10_masked_variance64x64,
+                    aom_highbd_10_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits10,
+                    aom_highbd_10_masked_variance64x32,
+                    aom_highbd_10_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits10,
+                    aom_highbd_10_masked_variance32x64,
+                    aom_highbd_10_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits10,
+                    aom_highbd_10_masked_variance32x32,
+                    aom_highbd_10_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits10,
+                    aom_highbd_10_masked_variance32x16,
+                    aom_highbd_10_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits10,
+                    aom_highbd_10_masked_variance16x32,
+                    aom_highbd_10_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits10,
+                    aom_highbd_10_masked_variance16x16,
+                    aom_highbd_10_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits10,
+                    aom_highbd_10_masked_variance8x16,
+                    aom_highbd_10_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits10,
+                    aom_highbd_10_masked_variance16x8,
+                    aom_highbd_10_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits10,
+                    aom_highbd_10_masked_variance8x8,
+                    aom_highbd_10_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits10,
+                    aom_highbd_10_masked_variance4x8,
+                    aom_highbd_10_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits10,
+                    aom_highbd_10_masked_variance8x4,
+                    aom_highbd_10_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits10,
+                    aom_highbd_10_masked_variance4x4,
+                    aom_highbd_10_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC
 #if CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits10,
-                    vpx_highbd_10_obmc_variance128x128,
-                    vpx_highbd_10_obmc_sub_pixel_variance128x128)
-        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits10,
-                    vpx_highbd_10_obmc_variance128x64,
-                    vpx_highbd_10_obmc_sub_pixel_variance128x64)
-        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits10,
-                    vpx_highbd_10_obmc_variance64x128,
-                    vpx_highbd_10_obmc_sub_pixel_variance64x128)
+        HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits10,
+                    aom_highbd_10_obmc_variance128x128,
+                    aom_highbd_10_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits10,
+                    aom_highbd_10_obmc_variance128x64,
+                    aom_highbd_10_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits10,
+                    aom_highbd_10_obmc_variance64x128,
+                    aom_highbd_10_obmc_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits10,
-                    vpx_highbd_10_obmc_variance64x64,
-                    vpx_highbd_10_obmc_sub_pixel_variance64x64)
-        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits10,
-                    vpx_highbd_10_obmc_variance64x32,
-                    vpx_highbd_10_obmc_sub_pixel_variance64x32)
-        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits10,
-                    vpx_highbd_10_obmc_variance32x64,
-                    vpx_highbd_10_obmc_sub_pixel_variance32x64)
-        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits10,
-                    vpx_highbd_10_obmc_variance32x32,
-                    vpx_highbd_10_obmc_sub_pixel_variance32x32)
-        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits10,
-                    vpx_highbd_10_obmc_variance32x16,
-                    vpx_highbd_10_obmc_sub_pixel_variance32x16)
-        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits10,
-                    vpx_highbd_10_obmc_variance16x32,
-                    vpx_highbd_10_obmc_sub_pixel_variance16x32)
-        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits10,
-                    vpx_highbd_10_obmc_variance16x16,
-                    vpx_highbd_10_obmc_sub_pixel_variance16x16)
-        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits10,
-                    vpx_highbd_10_obmc_variance8x16,
-                    vpx_highbd_10_obmc_sub_pixel_variance8x16)
-        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits10,
-                    vpx_highbd_10_obmc_variance16x8,
-                    vpx_highbd_10_obmc_sub_pixel_variance16x8)
-        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits10,
-                    vpx_highbd_10_obmc_variance8x8,
-                    vpx_highbd_10_obmc_sub_pixel_variance8x8)
-        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits10,
-                    vpx_highbd_10_obmc_variance4x8,
-                    vpx_highbd_10_obmc_sub_pixel_variance4x8)
-        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits10,
-                    vpx_highbd_10_obmc_variance8x4,
-                    vpx_highbd_10_obmc_sub_pixel_variance8x4)
-        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits10,
-                    vpx_highbd_10_obmc_variance4x4,
-                    vpx_highbd_10_obmc_sub_pixel_variance4x4)
+        HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits10,
+                    aom_highbd_10_obmc_variance64x64,
+                    aom_highbd_10_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits10,
+                    aom_highbd_10_obmc_variance64x32,
+                    aom_highbd_10_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits10,
+                    aom_highbd_10_obmc_variance32x64,
+                    aom_highbd_10_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits10,
+                    aom_highbd_10_obmc_variance32x32,
+                    aom_highbd_10_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits10,
+                    aom_highbd_10_obmc_variance32x16,
+                    aom_highbd_10_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits10,
+                    aom_highbd_10_obmc_variance16x32,
+                    aom_highbd_10_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits10,
+                    aom_highbd_10_obmc_variance16x16,
+                    aom_highbd_10_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits10,
+                    aom_highbd_10_obmc_variance8x16,
+                    aom_highbd_10_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits10,
+                    aom_highbd_10_obmc_variance16x8,
+                    aom_highbd_10_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits10,
+                    aom_highbd_10_obmc_variance8x8,
+                    aom_highbd_10_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits10,
+                    aom_highbd_10_obmc_variance4x8,
+                    aom_highbd_10_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits10,
+                    aom_highbd_10_obmc_variance8x4,
+                    aom_highbd_10_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits10,
+                    aom_highbd_10_obmc_variance4x4,
+                    aom_highbd_10_obmc_sub_pixel_variance4x4)
 #endif  // CONFIG_OBMC
         break;
 
-      case VPX_BITS_12:
-        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
-                   vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
-                   vpx_highbd_12_sub_pixel_variance32x16,
-                   vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
-                   vpx_highbd_sad32x16x4d_bits12)
+      case AOM_BITS_12:
+        HIGHBD_BFP(BLOCK_32X16, aom_highbd_sad32x16_bits12,
+                   aom_highbd_sad32x16_avg_bits12, aom_highbd_12_variance32x16,
+                   aom_highbd_12_sub_pixel_variance32x16,
+                   aom_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
+                   aom_highbd_sad32x16x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
-                   vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
-                   vpx_highbd_12_sub_pixel_variance16x32,
-                   vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
-                   vpx_highbd_sad16x32x4d_bits12)
+        HIGHBD_BFP(BLOCK_16X32, aom_highbd_sad16x32_bits12,
+                   aom_highbd_sad16x32_avg_bits12, aom_highbd_12_variance16x32,
+                   aom_highbd_12_sub_pixel_variance16x32,
+                   aom_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
+                   aom_highbd_sad16x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
-                   vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
-                   vpx_highbd_12_sub_pixel_variance64x32,
-                   vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
-                   vpx_highbd_sad64x32x4d_bits12)
+        HIGHBD_BFP(BLOCK_64X32, aom_highbd_sad64x32_bits12,
+                   aom_highbd_sad64x32_avg_bits12, aom_highbd_12_variance64x32,
+                   aom_highbd_12_sub_pixel_variance64x32,
+                   aom_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
+                   aom_highbd_sad64x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
-                   vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
-                   vpx_highbd_12_sub_pixel_variance32x64,
-                   vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
-                   vpx_highbd_sad32x64x4d_bits12)
+        HIGHBD_BFP(BLOCK_32X64, aom_highbd_sad32x64_bits12,
+                   aom_highbd_sad32x64_avg_bits12, aom_highbd_12_variance32x64,
+                   aom_highbd_12_sub_pixel_variance32x64,
+                   aom_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
+                   aom_highbd_sad32x64x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
-                   vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
-                   vpx_highbd_12_sub_pixel_variance32x32,
-                   vpx_highbd_12_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
-                   vpx_highbd_sad32x32x4d_bits12)
+        HIGHBD_BFP(BLOCK_32X32, aom_highbd_sad32x32_bits12,
+                   aom_highbd_sad32x32_avg_bits12, aom_highbd_12_variance32x32,
+                   aom_highbd_12_sub_pixel_variance32x32,
+                   aom_highbd_12_sub_pixel_avg_variance32x32,
+                   aom_highbd_sad32x32x3_bits12, aom_highbd_sad32x32x8_bits12,
+                   aom_highbd_sad32x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
-                   vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
-                   vpx_highbd_12_sub_pixel_variance64x64,
-                   vpx_highbd_12_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
-                   vpx_highbd_sad64x64x4d_bits12)
+        HIGHBD_BFP(BLOCK_64X64, aom_highbd_sad64x64_bits12,
+                   aom_highbd_sad64x64_avg_bits12, aom_highbd_12_variance64x64,
+                   aom_highbd_12_sub_pixel_variance64x64,
+                   aom_highbd_12_sub_pixel_avg_variance64x64,
+                   aom_highbd_sad64x64x3_bits12, aom_highbd_sad64x64x8_bits12,
+                   aom_highbd_sad64x64x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
-                   vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
-                   vpx_highbd_12_sub_pixel_variance16x16,
-                   vpx_highbd_12_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
-                   vpx_highbd_sad16x16x4d_bits12)
+        HIGHBD_BFP(BLOCK_16X16, aom_highbd_sad16x16_bits12,
+                   aom_highbd_sad16x16_avg_bits12, aom_highbd_12_variance16x16,
+                   aom_highbd_12_sub_pixel_variance16x16,
+                   aom_highbd_12_sub_pixel_avg_variance16x16,
+                   aom_highbd_sad16x16x3_bits12, aom_highbd_sad16x16x8_bits12,
+                   aom_highbd_sad16x16x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
-                   vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
-                   vpx_highbd_12_sub_pixel_variance16x8,
-                   vpx_highbd_12_sub_pixel_avg_variance16x8,
-                   vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
-                   vpx_highbd_sad16x8x4d_bits12)
+        HIGHBD_BFP(BLOCK_16X8, aom_highbd_sad16x8_bits12,
+                   aom_highbd_sad16x8_avg_bits12, aom_highbd_12_variance16x8,
+                   aom_highbd_12_sub_pixel_variance16x8,
+                   aom_highbd_12_sub_pixel_avg_variance16x8,
+                   aom_highbd_sad16x8x3_bits12, aom_highbd_sad16x8x8_bits12,
+                   aom_highbd_sad16x8x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
-                   vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
-                   vpx_highbd_12_sub_pixel_variance8x16,
-                   vpx_highbd_12_sub_pixel_avg_variance8x16,
-                   vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
-                   vpx_highbd_sad8x16x4d_bits12)
+        HIGHBD_BFP(BLOCK_8X16, aom_highbd_sad8x16_bits12,
+                   aom_highbd_sad8x16_avg_bits12, aom_highbd_12_variance8x16,
+                   aom_highbd_12_sub_pixel_variance8x16,
+                   aom_highbd_12_sub_pixel_avg_variance8x16,
+                   aom_highbd_sad8x16x3_bits12, aom_highbd_sad8x16x8_bits12,
+                   aom_highbd_sad8x16x4d_bits12)
 
         HIGHBD_BFP(
-            BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
-            vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
-            vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
-            vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
+            BLOCK_8X8, aom_highbd_sad8x8_bits12, aom_highbd_sad8x8_avg_bits12,
+            aom_highbd_12_variance8x8, aom_highbd_12_sub_pixel_variance8x8,
+            aom_highbd_12_sub_pixel_avg_variance8x8, aom_highbd_sad8x8x3_bits12,
+            aom_highbd_sad8x8x8_bits12, aom_highbd_sad8x8x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
-                   vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
-                   vpx_highbd_12_sub_pixel_variance8x4,
-                   vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
-                   vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
+        HIGHBD_BFP(BLOCK_8X4, aom_highbd_sad8x4_bits12,
+                   aom_highbd_sad8x4_avg_bits12, aom_highbd_12_variance8x4,
+                   aom_highbd_12_sub_pixel_variance8x4,
+                   aom_highbd_12_sub_pixel_avg_variance8x4, NULL,
+                   aom_highbd_sad8x4x8_bits12, aom_highbd_sad8x4x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
-                   vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
-                   vpx_highbd_12_sub_pixel_variance4x8,
-                   vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
-                   vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
+        HIGHBD_BFP(BLOCK_4X8, aom_highbd_sad4x8_bits12,
+                   aom_highbd_sad4x8_avg_bits12, aom_highbd_12_variance4x8,
+                   aom_highbd_12_sub_pixel_variance4x8,
+                   aom_highbd_12_sub_pixel_avg_variance4x8, NULL,
+                   aom_highbd_sad4x8x8_bits12, aom_highbd_sad4x8x4d_bits12)
 
         HIGHBD_BFP(
-            BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
-            vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
-            vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
-            vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
+            BLOCK_4X4, aom_highbd_sad4x4_bits12, aom_highbd_sad4x4_avg_bits12,
+            aom_highbd_12_variance4x4, aom_highbd_12_sub_pixel_variance4x4,
+            aom_highbd_12_sub_pixel_avg_variance4x4, aom_highbd_sad4x4x3_bits12,
+            aom_highbd_sad4x4x8_bits12, aom_highbd_sad4x4x4d_bits12)
 
 #if CONFIG_EXT_PARTITION
         HIGHBD_BFP(
-            BLOCK_128X128, vpx_highbd_sad128x128_bits12,
-            vpx_highbd_sad128x128_avg_bits12, vpx_highbd_12_variance128x128,
-            vpx_highbd_12_sub_pixel_variance128x128,
-            vpx_highbd_12_sub_pixel_avg_variance128x128,
-            vpx_highbd_sad128x128x3_bits12, vpx_highbd_sad128x128x8_bits12,
-            vpx_highbd_sad128x128x4d_bits12)
+            BLOCK_128X128, aom_highbd_sad128x128_bits12,
+            aom_highbd_sad128x128_avg_bits12, aom_highbd_12_variance128x128,
+            aom_highbd_12_sub_pixel_variance128x128,
+            aom_highbd_12_sub_pixel_avg_variance128x128,
+            aom_highbd_sad128x128x3_bits12, aom_highbd_sad128x128x8_bits12,
+            aom_highbd_sad128x128x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits12,
-                   vpx_highbd_sad128x64_avg_bits12,
-                   vpx_highbd_12_variance128x64,
-                   vpx_highbd_12_sub_pixel_variance128x64,
-                   vpx_highbd_12_sub_pixel_avg_variance128x64, NULL, NULL,
-                   vpx_highbd_sad128x64x4d_bits12)
+        HIGHBD_BFP(BLOCK_128X64, aom_highbd_sad128x64_bits12,
+                   aom_highbd_sad128x64_avg_bits12,
+                   aom_highbd_12_variance128x64,
+                   aom_highbd_12_sub_pixel_variance128x64,
+                   aom_highbd_12_sub_pixel_avg_variance128x64, NULL, NULL,
+                   aom_highbd_sad128x64x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits12,
-                   vpx_highbd_sad64x128_avg_bits12,
-                   vpx_highbd_12_variance64x128,
-                   vpx_highbd_12_sub_pixel_variance64x128,
-                   vpx_highbd_12_sub_pixel_avg_variance64x128, NULL, NULL,
-                   vpx_highbd_sad64x128x4d_bits12)
+        HIGHBD_BFP(BLOCK_64X128, aom_highbd_sad64x128_bits12,
+                   aom_highbd_sad64x128_avg_bits12,
+                   aom_highbd_12_variance64x128,
+                   aom_highbd_12_sub_pixel_variance64x128,
+                   aom_highbd_12_sub_pixel_avg_variance64x128, NULL, NULL,
+                   aom_highbd_sad64x128x4d_bits12)
 #endif  // CONFIG_EXT_PARTITION
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits12,
-                    vpx_highbd_12_masked_variance128x128,
-                    vpx_highbd_12_masked_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits12,
-                    vpx_highbd_12_masked_variance128x64,
-                    vpx_highbd_12_masked_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits12,
-                    vpx_highbd_12_masked_variance64x128,
-                    vpx_highbd_12_masked_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits12,
+                    aom_highbd_12_masked_variance128x128,
+                    aom_highbd_12_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits12,
+                    aom_highbd_12_masked_variance128x64,
+                    aom_highbd_12_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits12,
+                    aom_highbd_12_masked_variance64x128,
+                    aom_highbd_12_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits12,
-                    vpx_highbd_12_masked_variance64x64,
-                    vpx_highbd_12_masked_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits12,
-                    vpx_highbd_12_masked_variance64x32,
-                    vpx_highbd_12_masked_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits12,
-                    vpx_highbd_12_masked_variance32x64,
-                    vpx_highbd_12_masked_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits12,
-                    vpx_highbd_12_masked_variance32x32,
-                    vpx_highbd_12_masked_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits12,
-                    vpx_highbd_12_masked_variance32x16,
-                    vpx_highbd_12_masked_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits12,
-                    vpx_highbd_12_masked_variance16x32,
-                    vpx_highbd_12_masked_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits12,
-                    vpx_highbd_12_masked_variance16x16,
-                    vpx_highbd_12_masked_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits12,
-                    vpx_highbd_12_masked_variance8x16,
-                    vpx_highbd_12_masked_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits12,
-                    vpx_highbd_12_masked_variance16x8,
-                    vpx_highbd_12_masked_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits12,
-                    vpx_highbd_12_masked_variance8x8,
-                    vpx_highbd_12_masked_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits12,
-                    vpx_highbd_12_masked_variance4x8,
-                    vpx_highbd_12_masked_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits12,
-                    vpx_highbd_12_masked_variance8x4,
-                    vpx_highbd_12_masked_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits12,
-                    vpx_highbd_12_masked_variance4x4,
-                    vpx_highbd_12_masked_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits12,
+                    aom_highbd_12_masked_variance64x64,
+                    aom_highbd_12_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits12,
+                    aom_highbd_12_masked_variance64x32,
+                    aom_highbd_12_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits12,
+                    aom_highbd_12_masked_variance32x64,
+                    aom_highbd_12_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits12,
+                    aom_highbd_12_masked_variance32x32,
+                    aom_highbd_12_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits12,
+                    aom_highbd_12_masked_variance32x16,
+                    aom_highbd_12_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits12,
+                    aom_highbd_12_masked_variance16x32,
+                    aom_highbd_12_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits12,
+                    aom_highbd_12_masked_variance16x16,
+                    aom_highbd_12_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits12,
+                    aom_highbd_12_masked_variance8x16,
+                    aom_highbd_12_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits12,
+                    aom_highbd_12_masked_variance16x8,
+                    aom_highbd_12_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits12,
+                    aom_highbd_12_masked_variance8x8,
+                    aom_highbd_12_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits12,
+                    aom_highbd_12_masked_variance4x8,
+                    aom_highbd_12_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits12,
+                    aom_highbd_12_masked_variance8x4,
+                    aom_highbd_12_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits12,
+                    aom_highbd_12_masked_variance4x4,
+                    aom_highbd_12_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_OBMC
 #if CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits12,
-                    vpx_highbd_12_obmc_variance128x128,
-                    vpx_highbd_12_obmc_sub_pixel_variance128x128)
-        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits12,
-                    vpx_highbd_12_obmc_variance128x64,
-                    vpx_highbd_12_obmc_sub_pixel_variance128x64)
-        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits12,
-                    vpx_highbd_12_obmc_variance64x128,
-                    vpx_highbd_12_obmc_sub_pixel_variance64x128)
+        HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits12,
+                    aom_highbd_12_obmc_variance128x128,
+                    aom_highbd_12_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, aom_highbd_obmc_sad128x64_bits12,
+                    aom_highbd_12_obmc_variance128x64,
+                    aom_highbd_12_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, aom_highbd_obmc_sad64x128_bits12,
+                    aom_highbd_12_obmc_variance64x128,
+                    aom_highbd_12_obmc_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits12,
-                    vpx_highbd_12_obmc_variance64x64,
-                    vpx_highbd_12_obmc_sub_pixel_variance64x64)
-        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits12,
-                    vpx_highbd_12_obmc_variance64x32,
-                    vpx_highbd_12_obmc_sub_pixel_variance64x32)
-        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits12,
-                    vpx_highbd_12_obmc_variance32x64,
-                    vpx_highbd_12_obmc_sub_pixel_variance32x64)
-        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits12,
-                    vpx_highbd_12_obmc_variance32x32,
-                    vpx_highbd_12_obmc_sub_pixel_variance32x32)
-        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits12,
-                    vpx_highbd_12_obmc_variance32x16,
-                    vpx_highbd_12_obmc_sub_pixel_variance32x16)
-        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits12,
-                    vpx_highbd_12_obmc_variance16x32,
-                    vpx_highbd_12_obmc_sub_pixel_variance16x32)
-        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits12,
-                    vpx_highbd_12_obmc_variance16x16,
-                    vpx_highbd_12_obmc_sub_pixel_variance16x16)
-        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits12,
-                    vpx_highbd_12_obmc_variance8x16,
-                    vpx_highbd_12_obmc_sub_pixel_variance8x16)
-        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits12,
-                    vpx_highbd_12_obmc_variance16x8,
-                    vpx_highbd_12_obmc_sub_pixel_variance16x8)
-        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits12,
-                    vpx_highbd_12_obmc_variance8x8,
-                    vpx_highbd_12_obmc_sub_pixel_variance8x8)
-        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits12,
-                    vpx_highbd_12_obmc_variance4x8,
-                    vpx_highbd_12_obmc_sub_pixel_variance4x8)
-        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits12,
-                    vpx_highbd_12_obmc_variance8x4,
-                    vpx_highbd_12_obmc_sub_pixel_variance8x4)
-        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits12,
-                    vpx_highbd_12_obmc_variance4x4,
-                    vpx_highbd_12_obmc_sub_pixel_variance4x4)
+        HIGHBD_OBFP(BLOCK_64X64, aom_highbd_obmc_sad64x64_bits12,
+                    aom_highbd_12_obmc_variance64x64,
+                    aom_highbd_12_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, aom_highbd_obmc_sad64x32_bits12,
+                    aom_highbd_12_obmc_variance64x32,
+                    aom_highbd_12_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, aom_highbd_obmc_sad32x64_bits12,
+                    aom_highbd_12_obmc_variance32x64,
+                    aom_highbd_12_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, aom_highbd_obmc_sad32x32_bits12,
+                    aom_highbd_12_obmc_variance32x32,
+                    aom_highbd_12_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, aom_highbd_obmc_sad32x16_bits12,
+                    aom_highbd_12_obmc_variance32x16,
+                    aom_highbd_12_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, aom_highbd_obmc_sad16x32_bits12,
+                    aom_highbd_12_obmc_variance16x32,
+                    aom_highbd_12_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, aom_highbd_obmc_sad16x16_bits12,
+                    aom_highbd_12_obmc_variance16x16,
+                    aom_highbd_12_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, aom_highbd_obmc_sad8x16_bits12,
+                    aom_highbd_12_obmc_variance8x16,
+                    aom_highbd_12_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, aom_highbd_obmc_sad16x8_bits12,
+                    aom_highbd_12_obmc_variance16x8,
+                    aom_highbd_12_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, aom_highbd_obmc_sad8x8_bits12,
+                    aom_highbd_12_obmc_variance8x8,
+                    aom_highbd_12_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, aom_highbd_obmc_sad4x8_bits12,
+                    aom_highbd_12_obmc_variance4x8,
+                    aom_highbd_12_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, aom_highbd_obmc_sad8x4_bits12,
+                    aom_highbd_12_obmc_variance8x4,
+                    aom_highbd_12_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, aom_highbd_obmc_sad4x4_bits12,
+                    aom_highbd_12_obmc_variance4x4,
+                    aom_highbd_12_obmc_sub_pixel_variance4x4)
 #endif  // CONFIG_OBMC
         break;
 
       default:
         assert(0 &&
-               "cm->bit_depth should be VPX_BITS_8, "
-               "VPX_BITS_10 or VPX_BITS_12");
+               "cm->bit_depth should be AOM_BITS_8, "
+               "AOM_BITS_10 or AOM_BITS_12");
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void realloc_segmentation_maps(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void realloc_segmentation_maps(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   // Create the encoder segmentation map and set all entries to 0
-  vpx_free(cpi->segmentation_map);
+  aom_free(cpi->segmentation_map);
   CHECK_MEM_ERROR(cm, cpi->segmentation_map,
-                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+                  aom_calloc(cm->mi_rows * cm->mi_cols, 1));
 
   // Create a map used for cyclic background refresh.
-  if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  if (cpi->cyclic_refresh) av1_cyclic_refresh_free(cpi->cyclic_refresh);
   CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
-                  vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
+                  av1_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
 
   // Create a map used to mark inactive areas.
-  vpx_free(cpi->active_map.map);
+  aom_free(cpi->active_map.map);
   CHECK_MEM_ERROR(cm, cpi->active_map.map,
-                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+                  aom_calloc(cm->mi_rows * cm->mi_cols, 1));
 
   // And a place holder structure is the coding context
   // for use if we want to save and restore it
-  vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+  aom_free(cpi->coding_context.last_frame_seg_map_copy);
   CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
-                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+                  aom_calloc(cm->mi_rows * cm->mi_cols, 1));
 }
 
-void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
   if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
@@ -1907,19 +1907,19 @@
   cm->color_range = oxcf->color_range;
 
   if (cm->profile <= PROFILE_1)
-    assert(cm->bit_depth == VPX_BITS_8);
+    assert(cm->bit_depth == AOM_BITS_8);
   else
-    assert(cm->bit_depth > VPX_BITS_8);
+    assert(cm->bit_depth > AOM_BITS_8);
 
   cpi->oxcf = *oxcf;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_GLOBAL_MOTION
   cpi->td.mb.e_mbd.global_motion = cm->global_motion;
 #endif  // CONFIG_GLOBAL_MOTION
 
-  if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+  if ((oxcf->pass == 0) && (oxcf->rc_mode == AOM_Q)) {
     rc->baseline_gf_interval = FIXED_GF_INTERVAL;
   } else {
     rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
@@ -1937,21 +1937,21 @@
           : REFRESH_FRAME_CONTEXT_BACKWARD;
   cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
 
-  cm->allow_screen_content_tools = (cpi->oxcf.content == VPX_CONTENT_SCREEN);
+  cm->allow_screen_content_tools = (cpi->oxcf.content == AOM_CONTENT_SCREEN);
   if (cm->allow_screen_content_tools) {
     MACROBLOCK *x = &cpi->td.mb;
     if (x->palette_buffer == 0) {
       CHECK_MEM_ERROR(cm, x->palette_buffer,
-                      vpx_memalign(16, sizeof(*x->palette_buffer)));
+                      aom_memalign(16, sizeof(*x->palette_buffer)));
     }
     // Reallocate the pc_tree, as it's contents depends on
     // the state of cm->allow_screen_content_tools
-    vp10_free_pc_tree(&cpi->td);
-    vp10_setup_pc_tree(&cpi->common, &cpi->td);
+    av1_free_pc_tree(&cpi->td);
+    av1_setup_pc_tree(&cpi->common, &cpi->td);
   }
 
-  vp10_reset_segment_features(cm);
-  vp10_set_high_precision_mv(cpi, 0);
+  av1_reset_segment_features(cm);
+  av1_set_high_precision_mv(cpi, 0);
 
   {
     int i;
@@ -1965,11 +1965,11 @@
 
   // Under a configuration change, where maximum_buffer_size may change,
   // keep buffer level clipped to the maximum allowed buffer size.
-  rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
-  rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
+  rc->bits_off_target = AOMMIN(rc->bits_off_target, rc->maximum_buffer_size);
+  rc->buffer_level = AOMMIN(rc->buffer_level, rc->maximum_buffer_size);
 
   // Set up frame rate and related parameters rate control values.
-  vp10_new_framerate(cpi, cpi->framerate);
+  av1_new_framerate(cpi, cpi->framerate);
 
   // Set absolute upper and lower quality limits
   rc->worst_quality = cpi->oxcf.worst_allowed_q;
@@ -1989,8 +1989,8 @@
 
   if (cpi->initial_width) {
     if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
-      vp10_free_context_buffers(cm);
-      vp10_alloc_compressor_data(cpi);
+      av1_free_context_buffers(cm);
+      av1_alloc_compressor_data(cpi);
       realloc_segmentation_maps(cpi);
       cpi->initial_width = cpi->initial_height = 0;
     }
@@ -2017,7 +2017,7 @@
   cpi->ext_refresh_frame_flags_pending = 0;
   cpi->ext_refresh_frame_context_pending = 0;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_set_var_fns(cpi);
 #endif
 }
@@ -2066,7 +2066,7 @@
   } while (++i <= MV_MAX);
 }
 
-static INLINE void init_upsampled_ref_frame_bufs(VP10_COMP *cpi) {
+static INLINE void init_upsampled_ref_frame_bufs(AV1_COMP *cpi) {
   int i;
 
   for (i = 0; i < (REF_FRAMES + 1); ++i) {
@@ -2075,31 +2075,31 @@
   }
 }
 
-VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
-                                  BufferPool *const pool) {
+AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
+                                BufferPool *const pool) {
   unsigned int i;
-  VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP));
-  VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
+  AV1_COMP *volatile const cpi = aom_memalign(32, sizeof(AV1_COMP));
+  AV1_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
 
   if (!cm) return NULL;
 
-  vp10_zero(*cpi);
+  av1_zero(*cpi);
 
   if (setjmp(cm->error.jmp)) {
     cm->error.setjmp = 0;
-    vp10_remove_compressor(cpi);
+    av1_remove_compressor(cpi);
     return 0;
   }
 
   cm->error.setjmp = 1;
-  cm->alloc_mi = vp10_enc_alloc_mi;
-  cm->free_mi = vp10_enc_free_mi;
-  cm->setup_mi = vp10_enc_setup_mi;
+  cm->alloc_mi = av1_enc_alloc_mi;
+  cm->free_mi = av1_enc_free_mi;
+  cm->setup_mi = av1_enc_setup_mi;
 
-  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
   CHECK_MEM_ERROR(
       cm, cm->frame_contexts,
-      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+      (FRAME_CONTEXT *)aom_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
 
   cpi->resize_state = 0;
   cpi->resize_avg_qp = 0;
@@ -2107,7 +2107,7 @@
   cpi->common.buffer_pool = pool;
 
   init_config(cpi, oxcf);
-  vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
+  av1_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
 
   cm->current_video_frame = 0;
   cpi->partition_search_skippable_frame = 0;
@@ -2119,38 +2119,38 @@
 #if CONFIG_REF_MV
   for (i = 0; i < NMV_CONTEXTS; ++i) {
     CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][0],
-                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
+                    aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
     CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][1],
-                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
+                    aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
     CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][0],
-                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
+                    aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
     CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][1],
-                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
+                    aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
   }
 #endif
 
   CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
   CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
   CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
-                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
+                  aom_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
 
   for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
        i++) {
     CHECK_MEM_ERROR(
         cm, cpi->mbgraph_stats[i].mb_stats,
-        vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+        aom_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
   }
 
 #if CONFIG_FP_MB_STATS
@@ -2158,7 +2158,7 @@
   if (cpi->use_fp_mb_stats) {
     // a place holder used to store the first pass mb stats in the first pass
     CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
-                    vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
+                    aom_calloc(cm->MBs * sizeof(uint8_t), 1));
   } else {
     cpi->twopass.frame_mb_stats_buf = NULL;
   }
@@ -2196,7 +2196,7 @@
 
   if (cpi->b_calculate_consistency) {
     CHECK_MEM_ERROR(cm, cpi->ssim_vars,
-                    vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
+                    aom_malloc(sizeof(*cpi->ssim_vars) * 4 *
                                cpi->common.mi_rows * cpi->common.mi_cols));
     cpi->worst_consistency = 100.0;
   }
@@ -2241,7 +2241,7 @@
   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
 
   if (oxcf->pass == 1) {
-    vp10_init_first_pass(cpi);
+    av1_init_first_pass(cpi);
   } else if (oxcf->pass == 2) {
     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
     const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
@@ -2263,16 +2263,16 @@
     cpi->twopass.stats_in = cpi->twopass.stats_in_start;
     cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
 
-    vp10_init_second_pass(cpi);
+    av1_init_second_pass(cpi);
   }
 
   init_upsampled_ref_frame_bufs(cpi);
 
-  vp10_set_speed_features_framesize_independent(cpi);
-  vp10_set_speed_features_framesize_dependent(cpi);
+  av1_set_speed_features_framesize_independent(cpi);
+  av1_set_speed_features_framesize_dependent(cpi);
 
   // Allocate memory to store variances for a frame.
-  CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
+  CHECK_MEM_ERROR(cm, cpi->source_diff_var, aom_calloc(cm->MBs, sizeof(diff)));
   cpi->source_var_thresh = 0;
   cpi->frames_till_next_var_check = 0;
 
@@ -2287,70 +2287,70 @@
   cpi->fn_ptr[BT].sdx4df = SDX4DF;
 
 #if CONFIG_EXT_PARTITION
-  BFP(BLOCK_128X128, vpx_sad128x128, vpx_sad128x128_avg, vpx_variance128x128,
-      vpx_sub_pixel_variance128x128, vpx_sub_pixel_avg_variance128x128,
-      vpx_sad128x128x3, vpx_sad128x128x8, vpx_sad128x128x4d)
+  BFP(BLOCK_128X128, aom_sad128x128, aom_sad128x128_avg, aom_variance128x128,
+      aom_sub_pixel_variance128x128, aom_sub_pixel_avg_variance128x128,
+      aom_sad128x128x3, aom_sad128x128x8, aom_sad128x128x4d)
 
-  BFP(BLOCK_128X64, vpx_sad128x64, vpx_sad128x64_avg, vpx_variance128x64,
-      vpx_sub_pixel_variance128x64, vpx_sub_pixel_avg_variance128x64, NULL,
-      NULL, vpx_sad128x64x4d)
+  BFP(BLOCK_128X64, aom_sad128x64, aom_sad128x64_avg, aom_variance128x64,
+      aom_sub_pixel_variance128x64, aom_sub_pixel_avg_variance128x64, NULL,
+      NULL, aom_sad128x64x4d)
 
-  BFP(BLOCK_64X128, vpx_sad64x128, vpx_sad64x128_avg, vpx_variance64x128,
-      vpx_sub_pixel_variance64x128, vpx_sub_pixel_avg_variance64x128, NULL,
-      NULL, vpx_sad64x128x4d)
+  BFP(BLOCK_64X128, aom_sad64x128, aom_sad64x128_avg, aom_variance64x128,
+      aom_sub_pixel_variance64x128, aom_sub_pixel_avg_variance64x128, NULL,
+      NULL, aom_sad64x128x4d)
 #endif  // CONFIG_EXT_PARTITION
 
-  BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
-      vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
-      vpx_sad32x16x4d)
+  BFP(BLOCK_32X16, aom_sad32x16, aom_sad32x16_avg, aom_variance32x16,
+      aom_sub_pixel_variance32x16, aom_sub_pixel_avg_variance32x16, NULL, NULL,
+      aom_sad32x16x4d)
 
-  BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
-      vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
-      vpx_sad16x32x4d)
+  BFP(BLOCK_16X32, aom_sad16x32, aom_sad16x32_avg, aom_variance16x32,
+      aom_sub_pixel_variance16x32, aom_sub_pixel_avg_variance16x32, NULL, NULL,
+      aom_sad16x32x4d)
 
-  BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
-      vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
-      vpx_sad64x32x4d)
+  BFP(BLOCK_64X32, aom_sad64x32, aom_sad64x32_avg, aom_variance64x32,
+      aom_sub_pixel_variance64x32, aom_sub_pixel_avg_variance64x32, NULL, NULL,
+      aom_sad64x32x4d)
 
-  BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
-      vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
-      vpx_sad32x64x4d)
+  BFP(BLOCK_32X64, aom_sad32x64, aom_sad32x64_avg, aom_variance32x64,
+      aom_sub_pixel_variance32x64, aom_sub_pixel_avg_variance32x64, NULL, NULL,
+      aom_sad32x64x4d)
 
-  BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
-      vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
-      vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
+  BFP(BLOCK_32X32, aom_sad32x32, aom_sad32x32_avg, aom_variance32x32,
+      aom_sub_pixel_variance32x32, aom_sub_pixel_avg_variance32x32,
+      aom_sad32x32x3, aom_sad32x32x8, aom_sad32x32x4d)
 
-  BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
-      vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
-      vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
+  BFP(BLOCK_64X64, aom_sad64x64, aom_sad64x64_avg, aom_variance64x64,
+      aom_sub_pixel_variance64x64, aom_sub_pixel_avg_variance64x64,
+      aom_sad64x64x3, aom_sad64x64x8, aom_sad64x64x4d)
 
-  BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
-      vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
-      vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
+  BFP(BLOCK_16X16, aom_sad16x16, aom_sad16x16_avg, aom_variance16x16,
+      aom_sub_pixel_variance16x16, aom_sub_pixel_avg_variance16x16,
+      aom_sad16x16x3, aom_sad16x16x8, aom_sad16x16x4d)
 
-  BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
-      vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
-      vpx_sad16x8x8, vpx_sad16x8x4d)
+  BFP(BLOCK_16X8, aom_sad16x8, aom_sad16x8_avg, aom_variance16x8,
+      aom_sub_pixel_variance16x8, aom_sub_pixel_avg_variance16x8, aom_sad16x8x3,
+      aom_sad16x8x8, aom_sad16x8x4d)
 
-  BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
-      vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
-      vpx_sad8x16x8, vpx_sad8x16x4d)
+  BFP(BLOCK_8X16, aom_sad8x16, aom_sad8x16_avg, aom_variance8x16,
+      aom_sub_pixel_variance8x16, aom_sub_pixel_avg_variance8x16, aom_sad8x16x3,
+      aom_sad8x16x8, aom_sad8x16x4d)
 
-  BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
-      vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
-      vpx_sad8x8x8, vpx_sad8x8x4d)
+  BFP(BLOCK_8X8, aom_sad8x8, aom_sad8x8_avg, aom_variance8x8,
+      aom_sub_pixel_variance8x8, aom_sub_pixel_avg_variance8x8, aom_sad8x8x3,
+      aom_sad8x8x8, aom_sad8x8x4d)
 
-  BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
-      vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
-      vpx_sad8x4x8, vpx_sad8x4x4d)
+  BFP(BLOCK_8X4, aom_sad8x4, aom_sad8x4_avg, aom_variance8x4,
+      aom_sub_pixel_variance8x4, aom_sub_pixel_avg_variance8x4, NULL,
+      aom_sad8x4x8, aom_sad8x4x4d)
 
-  BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
-      vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
-      vpx_sad4x8x8, vpx_sad4x8x4d)
+  BFP(BLOCK_4X8, aom_sad4x8, aom_sad4x8_avg, aom_variance4x8,
+      aom_sub_pixel_variance4x8, aom_sub_pixel_avg_variance4x8, NULL,
+      aom_sad4x8x8, aom_sad4x8x4d)
 
-  BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
-      vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
-      vpx_sad4x4x8, vpx_sad4x4x4d)
+  BFP(BLOCK_4X4, aom_sad4x4, aom_sad4x4_avg, aom_variance4x4,
+      aom_sub_pixel_variance4x4, aom_sub_pixel_avg_variance4x4, aom_sad4x4x3,
+      aom_sad4x4x8, aom_sad4x4x4d)
 
 #if CONFIG_OBMC
 #define OBFP(BT, OSDF, OVF, OSVF) \
@@ -2359,39 +2359,39 @@
   cpi->fn_ptr[BT].osvf = OSVF;
 
 #if CONFIG_EXT_PARTITION
-  OBFP(BLOCK_128X128, vpx_obmc_sad128x128, vpx_obmc_variance128x128,
-       vpx_obmc_sub_pixel_variance128x128)
-  OBFP(BLOCK_128X64, vpx_obmc_sad128x64, vpx_obmc_variance128x64,
-       vpx_obmc_sub_pixel_variance128x64)
-  OBFP(BLOCK_64X128, vpx_obmc_sad64x128, vpx_obmc_variance64x128,
-       vpx_obmc_sub_pixel_variance64x128)
+  OBFP(BLOCK_128X128, aom_obmc_sad128x128, aom_obmc_variance128x128,
+       aom_obmc_sub_pixel_variance128x128)
+  OBFP(BLOCK_128X64, aom_obmc_sad128x64, aom_obmc_variance128x64,
+       aom_obmc_sub_pixel_variance128x64)
+  OBFP(BLOCK_64X128, aom_obmc_sad64x128, aom_obmc_variance64x128,
+       aom_obmc_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-  OBFP(BLOCK_64X64, vpx_obmc_sad64x64, vpx_obmc_variance64x64,
-       vpx_obmc_sub_pixel_variance64x64)
-  OBFP(BLOCK_64X32, vpx_obmc_sad64x32, vpx_obmc_variance64x32,
-       vpx_obmc_sub_pixel_variance64x32)
-  OBFP(BLOCK_32X64, vpx_obmc_sad32x64, vpx_obmc_variance32x64,
-       vpx_obmc_sub_pixel_variance32x64)
-  OBFP(BLOCK_32X32, vpx_obmc_sad32x32, vpx_obmc_variance32x32,
-       vpx_obmc_sub_pixel_variance32x32)
-  OBFP(BLOCK_32X16, vpx_obmc_sad32x16, vpx_obmc_variance32x16,
-       vpx_obmc_sub_pixel_variance32x16)
-  OBFP(BLOCK_16X32, vpx_obmc_sad16x32, vpx_obmc_variance16x32,
-       vpx_obmc_sub_pixel_variance16x32)
-  OBFP(BLOCK_16X16, vpx_obmc_sad16x16, vpx_obmc_variance16x16,
-       vpx_obmc_sub_pixel_variance16x16)
-  OBFP(BLOCK_16X8, vpx_obmc_sad16x8, vpx_obmc_variance16x8,
-       vpx_obmc_sub_pixel_variance16x8)
-  OBFP(BLOCK_8X16, vpx_obmc_sad8x16, vpx_obmc_variance8x16,
-       vpx_obmc_sub_pixel_variance8x16)
-  OBFP(BLOCK_8X8, vpx_obmc_sad8x8, vpx_obmc_variance8x8,
-       vpx_obmc_sub_pixel_variance8x8)
-  OBFP(BLOCK_4X8, vpx_obmc_sad4x8, vpx_obmc_variance4x8,
-       vpx_obmc_sub_pixel_variance4x8)
-  OBFP(BLOCK_8X4, vpx_obmc_sad8x4, vpx_obmc_variance8x4,
-       vpx_obmc_sub_pixel_variance8x4)
-  OBFP(BLOCK_4X4, vpx_obmc_sad4x4, vpx_obmc_variance4x4,
-       vpx_obmc_sub_pixel_variance4x4)
+  OBFP(BLOCK_64X64, aom_obmc_sad64x64, aom_obmc_variance64x64,
+       aom_obmc_sub_pixel_variance64x64)
+  OBFP(BLOCK_64X32, aom_obmc_sad64x32, aom_obmc_variance64x32,
+       aom_obmc_sub_pixel_variance64x32)
+  OBFP(BLOCK_32X64, aom_obmc_sad32x64, aom_obmc_variance32x64,
+       aom_obmc_sub_pixel_variance32x64)
+  OBFP(BLOCK_32X32, aom_obmc_sad32x32, aom_obmc_variance32x32,
+       aom_obmc_sub_pixel_variance32x32)
+  OBFP(BLOCK_32X16, aom_obmc_sad32x16, aom_obmc_variance32x16,
+       aom_obmc_sub_pixel_variance32x16)
+  OBFP(BLOCK_16X32, aom_obmc_sad16x32, aom_obmc_variance16x32,
+       aom_obmc_sub_pixel_variance16x32)
+  OBFP(BLOCK_16X16, aom_obmc_sad16x16, aom_obmc_variance16x16,
+       aom_obmc_sub_pixel_variance16x16)
+  OBFP(BLOCK_16X8, aom_obmc_sad16x8, aom_obmc_variance16x8,
+       aom_obmc_sub_pixel_variance16x8)
+  OBFP(BLOCK_8X16, aom_obmc_sad8x16, aom_obmc_variance8x16,
+       aom_obmc_sub_pixel_variance8x16)
+  OBFP(BLOCK_8X8, aom_obmc_sad8x8, aom_obmc_variance8x8,
+       aom_obmc_sub_pixel_variance8x8)
+  OBFP(BLOCK_4X8, aom_obmc_sad4x8, aom_obmc_variance4x8,
+       aom_obmc_sub_pixel_variance4x8)
+  OBFP(BLOCK_8X4, aom_obmc_sad8x4, aom_obmc_variance8x4,
+       aom_obmc_sub_pixel_variance8x4)
+  OBFP(BLOCK_4X4, aom_obmc_sad4x4, aom_obmc_variance4x4,
+       aom_obmc_sub_pixel_variance4x4)
 #endif  // CONFIG_OBMC
 
 #if CONFIG_EXT_INTER
@@ -2401,58 +2401,58 @@
   cpi->fn_ptr[BT].msvf = MSVF;
 
 #if CONFIG_EXT_PARTITION
-  MBFP(BLOCK_128X128, vpx_masked_sad128x128, vpx_masked_variance128x128,
-       vpx_masked_sub_pixel_variance128x128)
-  MBFP(BLOCK_128X64, vpx_masked_sad128x64, vpx_masked_variance128x64,
-       vpx_masked_sub_pixel_variance128x64)
-  MBFP(BLOCK_64X128, vpx_masked_sad64x128, vpx_masked_variance64x128,
-       vpx_masked_sub_pixel_variance64x128)
+  MBFP(BLOCK_128X128, aom_masked_sad128x128, aom_masked_variance128x128,
+       aom_masked_sub_pixel_variance128x128)
+  MBFP(BLOCK_128X64, aom_masked_sad128x64, aom_masked_variance128x64,
+       aom_masked_sub_pixel_variance128x64)
+  MBFP(BLOCK_64X128, aom_masked_sad64x128, aom_masked_variance64x128,
+       aom_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-  MBFP(BLOCK_64X64, vpx_masked_sad64x64, vpx_masked_variance64x64,
-       vpx_masked_sub_pixel_variance64x64)
-  MBFP(BLOCK_64X32, vpx_masked_sad64x32, vpx_masked_variance64x32,
-       vpx_masked_sub_pixel_variance64x32)
-  MBFP(BLOCK_32X64, vpx_masked_sad32x64, vpx_masked_variance32x64,
-       vpx_masked_sub_pixel_variance32x64)
-  MBFP(BLOCK_32X32, vpx_masked_sad32x32, vpx_masked_variance32x32,
-       vpx_masked_sub_pixel_variance32x32)
-  MBFP(BLOCK_32X16, vpx_masked_sad32x16, vpx_masked_variance32x16,
-       vpx_masked_sub_pixel_variance32x16)
-  MBFP(BLOCK_16X32, vpx_masked_sad16x32, vpx_masked_variance16x32,
-       vpx_masked_sub_pixel_variance16x32)
-  MBFP(BLOCK_16X16, vpx_masked_sad16x16, vpx_masked_variance16x16,
-       vpx_masked_sub_pixel_variance16x16)
-  MBFP(BLOCK_16X8, vpx_masked_sad16x8, vpx_masked_variance16x8,
-       vpx_masked_sub_pixel_variance16x8)
-  MBFP(BLOCK_8X16, vpx_masked_sad8x16, vpx_masked_variance8x16,
-       vpx_masked_sub_pixel_variance8x16)
-  MBFP(BLOCK_8X8, vpx_masked_sad8x8, vpx_masked_variance8x8,
-       vpx_masked_sub_pixel_variance8x8)
-  MBFP(BLOCK_4X8, vpx_masked_sad4x8, vpx_masked_variance4x8,
-       vpx_masked_sub_pixel_variance4x8)
-  MBFP(BLOCK_8X4, vpx_masked_sad8x4, vpx_masked_variance8x4,
-       vpx_masked_sub_pixel_variance8x4)
-  MBFP(BLOCK_4X4, vpx_masked_sad4x4, vpx_masked_variance4x4,
-       vpx_masked_sub_pixel_variance4x4)
+  MBFP(BLOCK_64X64, aom_masked_sad64x64, aom_masked_variance64x64,
+       aom_masked_sub_pixel_variance64x64)
+  MBFP(BLOCK_64X32, aom_masked_sad64x32, aom_masked_variance64x32,
+       aom_masked_sub_pixel_variance64x32)
+  MBFP(BLOCK_32X64, aom_masked_sad32x64, aom_masked_variance32x64,
+       aom_masked_sub_pixel_variance32x64)
+  MBFP(BLOCK_32X32, aom_masked_sad32x32, aom_masked_variance32x32,
+       aom_masked_sub_pixel_variance32x32)
+  MBFP(BLOCK_32X16, aom_masked_sad32x16, aom_masked_variance32x16,
+       aom_masked_sub_pixel_variance32x16)
+  MBFP(BLOCK_16X32, aom_masked_sad16x32, aom_masked_variance16x32,
+       aom_masked_sub_pixel_variance16x32)
+  MBFP(BLOCK_16X16, aom_masked_sad16x16, aom_masked_variance16x16,
+       aom_masked_sub_pixel_variance16x16)
+  MBFP(BLOCK_16X8, aom_masked_sad16x8, aom_masked_variance16x8,
+       aom_masked_sub_pixel_variance16x8)
+  MBFP(BLOCK_8X16, aom_masked_sad8x16, aom_masked_variance8x16,
+       aom_masked_sub_pixel_variance8x16)
+  MBFP(BLOCK_8X8, aom_masked_sad8x8, aom_masked_variance8x8,
+       aom_masked_sub_pixel_variance8x8)
+  MBFP(BLOCK_4X8, aom_masked_sad4x8, aom_masked_variance4x8,
+       aom_masked_sub_pixel_variance4x8)
+  MBFP(BLOCK_8X4, aom_masked_sad8x4, aom_masked_variance8x4,
+       aom_masked_sub_pixel_variance8x4)
+  MBFP(BLOCK_4X4, aom_masked_sad4x4, aom_masked_variance4x4,
+       aom_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_set_var_fns(cpi);
 #endif
 
-  /* vp10_init_quantizer() is first called here. Add check in
-   * vp10_frame_init_quantizer() so that vp10_init_quantizer is only
+  /* av1_init_quantizer() is first called here. Add check in
+   * av1_frame_init_quantizer() so that av1_init_quantizer is only
    * called later when needed. This will avoid unnecessary calls of
-   * vp10_init_quantizer() for every frame.
+   * av1_init_quantizer() for every frame.
    */
-  vp10_init_quantizer(cpi);
+  av1_init_quantizer(cpi);
 #if CONFIG_AOM_QM
   aom_qm_init(cm);
 #endif
 
-  vp10_loop_filter_init(cm);
+  av1_loop_filter_init(cm);
 #if CONFIG_LOOP_RESTORATION
-  vp10_loop_restoration_precal();
+  av1_loop_restoration_precal();
 #endif  // CONFIG_LOOP_RESTORATION
 
   cm->error.setjmp = 0;
@@ -2465,8 +2465,8 @@
 #define SNPRINT2(H, T, V) \
   snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
 
-void vp10_remove_compressor(VP10_COMP *cpi) {
-  VP10_COMMON *cm;
+void av1_remove_compressor(AV1_COMP *cpi) {
+  AV1_COMMON *cm;
   unsigned int i;
   int t;
 
@@ -2475,7 +2475,7 @@
   cm = &cpi->common;
   if (cm->current_video_frame > 0) {
 #if CONFIG_INTERNAL_STATS
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     if (cpi->oxcf.pass != 1) {
       char headings[512] = { 0 };
@@ -2493,13 +2493,13 @@
       const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
 
       if (cpi->b_calculate_psnr) {
-        const double total_psnr = vpx_sse_to_psnr(
+        const double total_psnr = aom_sse_to_psnr(
             (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
         const double total_ssim =
             100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
         snprintf(headings, sizeof(headings),
                  "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
-                 "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
+                 "AOMSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
                  "WstPsnr\tWstSsim\tWstFast\tWstHVS");
         snprintf(results, sizeof(results),
                  "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
@@ -2519,7 +2519,7 @@
 
         if (cpi->b_calculate_consistency) {
           double consistency =
-              vpx_sse_to_psnr((double)cpi->total_samples, peak,
+              aom_sse_to_psnr((double)cpi->total_samples, peak,
                               (double)cpi->total_inconsistency);
 
           SNPRINT(headings, "\tConsist\tWstCons");
@@ -2550,44 +2550,44 @@
   }
 
   for (t = 0; t < cpi->num_workers; ++t) {
-    VPxWorker *const worker = &cpi->workers[t];
+    AVxWorker *const worker = &cpi->workers[t];
     EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
 
     // Deallocate allocated threads.
-    vpx_get_worker_interface()->end(worker);
+    aom_get_worker_interface()->end(worker);
 
     // Deallocate allocated thread data.
     if (t < cpi->num_workers - 1) {
       if (cpi->common.allow_screen_content_tools)
-        vpx_free(thread_data->td->mb.palette_buffer);
-      vpx_free(thread_data->td->counts);
-      vp10_free_pc_tree(thread_data->td);
-      vp10_free_var_tree(thread_data->td);
-      vpx_free(thread_data->td);
+        aom_free(thread_data->td->mb.palette_buffer);
+      aom_free(thread_data->td->counts);
+      av1_free_pc_tree(thread_data->td);
+      av1_free_var_tree(thread_data->td);
+      aom_free(thread_data->td);
     }
   }
-  vpx_free(cpi->tile_thr_data);
-  vpx_free(cpi->workers);
+  aom_free(cpi->tile_thr_data);
+  aom_free(cpi->workers);
 
-  if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+  if (cpi->num_workers > 1) av1_loop_filter_dealloc(&cpi->lf_row_sync);
 
   dealloc_compressor_data(cpi);
 
   for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
        ++i) {
-    vpx_free(cpi->mbgraph_stats[i].mb_stats);
+    aom_free(cpi->mbgraph_stats[i].mb_stats);
   }
 
 #if CONFIG_FP_MB_STATS
   if (cpi->use_fp_mb_stats) {
-    vpx_free(cpi->twopass.frame_mb_stats_buf);
+    aom_free(cpi->twopass.frame_mb_stats_buf);
     cpi->twopass.frame_mb_stats_buf = NULL;
   }
 #endif
 
-  vp10_remove_common(cm);
-  vp10_free_ref_frame_buffers(cm->buffer_pool);
-  vpx_free(cpi);
+  av1_remove_common(cm);
+  av1_free_ref_frame_buffers(cm->buffer_pool);
+  aom_free(cpi);
 
 #ifdef OUTPUT_YUV_SKINMAP
   fclose(yuv_skinmap_file);
@@ -2610,15 +2610,15 @@
 #endif
 }
 
-static void generate_psnr_packet(VP10_COMP *cpi) {
-  struct vpx_codec_cx_pkt pkt;
+static void generate_psnr_packet(AV1_COMP *cpi) {
+  struct aom_codec_cx_pkt pkt;
   int i;
   PSNR_STATS psnr;
-#if CONFIG_VP9_HIGHBITDEPTH
-  vpx_calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
+#if CONFIG_AOM_HIGHBITDEPTH
+  aom_calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
                        cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
 #else
-  vpx_calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
+  aom_calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
 #endif
 
   for (i = 0; i < 4; ++i) {
@@ -2626,69 +2626,69 @@
     pkt.data.psnr.sse[i] = psnr.sse[i];
     pkt.data.psnr.psnr[i] = psnr.psnr[i];
   }
-  pkt.kind = VPX_CODEC_PSNR_PKT;
-  vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+  pkt.kind = AOM_CODEC_PSNR_PKT;
+  aom_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
 }
 
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags) {
   if (ref_frame_flags > ((1 << INTER_REFS_PER_FRAME) - 1)) return -1;
 
   cpi->ref_frame_flags = ref_frame_flags;
   return 0;
 }
 
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
-  cpi->ext_refresh_golden_frame = (ref_frame_flags & VPX_GOLD_FLAG) != 0;
-  cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VPX_ALT_FLAG) != 0;
-  cpi->ext_refresh_last_frame = (ref_frame_flags & VPX_LAST_FLAG) != 0;
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags) {
+  cpi->ext_refresh_golden_frame = (ref_frame_flags & AOM_GOLD_FLAG) != 0;
+  cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & AOM_ALT_FLAG) != 0;
+  cpi->ext_refresh_last_frame = (ref_frame_flags & AOM_LAST_FLAG) != 0;
   cpi->ext_refresh_frame_flags_pending = 1;
 }
 
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
-    VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_av1_ref_frame_buffer(
+    AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag) {
   MV_REFERENCE_FRAME ref_frame = NONE;
-  if (ref_frame_flag == VPX_LAST_FLAG) ref_frame = LAST_FRAME;
+  if (ref_frame_flag == AOM_LAST_FLAG) ref_frame = LAST_FRAME;
 #if CONFIG_EXT_REFS
-  else if (ref_frame_flag == VPX_LAST2_FLAG)
+  else if (ref_frame_flag == AOM_LAST2_FLAG)
     ref_frame = LAST2_FRAME;
-  else if (ref_frame_flag == VPX_LAST3_FLAG)
+  else if (ref_frame_flag == AOM_LAST3_FLAG)
     ref_frame = LAST3_FRAME;
 #endif  // CONFIG_EXT_REFS
-  else if (ref_frame_flag == VPX_GOLD_FLAG)
+  else if (ref_frame_flag == AOM_GOLD_FLAG)
     ref_frame = GOLDEN_FRAME;
 #if CONFIG_EXT_REFS
-  else if (ref_frame_flag == VPX_BWD_FLAG)
+  else if (ref_frame_flag == AOM_BWD_FLAG)
     ref_frame = BWDREF_FRAME;
 #endif  // CONFIG_EXT_REFS
-  else if (ref_frame_flag == VPX_ALT_FLAG)
+  else if (ref_frame_flag == AOM_ALT_FLAG)
     ref_frame = ALTREF_FRAME;
 
   return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
 }
 
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                            YV12_BUFFER_CONFIG *sd) {
-  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
-  if (cfg) {
-    vpx_yv12_copy_frame(cfg, sd);
-    return 0;
-  } else {
-    return -1;
-  }
-}
-
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
                            YV12_BUFFER_CONFIG *sd) {
-  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+  YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
   if (cfg) {
-    vpx_yv12_copy_frame(sd, cfg);
+    aom_yv12_copy_frame(cfg, sd);
     return 0;
   } else {
     return -1;
   }
 }
 
-int vp10_update_entropy(VP10_COMP *cpi, int update) {
+int av1_set_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
+                          YV12_BUFFER_CONFIG *sd) {
+  YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
+  if (cfg) {
+    aom_yv12_copy_frame(sd, cfg);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int av1_update_entropy(AV1_COMP *cpi, int update) {
   cpi->ext_refresh_frame_context = update;
   cpi->ext_refresh_frame_context_pending = 1;
   return 0;
@@ -2699,7 +2699,7 @@
 // as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
 // not denoise the UV channels at this time. If ever we implement UV channel
 // denoising we will have to modify this.
-void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
+void aom_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
   uint8_t *src = s->y_buffer;
   int h = s->y_height;
 
@@ -2727,9 +2727,9 @@
 #endif
 
 #if CONFIG_EXT_REFS
-static void check_show_existing_frame(VP10_COMP *cpi) {
+static void check_show_existing_frame(AV1_COMP *cpi) {
   const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const FRAME_UPDATE_TYPE next_frame_update_type =
       gf_group->update_type[gf_group->index];
   const int which_arf = gf_group->arf_update_idx[gf_group->index];
@@ -2744,7 +2744,7 @@
              (next_frame_update_type == OVERLAY_UPDATE ||
               next_frame_update_type == INTNL_OVERLAY_UPDATE)) {
     // Other parameters related to OVERLAY_UPDATE will be taken care of
-    // in vp10_rc_get_second_pass_params(cpi)
+    // in av1_rc_get_second_pass_params(cpi)
     cm->show_existing_frame = 1;
     cpi->rc.is_src_frame_alt_ref = 1;
     cpi->existing_fb_idx_to_show = cpi->alt_fb_idx;
@@ -2757,11 +2757,11 @@
 #endif  // CONFIG_EXT_REFS
 
 #ifdef OUTPUT_YUV_REC
-void vp10_write_one_yuv_frame(VP10_COMMON *cm, YV12_BUFFER_CONFIG *s) {
+void aom_write_one_yuv_frame(AV1_COMMON *cm, YV12_BUFFER_CONFIG *s) {
   uint8_t *src = s->y_buffer;
   int h = cm->height;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
     uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
 
@@ -2789,7 +2789,7 @@
     fflush(yuv_rec_file);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   do {
     fwrite(src, s->y_width, 1, yuv_rec_file);
@@ -2816,15 +2816,15 @@
 }
 #endif  // OUTPUT_YUV_REC
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
                                                 YV12_BUFFER_CONFIG *dst,
                                                 int bd) {
 #else
 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
                                                 YV12_BUFFER_CONFIG *dst) {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  // TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
   int i;
   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
                                    src->v_buffer };
@@ -2841,31 +2841,31 @@
                                dst->uv_crop_height };
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
-      vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
-                               src_strides[i], dsts[i], dst_heights[i],
-                               dst_widths[i], dst_strides[i], bd);
+      av1_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
+                              src_strides[i], dsts[i], dst_heights[i],
+                              dst_widths[i], dst_strides[i], bd);
     } else {
-      vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
-                        dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+      av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+                       dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
     }
 #else
-    vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
-                      dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+                     dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
-  vpx_extend_frame_borders(dst);
+  aom_extend_frame_borders(dst);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                    YV12_BUFFER_CONFIG *dst, int planes,
                                    int bd) {
 #else
 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                    YV12_BUFFER_CONFIG *dst, int planes) {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   const int src_w = src->y_crop_width;
   const int src_h = src->y_crop_height;
   const int dst_w = dst->y_crop_width;
@@ -2876,7 +2876,7 @@
   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
   const InterpFilterParams interp_filter_params =
-      vp10_get_interp_filter_params(EIGHTTAP_REGULAR);
+      av1_get_interp_filter_params(EIGHTTAP_REGULAR);
   const int16_t *kernel = interp_filter_params.filter_ptr;
   const int taps = interp_filter_params.taps;
   int x, y, i;
@@ -2894,35 +2894,35 @@
                                  (x / factor) * src_w / dst_w;
         uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
-          vpx_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
+          aom_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
                                &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
                                &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
                                16 / factor, 16 / factor, bd);
         } else {
-          vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+          aom_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
                         &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
                         &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
                         16 / factor, 16 / factor);
         }
 #else
-        vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+        aom_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
                       &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
                       &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
                       16 / factor, 16 / factor);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
 
   if (planes == 1)
-    vpx_extend_frame_borders_y(dst);
+    aom_extend_frame_borders_y(dst);
   else
-    vpx_extend_frame_borders(dst);
+    aom_extend_frame_borders(dst);
 }
 
-static int scale_down(VP10_COMP *cpi, int q) {
+static int scale_down(AV1_COMP *cpi, int q) {
   RATE_CONTROL *const rc = &cpi->rc;
   GF_GROUP *const gf_group = &cpi->twopass.gf_group;
   int scale = 0;
@@ -2932,7 +2932,7 @@
       q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
     const int max_size_thresh =
         (int)(rate_thresh_mult[SCALE_STEP1] *
-              VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+              AOMMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
     scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
   }
   return scale;
@@ -2940,10 +2940,10 @@
 
 // Function to test for conditions that indicate we should loop
 // back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
-                            int q, int maxq, int minq) {
+static int recode_loop_test(AV1_COMP *cpi, int high_limit, int low_limit, int q,
+                            int maxq, int minq) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
   int force_recode = 0;
 
@@ -2961,7 +2961,7 @@
     if ((rc->projected_frame_size > high_limit && q < maxq) ||
         (rc->projected_frame_size < low_limit && q > minq)) {
       force_recode = 1;
-    } else if (cpi->oxcf.rc_mode == VPX_CQ) {
+    } else if (cpi->oxcf.rc_mode == AOM_CQ) {
       // Deal with frame undershoot and whether or not we are
       // below the automatically set cq level.
       if (q > oxcf->cq_level &&
@@ -2985,9 +2985,9 @@
 }
 
 // Up-sample 1 reference frame.
-static INLINE int upsample_ref_frame(VP10_COMP *cpi,
+static INLINE int upsample_ref_frame(AV1_COMP *cpi,
                                      const YV12_BUFFER_CONFIG *const ref) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   EncRefCntBuffer *ubufs = cpi->upsampled_ref_bufs;
   int new_uidx = get_free_upsampled_ref_buf(ubufs);
 
@@ -2998,19 +2998,19 @@
 
     // Can allocate buffer for Y plane only.
     if (upsampled_ref->buffer_alloc_sz < (ref->buffer_alloc_sz << 6))
-      if (vpx_realloc_frame_buffer(upsampled_ref, (cm->width << 3),
+      if (aom_realloc_frame_buffer(upsampled_ref, (cm->width << 3),
                                    (cm->height << 3), cm->subsampling_x,
                                    cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                    cm->use_highbitdepth,
 #endif
-                                   (VPX_ENC_BORDER_IN_PIXELS << 3),
+                                   (AOM_ENC_BORDER_IN_PIXELS << 3),
                                    cm->byte_alignment, NULL, NULL, NULL))
-        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                            "Failed to allocate up-sampled frame buffer");
 
 // Currently, only Y plane is up-sampled, U, V are not used.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     scale_and_extend_frame(ref, upsampled_ref, 1, (int)cm->bit_depth);
 #else
     scale_and_extend_frame(ref, upsampled_ref, 1);
@@ -3022,7 +3022,7 @@
 #define DUMP_REF_FRAME_IMAGES 0
 
 #if DUMP_REF_FRAME_IMAGES == 1
-static int dump_one_image(VP10_COMMON *cm,
+static int dump_one_image(AV1_COMMON *cm,
                           const YV12_BUFFER_CONFIG *const ref_buf,
                           char *file_name) {
   int h;
@@ -3030,12 +3030,12 @@
 
   if (ref_buf == NULL) {
     printf("Frame data buffer is NULL.\n");
-    return VPX_CODEC_MEM_ERROR;
+    return AOM_CODEC_MEM_ERROR;
   }
 
   if ((f_ref = fopen(file_name, "wb")) == NULL) {
     printf("Unable to open file %s to write.\n", file_name);
-    return VPX_CODEC_MEM_ERROR;
+    return AOM_CODEC_MEM_ERROR;
   }
 
   // --- Y ---
@@ -3055,11 +3055,11 @@
 
   fclose(f_ref);
 
-  return VPX_CODEC_OK;
+  return AOM_CODEC_OK;
 }
 
-static void dump_ref_frame_images(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void dump_ref_frame_images(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MV_REFERENCE_FRAME ref_frame;
 
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
@@ -3076,7 +3076,7 @@
 // as follows:
 // LAST_FRAME -> LAST2_FRAME -> LAST3_FRAME
 // when the LAST_FRAME is updated.
-static INLINE void shift_last_ref_frames(VP10_COMP *cpi) {
+static INLINE void shift_last_ref_frames(AV1_COMP *cpi) {
   int ref_frame;
   for (ref_frame = LAST_REF_FRAMES - 1; ref_frame > 0; --ref_frame) {
     cpi->lst_fb_idxes[ref_frame] = cpi->lst_fb_idxes[ref_frame - 1];
@@ -3092,8 +3092,8 @@
 }
 #endif
 
-void vp10_update_reference_frames(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_update_reference_frames(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
   const int use_upsampled_ref = cpi->sf.use_upsampled_references;
   int new_uidx = 0;
@@ -3142,10 +3142,10 @@
       uref_cnt_fb(cpi->upsampled_ref_bufs,
                   &cpi->upsampled_ref_idx[cpi->alt_fb_idx], new_uidx);
     }
-  } else if (vp10_preserve_existing_gf(cpi)) {
+  } else if (av1_preserve_existing_gf(cpi)) {
     // We have decided to preserve the previously existing golden frame as our
     // new ARF frame. However, in the short term in function
-    // vp10_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
+    // av1_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
     // we're updating the GF with the current decoded frame, we save it to the
     // ARF slot instead.
     // We now have to update the ARF with the current frame and swap gld_fb_idx
@@ -3385,38 +3385,38 @@
 #endif  // DUMP_REF_FRAME_IMAGES
 }
 
-static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
+static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
   MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
   struct loopfilter *lf = &cm->lf;
   if (is_lossless_requested(&cpi->oxcf)) {
     lf->filter_level = 0;
   } else {
-    struct vpx_usec_timer timer;
+    struct aom_usec_timer timer;
 
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
-    vpx_usec_timer_start(&timer);
+    aom_usec_timer_start(&timer);
 
 #if CONFIG_LOOP_RESTORATION
-    vp10_pick_filter_restoration(cpi->Source, cpi, cpi->sf.lpf_pick);
+    av1_pick_filter_restoration(cpi->Source, cpi, cpi->sf.lpf_pick);
 #else
-    vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
+    av1_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
 #endif  // CONFIG_LOOP_RESTORATION
 
-    vpx_usec_timer_mark(&timer);
-    cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+    aom_usec_timer_mark(&timer);
+    cpi->time_pick_lpf += aom_usec_timer_elapsed(&timer);
   }
 
   if (lf->filter_level > 0) {
 #if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
-    vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+    av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
 #else
     if (cpi->num_workers > 1)
-      vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
-                                lf->filter_level, 0, 0, cpi->workers,
-                                cpi->num_workers, &cpi->lf_row_sync);
+      av1_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+                               lf->filter_level, 0, 0, cpi->workers,
+                               cpi->num_workers, &cpi->lf_row_sync);
     else
-      vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+      av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
 #endif
   }
 #if CONFIG_DERING
@@ -3424,8 +3424,8 @@
     cm->dering_level = 0;
   } else {
     cm->dering_level =
-        vp10_dering_search(cm->frame_to_show, cpi->Source, cm, xd);
-    vp10_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
+        av1_dering_search(cm->frame_to_show, cpi->Source, cm, xd);
+    av1_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
   }
 #endif  // CONFIG_DERING
 
@@ -3438,31 +3438,31 @@
     // TODO(yaowu): investigate per-segment CLPF decision and
     // an optimal threshold, use 80 for now.
     for (i = 0; i < MAX_SEGMENTS; i++)
-      hq &= vp10_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
+      hq &= av1_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
 
     if (!hq) {  // Don't try filter if the entire image is nearly losslessly
                 // encoded
 #if CLPF_FILTER_ALL_PLANES
-      vpx_yv12_copy_frame(cm->frame_to_show, &cpi->last_frame_uf);
-      before = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
-               vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
-               vpx_get_v_sse(cpi->Source, cm->frame_to_show);
-      vp10_clpf_frame(cm->frame_to_show, cm, xd);
-      after = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
-              vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
-              vpx_get_v_sse(cpi->Source, cm->frame_to_show);
+      aom_yv12_copy_frame(cm->frame_to_show, &cpi->last_frame_uf);
+      before = aom_get_y_sse(cpi->Source, cm->frame_to_show) +
+               aom_get_u_sse(cpi->Source, cm->frame_to_show) +
+               aom_get_v_sse(cpi->Source, cm->frame_to_show);
+      av1_clpf_frame(cm->frame_to_show, cm, xd);
+      after = aom_get_y_sse(cpi->Source, cm->frame_to_show) +
+              aom_get_u_sse(cpi->Source, cm->frame_to_show) +
+              aom_get_v_sse(cpi->Source, cm->frame_to_show);
 #else
-      vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
-      before = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
-      vp10_clpf_frame(cm->frame_to_show, cm, xd);
-      after = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
+      aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+      before = aom_get_y_sse(cpi->Source, cm->frame_to_show);
+      av1_clpf_frame(cm->frame_to_show, cm, xd);
+      after = aom_get_y_sse(cpi->Source, cm->frame_to_show);
 #endif
       if (before < after) {
 // No improvement, restore original
 #if CLPF_FILTER_ALL_PLANES
-        vpx_yv12_copy_frame(&cpi->last_frame_uf, cm->frame_to_show);
+        aom_yv12_copy_frame(&cpi->last_frame_uf, cm->frame_to_show);
 #else
-        vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+        aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
 #endif
       } else {
         cm->clpf = 1;
@@ -3472,47 +3472,47 @@
 #endif
 #if CONFIG_LOOP_RESTORATION
   if (cm->rst_info.restoration_type != RESTORE_NONE) {
-    vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
-                               cm->frame_type == KEY_FRAME, cm->width,
-                               cm->height);
-    vp10_loop_restoration_rows(cm->frame_to_show, cm, 0, cm->mi_rows, 0);
+    av1_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+                              cm->frame_type == KEY_FRAME, cm->width,
+                              cm->height);
+    av1_loop_restoration_rows(cm->frame_to_show, cm, 0, cm->mi_rows, 0);
   }
 #endif  // CONFIG_LOOP_RESTORATION
 
-  vpx_extend_frame_inner_borders(cm->frame_to_show);
+  aom_extend_frame_inner_borders(cm->frame_to_show);
 }
 
-static INLINE void alloc_frame_mvs(VP10_COMMON *const cm, int buffer_idx) {
+static INLINE void alloc_frame_mvs(AV1_COMMON *const cm, int buffer_idx) {
   RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
   if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
       new_fb_ptr->mi_cols < cm->mi_cols) {
-    vpx_free(new_fb_ptr->mvs);
+    aom_free(new_fb_ptr->mvs);
     CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
-                    (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+                    (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
                                          sizeof(*new_fb_ptr->mvs)));
     new_fb_ptr->mi_rows = cm->mi_rows;
     new_fb_ptr->mi_cols = cm->mi_cols;
   }
 }
 
-void vp10_scale_references(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_scale_references(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   MV_REFERENCE_FRAME ref_frame;
-  const VPX_REFFRAME ref_mask[INTER_REFS_PER_FRAME] = {
-    VPX_LAST_FLAG,
+  const AOM_REFFRAME ref_mask[INTER_REFS_PER_FRAME] = {
+    AOM_LAST_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_LAST2_FLAG,
-    VPX_LAST3_FLAG,
+    AOM_LAST2_FLAG,
+    AOM_LAST3_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_GOLD_FLAG,
+    AOM_GOLD_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_BWD_FLAG,
+    AOM_BWD_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_ALT_FLAG
+    AOM_ALT_FLAG
   };
 
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
-    // Need to convert from VPX_REFFRAME to index into ref_mask (subtract 1).
+    // Need to convert from AOM_REFFRAME to index into ref_mask (subtract 1).
     if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
       BufferPool *const pool = cm->buffer_pool;
       const YV12_BUFFER_CONFIG *const ref =
@@ -3523,7 +3523,7 @@
         continue;
       }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
         RefCntBuffer *new_fb_ptr = NULL;
         int force_scaling = 0;
@@ -3536,12 +3536,12 @@
         new_fb_ptr = &pool->frame_bufs[new_fb];
         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
             new_fb_ptr->buf.y_crop_height != cm->height) {
-          if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+          if (aom_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
                                        cm->subsampling_x, cm->subsampling_y,
                                        cm->use_highbitdepth,
-                                       VPX_ENC_BORDER_IN_PIXELS,
+                                       AOM_ENC_BORDER_IN_PIXELS,
                                        cm->byte_alignment, NULL, NULL, NULL))
-            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+            aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                                "Failed to allocate frame buffer");
           scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE,
                                  (int)cm->bit_depth);
@@ -3561,17 +3561,17 @@
         new_fb_ptr = &pool->frame_bufs[new_fb];
         if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
             new_fb_ptr->buf.y_crop_height != cm->height) {
-          if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+          if (aom_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
                                        cm->subsampling_x, cm->subsampling_y,
-                                       VPX_ENC_BORDER_IN_PIXELS,
+                                       AOM_ENC_BORDER_IN_PIXELS,
                                        cm->byte_alignment, NULL, NULL, NULL))
-            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+            aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                                "Failed to allocate frame buffer");
           scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE);
           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
           alloc_frame_mvs(cm, new_fb);
         }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         if (cpi->sf.use_upsampled_references &&
             (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
@@ -3580,17 +3580,17 @@
           EncRefCntBuffer *ubuf =
               &cpi->upsampled_ref_bufs[cpi->upsampled_ref_idx[map_idx]];
 
-          if (vpx_realloc_frame_buffer(&ubuf->buf, (cm->width << 3),
+          if (aom_realloc_frame_buffer(&ubuf->buf, (cm->width << 3),
                                        (cm->height << 3), cm->subsampling_x,
                                        cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                        cm->use_highbitdepth,
 #endif
-                                       (VPX_ENC_BORDER_IN_PIXELS << 3),
+                                       (AOM_ENC_BORDER_IN_PIXELS << 3),
                                        cm->byte_alignment, NULL, NULL, NULL))
-            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+            aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                                "Failed to allocate up-sampled frame buffer");
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           scale_and_extend_frame(&new_fb_ptr->buf, &ubuf->buf, 1,
                                  (int)cm->bit_depth);
 #else
@@ -3611,8 +3611,8 @@
   }
 }
 
-static void release_scaled_references(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+static void release_scaled_references(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   int i;
   if (cpi->oxcf.pass == 0) {
     // Only release scaled references under certain conditions:
@@ -3664,8 +3664,8 @@
   model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
 }
 
-void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
-                               vp10_coeff_count *full_count) {
+void av1_full_to_model_counts(av1_coeff_count_model *model_count,
+                              av1_coeff_count *full_count) {
   int i, j, k, l;
 
   for (i = 0; i < PLANE_TYPES; ++i)
@@ -3676,14 +3676,14 @@
 }
 
 #if 0 && CONFIG_INTERNAL_STATS
-static void output_frame_level_debug_stats(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void output_frame_level_debug_stats(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
   int64_t recon_err;
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
-  recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+  recon_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
 
   if (cpi->twopass.total_left_stats.coded_error != 0.0)
     fprintf(f, "%10u %dx%d  %10d %10d %d %d %10d %10d %10d %10d"
@@ -3710,12 +3710,12 @@
         cpi->rc.total_target_vs_actual,
         (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
         cpi->rc.total_actual_bits, cm->base_qindex,
-        vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
-        (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
-        vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality,
+        av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
+        (double)av1_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
+        av1_convert_qindex_to_q(cpi->twopass.active_worst_quality,
                                 cm->bit_depth),
         cpi->rc.avg_q,
-        vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
+        av1_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
         cpi->refresh_last_frame, cpi->refresh_golden_frame,
         cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
         cpi->twopass.bits_left,
@@ -3746,12 +3746,12 @@
 }
 #endif
 
-static void set_mv_search_params(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
-  const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
+static void set_mv_search_params(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
+  const unsigned int max_mv_def = AOMMIN(cm->width, cm->height);
 
   // Default based on max resolution.
-  cpi->mv_step_param = vp10_init_search_range(max_mv_def);
+  cpi->mv_step_param = av1_init_search_range(max_mv_def);
 
   if (cpi->sf.mv.auto_mv_step_size) {
     if (frame_is_intra_only(cm)) {
@@ -3763,34 +3763,34 @@
         // Allow mv_steps to correspond to twice the max mv magnitude found
         // in the previous frame, capped by the default max_mv_magnitude based
         // on resolution.
-        cpi->mv_step_param = vp10_init_search_range(
-            VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
+        cpi->mv_step_param = av1_init_search_range(
+            AOMMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
       }
       cpi->max_mv_magnitude = 0;
     }
   }
 }
 
-static void set_size_independent_vars(VP10_COMP *cpi) {
-  vp10_set_speed_features_framesize_independent(cpi);
-  vp10_set_rd_speed_thresholds(cpi);
-  vp10_set_rd_speed_thresholds_sub8x8(cpi);
+static void set_size_independent_vars(AV1_COMP *cpi) {
+  av1_set_speed_features_framesize_independent(cpi);
+  av1_set_rd_speed_thresholds(cpi);
+  av1_set_rd_speed_thresholds_sub8x8(cpi);
   cpi->common.interp_filter = cpi->sf.default_interp_filter;
 }
 
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+static void set_size_dependent_vars(AV1_COMP *cpi, int *q, int *bottom_index,
                                     int *top_index) {
-  VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   // Setup variables that depend on the dimensions of the frame.
-  vp10_set_speed_features_framesize_dependent(cpi);
+  av1_set_speed_features_framesize_dependent(cpi);
 
   // Decide q and q bounds.
-  *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+  *q = av1_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+    av1_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
   }
 
   // Configure experimental use of segmentation for enhanced coding of
@@ -3801,34 +3801,34 @@
     configure_static_seg_features(cpi);
 }
 
-static void init_motion_estimation(VP10_COMP *cpi) {
+static void init_motion_estimation(AV1_COMP *cpi) {
   int y_stride = cpi->scaled_source.y_stride;
 
   if (cpi->sf.mv.search_method == NSTEP) {
-    vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+    av1_init3smotion_compensation(&cpi->ss_cfg, y_stride);
   } else if (cpi->sf.mv.search_method == DIAMOND) {
-    vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+    av1_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
   }
 }
 
-static void set_frame_size(VP10_COMP *cpi) {
+static void set_frame_size(AV1_COMP *cpi) {
   int ref_frame;
-  VP10_COMMON *const cm = &cpi->common;
-  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  AV1EncoderConfig *const oxcf = &cpi->oxcf;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
-  if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
+  if (oxcf->pass == 2 && oxcf->rc_mode == AOM_VBR &&
       ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
        (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
-    vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
-                              &oxcf->scaled_frame_height);
+    av1_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+                             &oxcf->scaled_frame_height);
 
     // There has been a change in frame size.
-    vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
-                          oxcf->scaled_frame_height);
+    av1_set_size_literal(cpi, oxcf->scaled_frame_width,
+                         oxcf->scaled_frame_height);
   }
 
-  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+  if (oxcf->pass == 0 && oxcf->rc_mode == AOM_CBR &&
       oxcf->resize_mode == RESIZE_DYNAMIC) {
     if (cpi->resize_pending == 1) {
       oxcf->scaled_frame_width =
@@ -3842,8 +3842,8 @@
     }
     if (cpi->resize_pending != 0) {
       // There has been a change in frame size.
-      vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
-                            oxcf->scaled_frame_height);
+      av1_set_size_literal(cpi, oxcf->scaled_frame_width,
+                           oxcf->scaled_frame_height);
 
       // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
       set_mv_search_params(cpi);
@@ -3851,20 +3851,20 @@
   }
 
   if (oxcf->pass == 2) {
-    vp10_set_target_rate(cpi);
+    av1_set_target_rate(cpi);
   }
 
   alloc_frame_mvs(cm, cm->new_fb_idx);
 
   // Reset the frame pointers to the current frame size.
-  if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
+  if (aom_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               AOM_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                NULL, NULL, NULL))
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate frame buffer");
 
   alloc_util_frame_buffers(cpi);
@@ -3879,16 +3879,16 @@
     if (buf_idx != INVALID_IDX) {
       YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
       ref_buf->buf = buf;
-#if CONFIG_VP9_HIGHBITDEPTH
-      vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+      av1_setup_scale_factors_for_frame(
           &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
           cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
 #else
-      vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
-                                         buf->y_crop_height, cm->width,
-                                         cm->height);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      if (vp10_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
+      av1_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+                                        buf->y_crop_height, cm->width,
+                                        cm->height);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      if (av1_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
     } else {
       ref_buf->buf = NULL;
     }
@@ -3897,7 +3897,7 @@
   set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
 }
 
-static void reset_use_upsampled_references(VP10_COMP *cpi) {
+static void reset_use_upsampled_references(AV1_COMP *cpi) {
   MV_REFERENCE_FRAME ref_frame;
 
   // reset up-sampled reference buffer structure.
@@ -3913,36 +3913,36 @@
   }
 }
 
-static void encode_without_recode_loop(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void encode_without_recode_loop(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int q = 0, bottom_index = 0, top_index = 0;  // Dummy variables.
   const int use_upsampled_ref = cpi->sf.use_upsampled_references;
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   set_frame_size(cpi);
 
   // For 1 pass CBR under dynamic resize mode: use faster scaling for source.
   // Only for 2x2 scaling for now.
-  if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
+  if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == AOM_CBR &&
       cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
       cpi->un_scaled_source->y_width == (cm->width << 1) &&
       cpi->un_scaled_source->y_height == (cm->height << 1)) {
-    cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
-                                              &cpi->scaled_source);
+    cpi->Source = av1_scale_if_required_fast(cm, cpi->un_scaled_source,
+                                             &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required_fast(
+      cpi->Last_Source = av1_scale_if_required_fast(
           cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
   } else {
     cpi->Source =
-        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+        av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
-                                                &cpi->scaled_last_source);
+      cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
+                                               &cpi->scaled_last_source);
   }
 
   if (frame_is_intra_only(cm) == 0) {
-    vp10_scale_references(cpi);
+    av1_scale_references(cpi);
   }
 
   set_size_independent_vars(cpi);
@@ -3954,49 +3954,49 @@
   if (!use_upsampled_ref && cpi->sf.use_upsampled_references)
     reset_use_upsampled_references(cpi);
 
-  vp10_set_quantizer(cm, q);
-  vp10_set_variance_partition_thresholds(cpi, q);
+  av1_set_quantizer(cm, q);
+  av1_set_variance_partition_thresholds(cpi, q);
 
   setup_frame(cpi);
 
 #if CONFIG_ENTROPY
   cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
-  vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
-  vp10_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
+  av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+  av1_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
   cm->coef_probs_update_idx = 0;
-  vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+  av1_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
 #endif  // CONFIG_ENTROPY
 
   suppress_active_map(cpi);
   // Variance adaptive and in frame q adjustment experiments are mutually
   // exclusive.
   if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
-    vp10_vaq_frame_setup(cpi);
+    av1_vaq_frame_setup(cpi);
   } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
-    vp10_setup_in_frame_q_adj(cpi);
+    av1_setup_in_frame_q_adj(cpi);
   } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
-    vp10_cyclic_refresh_setup(cpi);
+    av1_cyclic_refresh_setup(cpi);
   }
   apply_active_map(cpi);
 
   // transform / motion compensation build reconstruction frame
-  vp10_encode_frame(cpi);
+  av1_encode_frame(cpi);
 
   // Update some stats from cyclic refresh, and check if we should not update
   // golden reference, for 1 pass CBR.
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
-      (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
-    vp10_cyclic_refresh_check_golden_update(cpi);
+      (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == AOM_CBR))
+    av1_cyclic_refresh_check_golden_update(cpi);
 
   // Update the skip mb flag probabilities based on the distribution
   // seen in the last encoder iteration.
   // update_base_skip_probs(cpi);
-  vpx_clear_system_state();
+  aom_clear_system_state();
 }
 
-static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
+static void encode_with_recode_loop(AV1_COMP *cpi, size_t *size,
                                     uint8_t *dest) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int bottom_index, top_index;
   int loop_count = 0;
@@ -4018,7 +4018,7 @@
     reset_use_upsampled_references(cpi);
 
   do {
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     set_frame_size(cpi);
 
@@ -4043,26 +4043,26 @@
 
     // Decide frame size bounds first time through.
     if (loop_count == 0) {
-      vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
-                                        &frame_under_shoot_limit,
-                                        &frame_over_shoot_limit);
+      av1_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
+                                       &frame_under_shoot_limit,
+                                       &frame_over_shoot_limit);
     }
 
     cpi->Source =
-        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+        av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
 
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
-                                                &cpi->scaled_last_source);
+      cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
+                                               &cpi->scaled_last_source);
 
     if (frame_is_intra_only(cm) == 0) {
       if (loop_count > 0) {
         release_scaled_references(cpi);
       }
-      vp10_scale_references(cpi);
+      av1_scale_references(cpi);
     }
 
-    vp10_set_quantizer(cm, q);
+    av1_set_quantizer(cm, q);
 
     if (loop_count == 0) setup_frame(cpi);
 
@@ -4071,7 +4071,7 @@
     // probs before every iteration.
     if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
       int i;
-      vp10_default_coef_probs(cm);
+      av1_default_coef_probs(cm);
       if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
           cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
         for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
@@ -4085,39 +4085,38 @@
     cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
     if (loop_count == 0 || frame_is_intra_only(cm) ||
         cm->error_resilient_mode) {
-      vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
-      vp10_copy(cpi->subframe_stats.enc_starting_coef_probs,
-                cm->fc->coef_probs);
+      av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+      av1_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
     } else {
       if (cm->do_subframe_update) {
-        vp10_copy(cm->fc->coef_probs,
-                  cpi->subframe_stats.enc_starting_coef_probs);
-        vp10_copy(cm->starting_coef_probs,
-                  cpi->subframe_stats.enc_starting_coef_probs);
-        vp10_zero(cpi->subframe_stats.coef_counts_buf);
-        vp10_zero(cpi->subframe_stats.eob_counts_buf);
+        av1_copy(cm->fc->coef_probs,
+                 cpi->subframe_stats.enc_starting_coef_probs);
+        av1_copy(cm->starting_coef_probs,
+                 cpi->subframe_stats.enc_starting_coef_probs);
+        av1_zero(cpi->subframe_stats.coef_counts_buf);
+        av1_zero(cpi->subframe_stats.eob_counts_buf);
       }
     }
     cm->coef_probs_update_idx = 0;
-    vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+    av1_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
 #endif  // CONFIG_ENTROPY
 
     // Variance adaptive and in frame q adjustment experiments are mutually
     // exclusive.
     if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
-      vp10_vaq_frame_setup(cpi);
+      av1_vaq_frame_setup(cpi);
     } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
-      vp10_setup_in_frame_q_adj(cpi);
+      av1_setup_in_frame_q_adj(cpi);
     }
 
     // transform / motion compensation build reconstruction frame
-    vp10_encode_frame(cpi);
+    av1_encode_frame(cpi);
 
     // Update the skip mb flag probabilities based on the distribution
     // seen in the last encoder iteration.
     // update_base_skip_probs(cpi);
 
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     // Dummy pack of the bitstream using up to date stats to get an
     // accurate estimate of output frame size to determine if we need
@@ -4125,7 +4124,7 @@
     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
       save_coding_context(cpi);
 
-      vp10_pack_bitstream(cpi, dest, size);
+      av1_pack_bitstream(cpi, dest, size);
 
       rc->projected_frame_size = (int)(*size) << 3;
       restore_coding_context(cpi);
@@ -4133,7 +4132,7 @@
       if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
     }
 
-    if (cpi->oxcf.rc_mode == VPX_Q) {
+    if (cpi->oxcf.rc_mode == AOM_Q) {
       loop = 0;
     } else {
       if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
@@ -4144,15 +4143,15 @@
         int64_t high_err_target = cpi->ambient_err;
         int64_t low_err_target = cpi->ambient_err >> 1;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
-          kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          kf_err = aom_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
         } else {
-          kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          kf_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
         }
 #else
-        kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+        kf_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // Prevent possible divide by zero error below for perfect KF
         kf_err += !kf_err;
@@ -4168,7 +4167,7 @@
 
           // Adjust Q
           q = (int)((q * high_err_target) / kf_err);
-          q = VPXMIN(q, (q_high + q_low) >> 1);
+          q = AOMMIN(q, (q_high + q_low) >> 1);
         } else if (kf_err < low_err_target &&
                    rc->projected_frame_size >= frame_under_shoot_limit) {
           // The key frame is much better than the previous frame
@@ -4177,7 +4176,7 @@
 
           // Adjust Q
           q = (int)((q * low_err_target) / kf_err);
-          q = VPXMIN(q, (q_high + q_low + 1) >> 1);
+          q = AOMMIN(q, (q_high + q_low + 1) >> 1);
         }
 
         // Clamp Q to upper and lower limits:
@@ -4186,7 +4185,7 @@
         loop = q != last_q;
       } else if (recode_loop_test(cpi, frame_over_shoot_limit,
                                   frame_under_shoot_limit, q,
-                                  VPXMAX(q_high, top_index), bottom_index)) {
+                                  AOMMAX(q_high, top_index), bottom_index)) {
         // Is the projected frame size out of range and are we allowed
         // to attempt to recode.
         int last_q = q;
@@ -4220,20 +4219,20 @@
 
           if (undershoot_seen || loop_at_this_size > 1) {
             // Update rate_correction_factor unless
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
 
             q = (q_high + q_low + 1) / 2;
           } else {
             // Update rate_correction_factor unless
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
 
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
-                                   VPXMAX(q_high, top_index));
+            q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                  AOMMAX(q_high, top_index));
 
             while (q < q_low && retries < 10) {
-              vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
-                                     VPXMAX(q_high, top_index));
+              av1_rc_update_rate_correction_factors(cpi);
+              q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                    AOMMAX(q_high, top_index));
               retries++;
             }
           }
@@ -4244,24 +4243,24 @@
           q_high = q > q_low ? q - 1 : q_low;
 
           if (overshoot_seen || loop_at_this_size > 1) {
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
             q = (q_high + q_low) / 2;
           } else {
-            vp10_rc_update_rate_correction_factors(cpi);
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
-                                   top_index);
+            av1_rc_update_rate_correction_factors(cpi);
+            q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                  top_index);
             // Special case reset for qlow for constrained quality.
             // This should only trigger where there is very substantial
             // undershoot on a frame and the auto cq level is above
             // the user passsed in value.
-            if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
+            if (cpi->oxcf.rc_mode == AOM_CQ && q < q_low) {
               q_low = q;
             }
 
             while (q > q_high && retries < 10) {
-              vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
-                                     top_index);
+              av1_rc_update_rate_correction_factors(cpi);
+              q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                    top_index);
               retries++;
             }
           }
@@ -4294,7 +4293,7 @@
   } while (loop);
 }
 
-static int get_ref_frame_flags(const VP10_COMP *cpi) {
+static int get_ref_frame_flags(const AV1_COMP *cpi) {
   const int *const map = cpi->common.ref_frame_map;
 
 #if CONFIG_EXT_REFS
@@ -4326,40 +4325,40 @@
   const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
 #endif  // CONFIG_EXT_REFS
 
-  int flags = VPX_REFFRAME_ALL;
+  int flags = AOM_REFFRAME_ALL;
 
 #if CONFIG_EXT_REFS
   // Disable the use of BWDREF_FRAME for non-bipredictive frames.
   if (!(cpi->rc.is_bipred_frame || cpi->rc.is_last_bipred_frame ||
         (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs)))
-    flags &= ~VPX_BWD_FLAG;
+    flags &= ~AOM_BWD_FLAG;
 #endif  // CONFIG_EXT_REFS
 
-  if (gld_is_last || gld_is_alt) flags &= ~VPX_GOLD_FLAG;
+  if (gld_is_last || gld_is_alt) flags &= ~AOM_GOLD_FLAG;
 
-  if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~VPX_GOLD_FLAG;
+  if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~AOM_GOLD_FLAG;
 
-  if (alt_is_last) flags &= ~VPX_ALT_FLAG;
+  if (alt_is_last) flags &= ~AOM_ALT_FLAG;
 
 #if CONFIG_EXT_REFS
-  if (last2_is_last || last2_is_alt) flags &= ~VPX_LAST2_FLAG;
+  if (last2_is_last || last2_is_alt) flags &= ~AOM_LAST2_FLAG;
 
-  if (last3_is_last || last3_is_last2 || last3_is_alt) flags &= ~VPX_LAST3_FLAG;
+  if (last3_is_last || last3_is_last2 || last3_is_alt) flags &= ~AOM_LAST3_FLAG;
 
-  if (gld_is_last2 || gld_is_last3) flags &= ~VPX_GOLD_FLAG;
+  if (gld_is_last2 || gld_is_last3) flags &= ~AOM_GOLD_FLAG;
 
   if ((bwd_is_last || bwd_is_last2 || bwd_is_last3 || bwd_is_gld ||
        bwd_is_alt) &&
-      (flags & VPX_BWD_FLAG))
-    flags &= ~VPX_BWD_FLAG;
+      (flags & AOM_BWD_FLAG))
+    flags &= ~AOM_BWD_FLAG;
 #endif  // CONFIG_EXT_REFS
 
   return flags;
 }
 
-static void set_ext_overrides(VP10_COMP *cpi) {
+static void set_ext_overrides(AV1_COMP *cpi) {
   // Overrides the defaults with the externally supplied values with
-  // vp10_update_reference() and vp10_update_entropy() calls
+  // av1_update_reference() and av1_update_entropy() calls
   // Note: The overrides are valid only for the next frame passed
   // to encode_frame_to_data_rate() function
   if (cpi->ext_refresh_frame_context_pending) {
@@ -4374,38 +4373,38 @@
   }
 }
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
-                                                YV12_BUFFER_CONFIG *unscaled,
-                                                YV12_BUFFER_CONFIG *scaled) {
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
+                                               YV12_BUFFER_CONFIG *unscaled,
+                                               YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
       cm->mi_rows * MI_SIZE != unscaled->y_height) {
     // For 2x2 scaling down.
-    vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
-    vpx_extend_frame_borders(scaled);
+    aom_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
+    aom_extend_frame_borders(scaled);
     return scaled;
   } else {
     return unscaled;
   }
 }
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
-                                           YV12_BUFFER_CONFIG *unscaled,
-                                           YV12_BUFFER_CONFIG *scaled) {
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
+                                          YV12_BUFFER_CONFIG *unscaled,
+                                          YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
       cm->mi_rows * MI_SIZE != unscaled->y_height) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
 #else
     scale_and_extend_frame_nonnormative(unscaled, scaled);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return scaled;
   } else {
     return unscaled;
   }
 }
 
-static void set_arf_sign_bias(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_arf_sign_bias(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int arf_sign_bias;
 #if CONFIG_EXT_REFS
   const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@@ -4430,7 +4429,7 @@
 #endif  // CONFIG_EXT_REFS
 }
 
-static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
+static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
   INTERP_FILTER ifilter;
   int ref_total[TOTAL_REFS_PER_FRAME] = { 0 };
   MV_REFERENCE_FRAME ref;
@@ -4490,8 +4489,8 @@
 
 #if DUMP_RECON_FRAMES == 1
 // NOTE(zoeliu): For debug - Output the filtered reconstructed video.
-static void dump_filtered_recon_frames(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void dump_filtered_recon_frames(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const YV12_BUFFER_CONFIG *recon_buf = cm->frame_to_show;
   int h;
   char file_name[256] = "/tmp/enc_filtered_recon.yuv";
@@ -4542,15 +4541,15 @@
 }
 #endif  // DUMP_RECON_FRAMES
 
-static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
+static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
                                       uint8_t *dest,
                                       unsigned int *frame_flags) {
-  VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   struct segmentation *const seg = &cm->seg;
   TX_SIZE t;
   set_ext_overrides(cpi);
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   // Set the arf sign bias for this frame.
   set_arf_sign_bias(cpi);
@@ -4585,7 +4584,7 @@
     cpi->rc.is_bipred_frame = 0;
 
     // Build the bitstream
-    vp10_pack_bitstream(cpi, dest, size);
+    av1_pack_bitstream(cpi, dest, size);
 
     // Set up frame to show to get ready for stats collection.
     cm->frame_to_show = get_frame_new_buffer(cm);
@@ -4596,7 +4595,7 @@
 #endif  // DUMP_RECON_FRAMES
 
     // Update the LAST_FRAME in the reference frame buffer.
-    vp10_update_reference_frames(cpi);
+    av1_update_reference_frames(cpi);
 
     // Update frame flags
     cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN;
@@ -4612,8 +4611,8 @@
     // Since we allocate a spot for the OVERLAY frame in the gf group, we need
     // to do post-encoding update accordingly.
     if (cpi->rc.is_src_frame_alt_ref) {
-      vp10_set_target_rate(cpi);
-      vp10_rc_postencode_update(cpi, *size);
+      av1_set_target_rate(cpi);
+      av1_rc_postencode_update(cpi, *size);
     }
 #endif
 
@@ -4635,7 +4634,7 @@
   // Set various flags etc to special state if it is a key frame.
   if (frame_is_intra_only(cm)) {
     // Reset the loop filter deltas and segmentation map.
-    vp10_reset_segment_features(cm);
+    av1_reset_segment_features(cm);
 
     // If segmentation is enabled force a map update for key frames.
     if (seg->enabled) {
@@ -4660,16 +4659,16 @@
 
   // For 1 pass CBR, check if we are dropping this frame.
   // Never drop on key frame.
-  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+  if (oxcf->pass == 0 && oxcf->rc_mode == AOM_CBR &&
       cm->frame_type != KEY_FRAME) {
-    if (vp10_rc_drop_frame(cpi)) {
-      vp10_rc_postencode_update_drop_frame(cpi);
+    if (av1_rc_drop_frame(cpi)) {
+      av1_rc_postencode_update_drop_frame(cpi);
       ++cm->current_video_frame;
       return;
     }
   }
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
 #if CONFIG_INTERNAL_STATS
   memset(cpi->mode_chosen_counts, 0,
@@ -4684,7 +4683,7 @@
 
 #ifdef OUTPUT_YUV_SKINMAP
   if (cpi->common.current_video_frame > 1) {
-    vp10_compute_skin_map(cpi, yuv_skinmap_file);
+    av1_compute_skin_map(cpi, yuv_skinmap_file);
   }
 #endif  // OUTPUT_YUV_SKINMAP
 
@@ -4692,16 +4691,16 @@
   // fixed interval. Note the reconstruction error if it is the frame before
   // the force key frame
   if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       cpi->ambient_err =
-          vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          aom_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     } else {
-      cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+      cpi->ambient_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     }
 #else
-    cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    cpi->ambient_err = aom_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   // If the encoder forced a KEY_FRAME decision
@@ -4724,7 +4723,7 @@
   loopfilter_frame(cpi, cm);
 
   // Build the bitstream
-  vp10_pack_bitstream(cpi, dest, size);
+  av1_pack_bitstream(cpi, dest, size);
 
 #if DUMP_RECON_FRAMES == 1
   // NOTE(zoeliu): For debug - Output the filtered reconstructed video.
@@ -4737,24 +4736,24 @@
     release_scaled_references(cpi);
   }
 
-  vp10_update_reference_frames(cpi);
+  av1_update_reference_frames(cpi);
 
   for (t = TX_4X4; t <= TX_32X32; t++)
-    vp10_full_to_model_counts(cpi->td.counts->coef[t],
-                              cpi->td.rd_counts.coef_counts[t]);
+    av1_full_to_model_counts(cpi->td.counts->coef[t],
+                             cpi->td.rd_counts.coef_counts[t]);
 
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
 #if CONFIG_ENTROPY
     cm->partial_prob_update = 0;
 #endif  // CONFIG_ENTROPY
-    vp10_adapt_coef_probs(cm);
-    vp10_adapt_intra_frame_probs(cm);
+    av1_adapt_coef_probs(cm);
+    av1_adapt_intra_frame_probs(cm);
   }
 
   if (!frame_is_intra_only(cm)) {
     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-      vp10_adapt_inter_frame_probs(cm);
-      vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+      av1_adapt_inter_frame_probs(cm);
+      av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
     }
   }
 
@@ -4785,7 +4784,7 @@
 #endif  // CONFIG_EXT_REFS
   cm->last_frame_type = cm->frame_type;
 
-  vp10_rc_postencode_update(cpi, *size);
+  av1_rc_postencode_update(cpi, *size);
 
 #if 0
   output_frame_level_debug_stats(cpi);
@@ -4816,7 +4815,7 @@
 // TODO(zoeliu): We may only swamp mi and prev_mi for those frames that are
 // being used as reference.
 #endif  // CONFIG_EXT_REFS
-    vp10_swap_mi_and_prev_mi(cm);
+    av1_swap_mi_and_prev_mi(cm);
     // Don't increment frame counters if this was an altref buffer
     // update not a real frame
     ++cm->current_video_frame;
@@ -4829,17 +4828,17 @@
     cm->prev_frame = cm->cur_frame;
 }
 
-static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass0Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
                         unsigned int *frame_flags) {
-  if (cpi->oxcf.rc_mode == VPX_CBR) {
-    vp10_rc_get_one_pass_cbr_params(cpi);
+  if (cpi->oxcf.rc_mode == AOM_CBR) {
+    av1_rc_get_one_pass_cbr_params(cpi);
   } else {
-    vp10_rc_get_one_pass_vbr_params(cpi);
+    av1_rc_get_one_pass_vbr_params(cpi);
   }
   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
 }
 
-static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass2Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
                         unsigned int *frame_flags) {
   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
 
@@ -4850,15 +4849,15 @@
   // a gf group, but note that an OVERLAY frame always has a spot in a gf group,
   // even when show_existing_frame is used.
   if (!cpi->common.show_existing_frame || cpi->rc.is_src_frame_alt_ref) {
-    vp10_twopass_postencode_update(cpi);
+    av1_twopass_postencode_update(cpi);
   }
   check_show_existing_frame(cpi);
 #else
-  vp10_twopass_postencode_update(cpi);
+  av1_twopass_postencode_update(cpi);
 #endif  // CONFIG_EXT_REFS
 }
 
-static void init_ref_frame_bufs(VP10_COMMON *cm) {
+static void init_ref_frame_bufs(AV1_COMMON *cm) {
   int i;
   BufferPool *const pool = cm->buffer_pool;
   cm->new_fb_idx = INVALID_IDX;
@@ -4868,22 +4867,22 @@
   }
 }
 
-static void check_initial_width(VP10_COMP *cpi,
-#if CONFIG_VP9_HIGHBITDEPTH
+static void check_initial_width(AV1_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
                                 int use_highbitdepth,
 #endif
                                 int subsampling_x, int subsampling_y) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   if (!cpi->initial_width ||
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       cm->use_highbitdepth != use_highbitdepth ||
 #endif
       cm->subsampling_x != subsampling_x ||
       cm->subsampling_y != subsampling_y) {
     cm->subsampling_x = subsampling_x;
     cm->subsampling_y = subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = use_highbitdepth;
 #endif
 
@@ -4899,44 +4898,44 @@
   }
 }
 
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
-                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
-                           int64_t end_time) {
-  VP10_COMMON *const cm = &cpi->common;
-  struct vpx_usec_timer timer;
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
+                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                          int64_t end_time) {
+  AV1_COMMON *const cm = &cpi->common;
+  struct aom_usec_timer timer;
   int res = 0;
   const int subsampling_x = sd->subsampling_x;
   const int subsampling_y = sd->subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0;
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
 #else
   check_initial_width(cpi, subsampling_x, subsampling_y);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-  vpx_usec_timer_start(&timer);
+  aom_usec_timer_start(&timer);
 
-  if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
-#if CONFIG_VP9_HIGHBITDEPTH
-                          use_highbitdepth,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                          frame_flags))
+  if (av1_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_AOM_HIGHBITDEPTH
+                         use_highbitdepth,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                         frame_flags))
     res = -1;
-  vpx_usec_timer_mark(&timer);
-  cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+  aom_usec_timer_mark(&timer);
+  cpi->time_receive_data += aom_usec_timer_elapsed(&timer);
 
   if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) &&
       (subsampling_x != 1 || subsampling_y != 1)) {
-    vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+    aom_internal_error(&cm->error, AOM_CODEC_INVALID_PARAM,
                        "Non-4:2:0 color format requires profile 1 or 3");
     res = -1;
   }
   if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) &&
       (subsampling_x == 1 && subsampling_y == 1)) {
-    vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+    aom_internal_error(&cm->error, AOM_CODEC_INVALID_PARAM,
                        "4:2:0 color format requires profile 0 or 2");
     res = -1;
   }
@@ -4944,8 +4943,8 @@
   return res;
 }
 
-static int frame_is_reference(const VP10_COMP *cpi) {
-  const VP10_COMMON *cm = &cpi->common;
+static int frame_is_reference(const AV1_COMP *cpi) {
+  const AV1_COMMON *cm = &cpi->common;
 
   return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
          cpi->refresh_golden_frame ||
@@ -4957,7 +4956,7 @@
          cm->seg.update_data;
 }
 
-static void adjust_frame_rate(VP10_COMP *cpi,
+static void adjust_frame_rate(AV1_COMP *cpi,
                               const struct lookahead_entry *source) {
   int64_t this_duration;
   int step = 0;
@@ -4978,18 +4977,18 @@
 
   if (this_duration) {
     if (step) {
-      vp10_new_framerate(cpi, 10000000.0 / this_duration);
+      av1_new_framerate(cpi, 10000000.0 / this_duration);
     } else {
       // Average this frame's rate into the last second's average
       // frame rate. If we haven't seen 1 second yet, then average
       // over the whole interval seen.
-      const double interval = VPXMIN(
+      const double interval = AOMMIN(
           (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0);
       double avg_duration = 10000000.0 / cpi->framerate;
       avg_duration *= (interval - avg_duration + this_duration);
       avg_duration /= interval;
 
-      vp10_new_framerate(cpi, 10000000.0 / avg_duration);
+      av1_new_framerate(cpi, 10000000.0 / avg_duration);
     }
   }
   cpi->last_time_stamp_seen = source->ts_start;
@@ -4998,7 +4997,7 @@
 
 // Returns 0 if this is not an alt ref else the offset of the source frame
 // used as the arf midpoint.
-static int get_arf_src_index(VP10_COMP *cpi) {
+static int get_arf_src_index(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   int arf_src_index = 0;
   if (is_altref_enabled(cpi)) {
@@ -5015,7 +5014,7 @@
 }
 
 #if CONFIG_EXT_REFS
-static int get_brf_src_index(VP10_COMP *cpi) {
+static int get_brf_src_index(AV1_COMP *cpi) {
   int brf_src_index = 0;
   const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
 
@@ -5035,12 +5034,12 @@
 }
 #endif  // CONFIG_EXT_REFS
 
-static void check_src_altref(VP10_COMP *cpi,
+static void check_src_altref(AV1_COMP *cpi,
                              const struct lookahead_entry *source) {
   RATE_CONTROL *const rc = &cpi->rc;
 
   // If pass == 2, the parameters set here will be reset in
-  // vp10_rc_get_second_pass_params()
+  // av1_rc_get_second_pass_params()
 
   if (cpi->oxcf.pass == 2) {
     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@@ -5065,9 +5064,9 @@
 }
 
 #if CONFIG_INTERNAL_STATS
-extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
-                                  const unsigned char *img2, int img2_pitch,
-                                  int width, int height);
+extern double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
+                                 const unsigned char *img2, int img2_pitch,
+                                 int width, int height);
 
 static void adjust_image_stat(double y, double u, double v, double all,
                               ImageStat *s) {
@@ -5075,16 +5074,16 @@
   s->stat[U] += u;
   s->stat[V] += v;
   s->stat[ALL] += all;
-  s->worst = VPXMIN(s->worst, all);
+  s->worst = AOMMIN(s->worst, all);
 }
 
-static void compute_internal_stats(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void compute_internal_stats(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   double samples = 0.0;
   uint32_t in_bit_depth = 8;
   uint32_t bit_depth = 8;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth) {
     in_bit_depth = cpi->oxcf.input_bit_depth;
     bit_depth = cm->bit_depth;
@@ -5099,13 +5098,13 @@
     if (cpi->b_calculate_psnr) {
       PSNR_STATS psnr;
       double frame_ssim2 = 0.0, weight = 0.0;
-      vpx_clear_system_state();
+      aom_clear_system_state();
 // TODO(yaowu): unify these two versions into one.
-#if CONFIG_VP9_HIGHBITDEPTH
-      vpx_calc_highbd_psnr(orig, recon, &psnr, bit_depth, in_bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
+      aom_calc_highbd_psnr(orig, recon, &psnr, bit_depth, in_bit_depth);
 #else
-      vpx_calc_psnr(orig, recon, &psnr);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      aom_calc_psnr(orig, recon, &psnr);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3], psnr.psnr[0],
                         &cpi->psnr);
@@ -5113,17 +5112,17 @@
       cpi->total_samples += psnr.samples[0];
       samples = psnr.samples[0];
 // TODO(yaowu): unify these two versions into one.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth)
         frame_ssim2 =
-            vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth, in_bit_depth);
+            aom_highbd_calc_ssim(orig, recon, &weight, bit_depth, in_bit_depth);
       else
-        frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
+        frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
 #else
-      frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-      cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
+      cpi->worst_ssim = AOMMIN(cpi->worst_ssim, frame_ssim2);
       cpi->summed_quality += frame_ssim2 * weight;
       cpi->summed_weights += weight;
 
@@ -5138,54 +5137,54 @@
 #endif
     }
     if (cpi->b_calculate_blockiness) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (!cm->use_highbitdepth)
 #endif
       {
         const double frame_blockiness =
-            vp10_get_blockiness(orig->y_buffer, orig->y_stride, recon->y_buffer,
-                                recon->y_stride, orig->y_width, orig->y_height);
-        cpi->worst_blockiness = VPXMAX(cpi->worst_blockiness, frame_blockiness);
+            av1_get_blockiness(orig->y_buffer, orig->y_stride, recon->y_buffer,
+                               recon->y_stride, orig->y_width, orig->y_height);
+        cpi->worst_blockiness = AOMMAX(cpi->worst_blockiness, frame_blockiness);
         cpi->total_blockiness += frame_blockiness;
       }
 
       if (cpi->b_calculate_consistency) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (!cm->use_highbitdepth)
 #endif
         {
-          const double this_inconsistency = vpx_get_ssim_metrics(
+          const double this_inconsistency = aom_get_ssim_metrics(
               orig->y_buffer, orig->y_stride, recon->y_buffer, recon->y_stride,
               orig->y_width, orig->y_height, cpi->ssim_vars, &cpi->metrics, 1);
 
           const double peak = (double)((1 << in_bit_depth) - 1);
           const double consistency =
-              vpx_sse_to_psnr(samples, peak, cpi->total_inconsistency);
+              aom_sse_to_psnr(samples, peak, cpi->total_inconsistency);
           if (consistency > 0.0)
             cpi->worst_consistency =
-                VPXMIN(cpi->worst_consistency, consistency);
+                AOMMIN(cpi->worst_consistency, consistency);
           cpi->total_inconsistency += this_inconsistency;
         }
       }
     }
 
     frame_all =
-        vpx_calc_fastssim(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+        aom_calc_fastssim(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
     adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
-    frame_all = vpx_psnrhvs(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+    frame_all = aom_psnrhvs(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
     adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
   }
 }
 #endif  // CONFIG_INTERNAL_STATS
 
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
-                             size_t *size, uint8_t *dest, int64_t *time_stamp,
-                             int64_t *time_end, int flush) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
-  VP10_COMMON *const cm = &cpi->common;
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
+                            size_t *size, uint8_t *dest, int64_t *time_stamp,
+                            int64_t *time_end, int flush) {
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
   RATE_CONTROL *const rc = &cpi->rc;
-  struct vpx_usec_timer cmptimer;
+  struct aom_usec_timer cmptimer;
   YV12_BUFFER_CONFIG *force_src_buffer = NULL;
   struct lookahead_entry *last_source = NULL;
   struct lookahead_entry *source = NULL;
@@ -5201,9 +5200,9 @@
   bitstream_queue_record_write();
 #endif
 
-  vpx_usec_timer_start(&cmptimer);
+  aom_usec_timer_start(&cmptimer);
 
-  vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+  av1_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
 
   // Is multi-arf enabled.
   // Note that at the moment multi_arf is only configured for 2 pass VBR
@@ -5230,7 +5229,7 @@
   if (oxcf->pass == 2 && cm->show_existing_frame) {
     // Manage the source buffer and flush out the source frame that has been
     // coded already; Also get prepared for PSNR calculation if needed.
-    if ((source = vp10_lookahead_pop(cpi->lookahead, flush)) == NULL) {
+    if ((source = av1_lookahead_pop(cpi->lookahead, flush)) == NULL) {
       *size = 0;
       return -1;
     }
@@ -5255,14 +5254,14 @@
     if (cm->new_fb_idx == INVALID_IDX) return -1;
 
     // Clear down mmx registers
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     // Start with a 0 size frame.
     *size = 0;
 
     // We need to update the gf_group for show_existing overlay frame
     if (cpi->rc.is_src_frame_alt_ref) {
-      vp10_rc_get_second_pass_params(cpi);
+      av1_rc_get_second_pass_params(cpi);
     }
 
     Pass2Encode(cpi, size, dest, frame_flags);
@@ -5275,7 +5274,7 @@
 #endif  // CONFIG_INTERNAL_STATS
 
     // Clear down mmx registers
-    vpx_clear_system_state();
+    aom_clear_system_state();
 
     cm->show_existing_frame = 0;
     return 0;
@@ -5286,11 +5285,11 @@
   arf_src_index = get_arf_src_index(cpi);
   if (arf_src_index) {
     for (i = 0; i <= arf_src_index; ++i) {
-      struct lookahead_entry *e = vp10_lookahead_peek(cpi->lookahead, i);
+      struct lookahead_entry *e = av1_lookahead_peek(cpi->lookahead, i);
       // Avoid creating an alt-ref if there's a forced keyframe pending.
       if (e == NULL) {
         break;
-      } else if (e->flags == VPX_EFLAG_FORCE_KF) {
+      } else if (e->flags == AOM_EFLAG_FORCE_KF) {
         arf_src_index = 0;
         flush = 1;
         break;
@@ -5301,13 +5300,13 @@
   if (arf_src_index) {
     assert(arf_src_index <= rc->frames_to_key);
 
-    if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
+    if ((source = av1_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
       cpi->alt_ref_source = source;
 
       if (oxcf->arnr_max_frames > 0) {
         // Produce the filtered ARF frame.
-        vp10_temporal_filter(cpi, arf_src_index);
-        vpx_extend_frame_borders(&cpi->alt_ref_buffer);
+        av1_temporal_filter(cpi, arf_src_index);
+        aom_extend_frame_borders(&cpi->alt_ref_buffer);
         force_src_buffer = &cpi->alt_ref_buffer;
       }
 
@@ -5326,7 +5325,7 @@
   brf_src_index = get_brf_src_index(cpi);
   if (brf_src_index) {
     assert(brf_src_index <= rc->frames_to_key);
-    if ((source = vp10_lookahead_peek(cpi->lookahead, brf_src_index)) != NULL) {
+    if ((source = av1_lookahead_peek(cpi->lookahead, brf_src_index)) != NULL) {
       cm->show_frame = 0;
       cm->intra_only = 0;
 
@@ -5343,12 +5342,12 @@
   if (!source) {
     // Get last frame source.
     if (cm->current_video_frame > 0) {
-      if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL)
+      if ((last_source = av1_lookahead_peek(cpi->lookahead, -1)) == NULL)
         return -1;
     }
 
     // Read in the source frame.
-    source = vp10_lookahead_pop(cpi->lookahead, flush);
+    source = av1_lookahead_pop(cpi->lookahead, flush);
 
     if (source != NULL) {
       cm->show_frame = 1;
@@ -5367,12 +5366,12 @@
 
     *time_stamp = source->ts_start;
     *time_end = source->ts_end;
-    *frame_flags = (source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+    *frame_flags = (source->flags & AOM_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
 
   } else {
     *size = 0;
     if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
-      vp10_end_first_pass(cpi); /* get last stats packet */
+      av1_end_first_pass(cpi); /* get last stats packet */
       cpi->twopass.first_pass_done = 1;
     }
     return -1;
@@ -5384,7 +5383,7 @@
   }
 
   // Clear down mmx registers
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   // adjust frame rates based on timestamps given
   if (cm->show_frame) adjust_frame_rate(cpi, source);
@@ -5421,7 +5420,7 @@
   cpi->frame_flags = *frame_flags;
 
   if (oxcf->pass == 2) {
-    vp10_rc_get_second_pass_params(cpi);
+    av1_rc_get_second_pass_params(cpi);
   } else if (oxcf->pass == 1) {
     set_frame_size(cpi);
   }
@@ -5439,7 +5438,7 @@
 
   if (oxcf->pass == 1) {
     cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf);
-    vp10_first_pass(cpi, source);
+    av1_first_pass(cpi, source);
   } else if (oxcf->pass == 2) {
     Pass2Encode(cpi, size, dest, frame_flags);
   } else {
@@ -5459,8 +5458,8 @@
     cpi->droppable = !frame_is_reference(cpi);
   }
 
-  vpx_usec_timer_mark(&cmptimer);
-  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+  aom_usec_timer_mark(&cmptimer);
+  cpi->time_compress_data += aom_usec_timer_elapsed(&cmptimer);
 
   if (cpi->b_calculate_psnr && oxcf->pass != 1 && cm->show_frame)
     generate_psnr_packet(cpi);
@@ -5472,13 +5471,13 @@
   }
 #endif  // CONFIG_INTERNAL_STATS
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   return 0;
 }
 
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
-  VP10_COMMON *cm = &cpi->common;
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
+  AV1_COMMON *cm = &cpi->common;
   if (!cm->show_frame) {
     return -1;
   } else {
@@ -5493,12 +5492,12 @@
     } else {
       ret = -1;
     }
-    vpx_clear_system_state();
+    aom_clear_system_state();
     return ret;
   }
 }
 
-int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame) {
+int av1_get_last_show_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *frame) {
   if (cpi->last_show_frame_buf_idx == INVALID_IDX) return -1;
 
   *frame =
@@ -5506,9 +5505,9 @@
   return 0;
 }
 
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
-                           VPX_SCALING vert_mode) {
-  VP10_COMMON *cm = &cpi->common;
+int av1_set_internal_size(AV1_COMP *cpi, AOM_SCALING horiz_mode,
+                          AOM_SCALING vert_mode) {
+  AV1_COMMON *cm = &cpi->common;
   int hr = 0, hs = 0, vr = 0, vs = 0;
 
   if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
@@ -5527,14 +5526,14 @@
   return 0;
 }
 
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
-                          unsigned int height) {
-  VP10_COMMON *cm = &cpi->common;
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
+                         unsigned int height) {
+  AV1_COMMON *cm = &cpi->common;
+#if CONFIG_AOM_HIGHBITDEPTH
   check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
 #else
   check_initial_width(cpi, 1, 1);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (width) {
     cm->width = width;
@@ -5559,49 +5558,49 @@
   return 0;
 }
 
-int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
+int av1_get_quantizer(AV1_COMP *cpi) { return cpi->common.base_qindex; }
 
-void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) {
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags) {
   if (flags &
-      (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
-    int ref = VPX_REFFRAME_ALL;
+      (AOM_EFLAG_NO_REF_LAST | AOM_EFLAG_NO_REF_GF | AOM_EFLAG_NO_REF_ARF)) {
+    int ref = AOM_REFFRAME_ALL;
 
-    if (flags & VP8_EFLAG_NO_REF_LAST) {
-      ref ^= VPX_LAST_FLAG;
+    if (flags & AOM_EFLAG_NO_REF_LAST) {
+      ref ^= AOM_LAST_FLAG;
 #if CONFIG_EXT_REFS
-      ref ^= VPX_LAST2_FLAG;
-      ref ^= VPX_LAST3_FLAG;
+      ref ^= AOM_LAST2_FLAG;
+      ref ^= AOM_LAST3_FLAG;
 #endif  // CONFIG_EXT_REFS
     }
 
-    if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VPX_GOLD_FLAG;
+    if (flags & AOM_EFLAG_NO_REF_GF) ref ^= AOM_GOLD_FLAG;
 
-    if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
+    if (flags & AOM_EFLAG_NO_REF_ARF) ref ^= AOM_ALT_FLAG;
 
-    vp10_use_as_reference(cpi, ref);
+    av1_use_as_reference(cpi, ref);
   }
 
   if (flags &
-      (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
-       VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
-    int upd = VPX_REFFRAME_ALL;
+      (AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF | AOM_EFLAG_NO_UPD_ARF |
+       AOM_EFLAG_FORCE_GF | AOM_EFLAG_FORCE_ARF)) {
+    int upd = AOM_REFFRAME_ALL;
 
-    if (flags & VP8_EFLAG_NO_UPD_LAST) {
-      upd ^= VPX_LAST_FLAG;
+    if (flags & AOM_EFLAG_NO_UPD_LAST) {
+      upd ^= AOM_LAST_FLAG;
 #if CONFIG_EXT_REFS
-      upd ^= VPX_LAST2_FLAG;
-      upd ^= VPX_LAST3_FLAG;
+      upd ^= AOM_LAST2_FLAG;
+      upd ^= AOM_LAST3_FLAG;
 #endif  // CONFIG_EXT_REFS
     }
 
-    if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VPX_GOLD_FLAG;
+    if (flags & AOM_EFLAG_NO_UPD_GF) upd ^= AOM_GOLD_FLAG;
 
-    if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
+    if (flags & AOM_EFLAG_NO_UPD_ARF) upd ^= AOM_ALT_FLAG;
 
-    vp10_update_reference(cpi, upd);
+    av1_update_reference(cpi, upd);
   }
 
-  if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
-    vp10_update_entropy(cpi, 0);
+  if (flags & AOM_EFLAG_NO_UPD_ENTROPY) {
+    av1_update_entropy(cpi, 0);
   }
 }
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 719615b..821d2f1 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_ENCODER_H_
-#define VP10_ENCODER_ENCODER_H_
+#ifndef AV1_ENCODER_ENCODER_H_
+#define AV1_ENCODER_ENCODER_H_
 
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "aom/vp8cx.h"
+#include "./aom_config.h"
+#include "aom/aomcx.h"
 
 #include "av1/common/alloccommon.h"
 #include "av1/common/entropymode.h"
@@ -41,8 +41,8 @@
 #include "aom_dsp/ssim.h"
 #endif
 #include "aom_dsp/variance.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom_util/vpx_thread.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_util/aom_thread.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -100,7 +100,7 @@
   FOURFIVE = 1,
   THREEFIVE = 2,
   ONETWO = 3
-} VPX_SCALING;
+} AOM_SCALING;
 
 typedef enum {
   // Good Quality Fast Encoding. The encoder balances quality with the amount of
@@ -143,9 +143,9 @@
   RESIZE_DYNAMIC = 2  // Coded size of each frame is determined by the codec.
 } RESIZE_TYPE;
 
-typedef struct VP10EncoderConfig {
+typedef struct AV1EncoderConfig {
   BITSTREAM_PROFILE profile;
-  vpx_bit_depth_t bit_depth;     // Codec bit-depth.
+  aom_bit_depth_t bit_depth;     // Codec bit-depth.
   int width;                     // width of data passed to the compressor
   int height;                    // height of data passed to the compressor
   unsigned int input_bit_depth;  // Input bit depth.
@@ -175,7 +175,7 @@
   // DATARATE CONTROL OPTIONS
 
   // vbr, cbr, constrained quality or constant quality
-  enum vpx_rc_mode rc_mode;
+  enum aom_rc_mode rc_mode;
 
   // buffer targeting aggressiveness
   int under_shoot_pct;
@@ -246,29 +246,29 @@
 
   int max_threads;
 
-  vpx_fixed_buf_t two_pass_stats_in;
-  struct vpx_codec_pkt_list *output_pkt_list;
+  aom_fixed_buf_t two_pass_stats_in;
+  struct aom_codec_pkt_list *output_pkt_list;
 
 #if CONFIG_FP_MB_STATS
-  vpx_fixed_buf_t firstpass_mb_stats_in;
+  aom_fixed_buf_t firstpass_mb_stats_in;
 #endif
 
-  vpx_tune_metric tuning;
-  vpx_tune_content content;
-#if CONFIG_VP9_HIGHBITDEPTH
+  aom_tune_metric tuning;
+  aom_tune_content content;
+#if CONFIG_AOM_HIGHBITDEPTH
   int use_highbitdepth;
 #endif
-  vpx_color_space_t color_space;
+  aom_color_space_t color_space;
   int color_range;
   int render_width;
   int render_height;
 
 #if CONFIG_EXT_PARTITION
-  vpx_superblock_size_t superblock_size;
+  aom_superblock_size_t superblock_size;
 #endif  // CONFIG_EXT_PARTITION
-} VP10EncoderConfig;
+} AV1EncoderConfig;
 
-static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) {
+static INLINE int is_lossless_requested(const AV1EncoderConfig *cfg) {
   return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0;
 }
 
@@ -280,7 +280,7 @@
 } TileDataEnc;
 
 typedef struct RD_COUNTS {
-  vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
+  av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
   int64_t comp_pred_diff[REFERENCE_MODES];
   int m_search_count;
   int ex_search_count;
@@ -321,11 +321,11 @@
 
 #if CONFIG_ENTROPY
 typedef struct SUBFRAME_STATS {
-  vp10_coeff_probs_model coef_probs_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
-  vp10_coeff_count coef_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model coef_probs_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+  av1_coeff_count coef_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
   unsigned int eob_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES][REF_TYPES]
                              [COEF_BANDS][COEFF_CONTEXTS];
-  vp10_coeff_probs_model enc_starting_coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model enc_starting_coef_probs[TX_SIZES][PLANE_TYPES];
 } SUBFRAME_STATS;
 #endif  // CONFIG_ENTROPY
 
@@ -334,7 +334,7 @@
   size_t size;
 } TileBufferEnc;
 
-typedef struct VP10_COMP {
+typedef struct AV1_COMP {
   QUANTS quants;
   ThreadData td;
   MB_MODE_INFO_EXT *mbmi_ext_base;
@@ -346,8 +346,8 @@
   DECLARE_ALIGNED(16, dequant_val_type_nuq,
                   uv_dequant_val_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS]);
 #endif  // CONFIG_NEW_QUANT
-  VP10_COMMON common;
-  VP10EncoderConfig oxcf;
+  AV1_COMMON common;
+  AV1EncoderConfig oxcf;
   struct lookahead_ctx *lookahead;
   struct lookahead_entry *alt_ref_source;
 
@@ -431,7 +431,7 @@
   // sufficient space to the size of the maximum possible number of frames.
   int interp_filter_selected[REF_FRAMES + 1][SWITCHABLE];
 
-  struct vpx_codec_pkt_list *output_pkt_list;
+  struct aom_codec_pkt_list *output_pkt_list;
 
   MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
   int mbgraph_n_frames;  // number of frames filled in the above
@@ -461,9 +461,9 @@
   ActiveMap active_map;
 
   fractional_mv_step_fp *find_fractional_mv_step;
-  vp10_full_search_fn_t full_search_sad;  // It is currently unused.
-  vp10_diamond_search_fn_t diamond_search_sad;
-  vpx_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
+  av1_full_search_fn_t full_search_sad;  // It is currently unused.
+  av1_diamond_search_fn_t diamond_search_sad;
+  aom_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
   uint64_t time_receive_data;
   uint64_t time_compress_data;
   uint64_t time_pick_lpf;
@@ -609,14 +609,14 @@
 
   // Multi-threading
   int num_workers;
-  VPxWorker *workers;
+  AVxWorker *workers;
   struct EncWorkerData *tile_thr_data;
-  VP10LfSync lf_row_sync;
+  AV1LfSync lf_row_sync;
 #if CONFIG_ENTROPY
   SUBFRAME_STATS subframe_stats;
   // TODO(yaowu): minimize the size of count buffers
   SUBFRAME_STATS wholeframe_stats;
-  vp10_coeff_stats branch_ct_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+  av1_coeff_stats branch_ct_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
 #endif  // CONFIG_ENTROPY
 #if CONFIG_ANS
   struct BufAnsCoder buf_ans;
@@ -631,63 +631,63 @@
 #if CONFIG_GLOBAL_MOTION
   int global_motion_used[TOTAL_REFS_PER_FRAME];
 #endif
-} VP10_COMP;
+} AV1_COMP;
 
-void vp10_initialize_enc(void);
+void av1_initialize_enc(void);
 
-struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
-                                         BufferPool *const pool);
-void vp10_remove_compressor(VP10_COMP *cpi);
+struct AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
+                                       BufferPool *const pool);
+void av1_remove_compressor(AV1_COMP *cpi);
 
-void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
+void av1_change_config(AV1_COMP *cpi, const AV1EncoderConfig *oxcf);
 
 // receive a frames worth of data. caller can assume that a copy of this
 // frame is made and not just a copy of the pointer..
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
-                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
-                           int64_t end_time_stamp);
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
+                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                          int64_t end_time_stamp);
 
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
-                             size_t *size, uint8_t *dest, int64_t *time_stamp,
-                             int64_t *time_end, int flush);
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
+                            size_t *size, uint8_t *dest, int64_t *time_stamp,
+                            int64_t *time_end, int flush);
 
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest);
 
-int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame);
+int av1_get_last_show_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *frame);
 
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags);
 
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags);
 
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                            YV12_BUFFER_CONFIG *sd);
-
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
                            YV12_BUFFER_CONFIG *sd);
 
-int vp10_update_entropy(VP10_COMP *cpi, int update);
+int av1_set_reference_enc(AV1_COMP *cpi, AOM_REFFRAME ref_frame_flag,
+                          YV12_BUFFER_CONFIG *sd);
 
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_update_entropy(AV1_COMP *cpi, int update);
 
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
 
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
-                           VPX_SCALING vert_mode);
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
 
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
-                          unsigned int height);
+int av1_set_internal_size(AV1_COMP *cpi, AOM_SCALING horiz_mode,
+                          AOM_SCALING vert_mode);
 
-int vp10_get_quantizer(struct VP10_COMP *cpi);
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
+                         unsigned int height);
 
-void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
-                               vp10_coeff_count *full_count);
+int av1_get_quantizer(struct AV1_COMP *cpi);
 
-static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
+void av1_full_to_model_counts(av1_coeff_count_model *model_count,
+                              av1_coeff_count *full_count);
+
+static INLINE int frame_is_kf_gf_arf(const AV1_COMP *cpi) {
   return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
          (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
 }
 
-static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
+static INLINE int get_ref_frame_map_idx(const AV1_COMP *cpi,
                                         MV_REFERENCE_FRAME ref_frame) {
 #if CONFIG_EXT_REFS
   if (ref_frame >= LAST_FRAME && ref_frame <= LAST3_FRAME)
@@ -705,23 +705,23 @@
     return cpi->alt_fb_idx;
 }
 
-static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi,
+static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
                                         MV_REFERENCE_FRAME ref_frame) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
   return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : INVALID_IDX;
 }
 
 static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
-    VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+    AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+  AV1_COMMON *const cm = &cpi->common;
   const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
                                 : NULL;
 }
 
 static INLINE const YV12_BUFFER_CONFIG *get_upsampled_ref(
-    VP10_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
+    AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
   // Use up-sampled reference frames.
   const int buf_idx =
       cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)];
@@ -729,10 +729,9 @@
 }
 
 #if CONFIG_EXT_REFS
-static INLINE int enc_is_ref_frame_buf(VP10_COMP *cpi,
-                                       RefCntBuffer *frame_buf) {
+static INLINE int enc_is_ref_frame_buf(AV1_COMP *cpi, RefCntBuffer *frame_buf) {
   MV_REFERENCE_FRAME ref_frame;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
     const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
     if (buf_idx == INVALID_IDX) continue;
@@ -760,32 +759,32 @@
   return get_token_alloc(tile_mb_rows, tile_mb_cols);
 }
 
-void vp10_alloc_compressor_data(VP10_COMP *cpi);
+void av1_alloc_compressor_data(AV1_COMP *cpi);
 
-void vp10_scale_references(VP10_COMP *cpi);
+void av1_scale_references(AV1_COMP *cpi);
 
-void vp10_update_reference_frames(VP10_COMP *cpi);
+void av1_update_reference_frames(AV1_COMP *cpi);
 
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv);
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
-                                                YV12_BUFFER_CONFIG *unscaled,
-                                                YV12_BUFFER_CONFIG *scaled);
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
+                                               YV12_BUFFER_CONFIG *unscaled,
+                                               YV12_BUFFER_CONFIG *scaled);
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
-                                           YV12_BUFFER_CONFIG *unscaled,
-                                           YV12_BUFFER_CONFIG *scaled);
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
+                                          YV12_BUFFER_CONFIG *unscaled,
+                                          YV12_BUFFER_CONFIG *scaled);
 
-void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags);
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags);
 
-static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_altref_enabled(const AV1_COMP *const cpi) {
   return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
          cpi->oxcf.enable_auto_arf;
 }
 
 // TODO(zoeliu): To set up cpi->oxcf.enable_auto_brf
 #if 0 && CONFIG_EXT_REFS
-static INLINE int is_bwdref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_bwdref_enabled(const AV1_COMP *const cpi) {
   // NOTE(zoeliu): The enabling of bi-predictive frames depends on the use of
   //               alt_ref, and now will be off when the alt_ref interval is
   //               not sufficiently large.
@@ -793,7 +792,7 @@
 }
 #endif  // CONFIG_EXT_REFS
 
-static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
                                 MV_REFERENCE_FRAME ref0,
                                 MV_REFERENCE_FRAME ref1) {
   xd->block_refs[0] =
@@ -806,11 +805,11 @@
   return frame_index & 0x1;
 }
 
-static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) {
+static INLINE int *cond_cost_list(const struct AV1_COMP *cpi, int *cost_list) {
   return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL;
 }
 
-void vp10_new_framerate(VP10_COMP *cpi, double framerate);
+void av1_new_framerate(AV1_COMP *cpi, double framerate);
 
 #define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl))
 
@@ -830,4 +829,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODER_H_
+#endif  // AV1_ENCODER_ENCODER_H_
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index 63d716c..d4c0a7a 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -11,7 +11,7 @@
 #include "av1/encoder/encodeframe.h"
 #include "av1/encoder/encoder.h"
 #include "av1/encoder/ethread.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
   int i, j, k, l, m, n;
@@ -34,8 +34,8 @@
 }
 
 static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
-  VP10_COMP *const cpi = thread_data->cpi;
-  const VP10_COMMON *const cm = &cpi->common;
+  AV1_COMP *const cpi = thread_data->cpi;
+  const AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = cm->tile_cols;
   const int tile_rows = cm->tile_rows;
   int t;
@@ -47,31 +47,31 @@
     int tile_row = t / tile_cols;
     int tile_col = t % tile_cols;
 
-    vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+    av1_encode_tile(cpi, thread_data->td, tile_row, tile_col);
   }
 
   return 0;
 }
 
-void vp10_encode_tiles_mt(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tiles_mt(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = cm->tile_cols;
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
-  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
+  const int num_workers = AOMMIN(cpi->oxcf.max_threads, tile_cols);
   int i;
 
-  vp10_init_tile_data(cpi);
+  av1_init_tile_data(cpi);
 
   // Only run once to create threads and allocate thread data.
   if (cpi->num_workers == 0) {
     CHECK_MEM_ERROR(cm, cpi->workers,
-                    vpx_malloc(num_workers * sizeof(*cpi->workers)));
+                    aom_malloc(num_workers * sizeof(*cpi->workers)));
 
     CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
-                    vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
+                    aom_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
 
     for (i = 0; i < num_workers; i++) {
-      VPxWorker *const worker = &cpi->workers[i];
+      AVxWorker *const worker = &cpi->workers[i];
       EncWorkerData *const thread_data = &cpi->tile_thr_data[i];
 
       ++cpi->num_workers;
@@ -82,25 +82,25 @@
       if (i < num_workers - 1) {
         // Allocate thread data.
         CHECK_MEM_ERROR(cm, thread_data->td,
-                        vpx_memalign(32, sizeof(*thread_data->td)));
-        vp10_zero(*thread_data->td);
+                        aom_memalign(32, sizeof(*thread_data->td)));
+        av1_zero(*thread_data->td);
 
         // Set up pc_tree.
         thread_data->td->leaf_tree = NULL;
         thread_data->td->pc_tree = NULL;
-        vp10_setup_pc_tree(cm, thread_data->td);
+        av1_setup_pc_tree(cm, thread_data->td);
 
         // Set up variance tree if needed.
         if (cpi->sf.partition_search_type == VAR_BASED_PARTITION)
-          vp10_setup_var_tree(cm, &cpi->td);
+          av1_setup_var_tree(cm, &cpi->td);
 
         // Allocate frame counters in thread data.
         CHECK_MEM_ERROR(cm, thread_data->td->counts,
-                        vpx_calloc(1, sizeof(*thread_data->td->counts)));
+                        aom_calloc(1, sizeof(*thread_data->td->counts)));
 
         // Create threads
         if (!winterface->reset(worker))
-          vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+          aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                              "Tile encoder thread creation failed");
       } else {
         // Main thread acts as a worker and uses the thread data in cpi.
@@ -112,10 +112,10 @@
   }
 
   for (i = 0; i < num_workers; i++) {
-    VPxWorker *const worker = &cpi->workers[i];
+    AVxWorker *const worker = &cpi->workers[i];
     EncWorkerData *thread_data;
 
-    worker->hook = (VPxWorkerHook)enc_worker_hook;
+    worker->hook = (AVxWorkerHook)enc_worker_hook;
     worker->data1 = &cpi->tile_thr_data[i];
     worker->data2 = NULL;
     thread_data = (EncWorkerData *)worker->data1;
@@ -134,13 +134,13 @@
     if (cpi->common.allow_screen_content_tools && i < num_workers - 1) {
       MACROBLOCK *x = &thread_data->td->mb;
       CHECK_MEM_ERROR(cm, x->palette_buffer,
-                      vpx_memalign(16, sizeof(*x->palette_buffer)));
+                      aom_memalign(16, sizeof(*x->palette_buffer)));
     }
   }
 
   // Encode a frame
   for (i = 0; i < num_workers; i++) {
-    VPxWorker *const worker = &cpi->workers[i];
+    AVxWorker *const worker = &cpi->workers[i];
     EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
 
     // Set the starting tile for each thread.
@@ -154,17 +154,17 @@
 
   // Encoding ends.
   for (i = 0; i < num_workers; i++) {
-    VPxWorker *const worker = &cpi->workers[i];
+    AVxWorker *const worker = &cpi->workers[i];
     winterface->sync(worker);
   }
 
   for (i = 0; i < num_workers; i++) {
-    VPxWorker *const worker = &cpi->workers[i];
+    AVxWorker *const worker = &cpi->workers[i];
     EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
 
     // Accumulate counters.
     if (i < cpi->num_workers - 1) {
-      vp10_accumulate_frame_counts(cm, thread_data->td->counts);
+      av1_accumulate_frame_counts(cm, thread_data->td->counts);
       accumulate_rd_opt(&cpi->td, thread_data->td);
     }
   }
diff --git a/av1/encoder/ethread.h b/av1/encoder/ethread.h
index d72816c..161c68f 100644
--- a/av1/encoder/ethread.h
+++ b/av1/encoder/ethread.h
@@ -8,26 +8,26 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_ETHREAD_H_
-#define VP10_ENCODER_ETHREAD_H_
+#ifndef AV1_ENCODER_ETHREAD_H_
+#define AV1_ENCODER_ETHREAD_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
 typedef struct EncWorkerData {
-  struct VP10_COMP *cpi;
+  struct AV1_COMP *cpi;
   struct ThreadData *td;
   int start;
 } EncWorkerData;
 
-void vp10_encode_tiles_mt(struct VP10_COMP *cpi);
+void av1_encode_tiles_mt(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ETHREAD_H_
+#endif  // AV1_ENCODER_ETHREAD_H_
diff --git a/av1/encoder/extend.c b/av1/encoder/extend.c
index 1b0c442..13e529b 100644
--- a/av1/encoder/extend.c
+++ b/av1/encoder/extend.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/common.h"
@@ -56,7 +56,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
                                          uint8_t *dst8, int dst_pitch, int w,
                                          int h, int extend_top, int extend_left,
@@ -72,9 +72,9 @@
   uint16_t *dst_ptr2 = dst + w;
 
   for (i = 0; i < h; i++) {
-    vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
+    aom_memset16(dst_ptr1, src_ptr1[0], extend_left);
     memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(src_ptr1[0]));
-    vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
+    aom_memset16(dst_ptr2, src_ptr2[0], extend_right);
     src_ptr1 += src_pitch;
     src_ptr2 += src_pitch;
     dst_ptr1 += dst_pitch;
@@ -99,10 +99,10 @@
     dst_ptr2 += dst_pitch;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
-                                YV12_BUFFER_CONFIG *dst) {
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                               YV12_BUFFER_CONFIG *dst) {
   // Extend src frame in buffer
   // Altref filtering assumes 16 pixel extension
   const int et_y = 16;
@@ -111,10 +111,10 @@
   // to 64x64, so the right and bottom need to be extended to 64 multiple
   // or up to 16, whichever is greater.
   const int er_y =
-      VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) -
+      AOMMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) -
       src->y_crop_width;
   const int eb_y =
-      VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) -
+      AOMMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) -
       src->y_crop_height;
   const int uv_width_subsampling = (src->uv_width != src->y_width);
   const int uv_height_subsampling = (src->uv_height != src->y_height);
@@ -123,7 +123,7 @@
   const int eb_uv = eb_y >> uv_height_subsampling;
   const int er_uv = er_y >> uv_width_subsampling;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
                                  dst->y_stride, src->y_crop_width,
@@ -138,7 +138,7 @@
         src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
                         dst->y_stride, src->y_crop_width, src->y_crop_height,
@@ -153,9 +153,9 @@
                         et_uv, el_uv, eb_uv, er_uv);
 }
 
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
-                                          YV12_BUFFER_CONFIG *dst, int srcy,
-                                          int srcx, int srch, int srcw) {
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+                                         YV12_BUFFER_CONFIG *dst, int srcy,
+                                         int srcx, int srch, int srcw) {
   // If the side is not touching the bounder then don't extend.
   const int et_y = srcy ? 0 : dst->border;
   const int el_y = srcx ? 0 : dst->border;
diff --git a/av1/encoder/extend.h b/av1/encoder/extend.h
index 1ad763e..2c436de 100644
--- a/av1/encoder/extend.h
+++ b/av1/encoder/extend.h
@@ -8,24 +8,24 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_EXTEND_H_
-#define VP10_ENCODER_EXTEND_H_
+#ifndef AV1_ENCODER_EXTEND_H_
+#define AV1_ENCODER_EXTEND_H_
 
 #include "aom_scale/yv12config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
-                                YV12_BUFFER_CONFIG *dst);
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                               YV12_BUFFER_CONFIG *dst);
 
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
-                                          YV12_BUFFER_CONFIG *dst, int srcy,
-                                          int srcx, int srch, int srcw);
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+                                         YV12_BUFFER_CONFIG *dst, int srcy,
+                                         int srcx, int srch, int srcw);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_EXTEND_H_
+#endif  // AV1_ENCODER_EXTEND_H_
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index b23b839..61a799c 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -12,19 +12,19 @@
 #include <math.h>
 #include <stdio.h>
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_scale/aom_scale.h"
 #include "aom_scale/yv12config.h"
 
 #include "av1/common/entropymv.h"
 #include "av1/common/quant_common.h"
-#include "av1/common/reconinter.h"  // vp10_setup_dst_planes()
+#include "av1/common/reconinter.h"  // av1_setup_dst_planes()
 #include "av1/encoder/aq_variance.h"
 #include "av1/encoder/block.h"
 #include "av1/encoder/encodeframe.h"
@@ -95,12 +95,12 @@
 }
 
 static void output_stats(FIRSTPASS_STATS *stats,
-                         struct vpx_codec_pkt_list *pktlist) {
-  struct vpx_codec_cx_pkt pkt;
-  pkt.kind = VPX_CODEC_STATS_PKT;
+                         struct aom_codec_pkt_list *pktlist) {
+  struct aom_codec_cx_pkt pkt;
+  pkt.kind = AOM_CODEC_STATS_PKT;
   pkt.data.twopass_stats.buf = stats;
   pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
-  vpx_codec_pkt_list_add(pktlist, &pkt);
+  aom_codec_pkt_list_add(pktlist, &pkt);
 
 // TEMP debug code
 #if OUTPUT_FPF
@@ -125,13 +125,13 @@
 }
 
 #if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
-                              struct vpx_codec_pkt_list *pktlist) {
-  struct vpx_codec_cx_pkt pkt;
-  pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, AV1_COMMON *cm,
+                              struct aom_codec_pkt_list *pktlist) {
+  struct aom_codec_cx_pkt pkt;
+  pkt.kind = AOM_CODEC_FPMB_STATS_PKT;
   pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats;
   pkt.data.firstpass_mb_stats.sz = cm->initial_mbs * sizeof(uint8_t);
-  vpx_codec_pkt_list_add(pktlist, &pkt);
+  aom_codec_pkt_list_add(pktlist, &pkt);
 }
 #endif
 
@@ -214,7 +214,7 @@
 
 // Calculate the linear size relative to a baseline of 1080P
 #define BASE_SIZE 2073600.0  // 1920x1080
-static double get_linear_size_factor(const VP10_COMP *cpi) {
+static double get_linear_size_factor(const AV1_COMP *cpi) {
   const double this_area = cpi->initial_width * cpi->initial_height;
   return pow(this_area / BASE_SIZE, 0.5);
 }
@@ -223,7 +223,7 @@
 // bars and partially discounts other 0 energy areas.
 #define MIN_ACTIVE_AREA 0.5
 #define MAX_ACTIVE_AREA 1.0
-static double calculate_active_area(const VP10_COMP *cpi,
+static double calculate_active_area(const AV1_COMP *cpi,
                                     const FIRSTPASS_STATS *this_frame) {
   double active_pct;
 
@@ -237,9 +237,9 @@
 // Calculate a modified Error used in distributing bits between easier and
 // harder frames.
 #define ACT_AREA_CORRECTION 0.5
-static double calculate_modified_err(const VP10_COMP *cpi,
+static double calculate_modified_err(const AV1_COMP *cpi,
                                      const TWO_PASS *twopass,
-                                     const VP10EncoderConfig *oxcf,
+                                     const AV1EncoderConfig *oxcf,
                                      const FIRSTPASS_STATS *this_frame) {
   const FIRSTPASS_STATS *const stats = &twopass->total_stats;
   const double av_weight = stats->weight / stats->count;
@@ -263,7 +263,7 @@
 
 // This function returns the maximum target rate per frame.
 static int frame_max_bits(const RATE_CONTROL *rc,
-                          const VP10EncoderConfig *oxcf) {
+                          const AV1EncoderConfig *oxcf) {
   int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
                       (int64_t)oxcf->two_pass_vbrmax_section) /
                      100;
@@ -275,20 +275,20 @@
   return (int)max_bits;
 }
 
-void vp10_init_first_pass(VP10_COMP *cpi) {
+void av1_init_first_pass(AV1_COMP *cpi) {
   zero_stats(&cpi->twopass.total_stats);
 }
 
-void vp10_end_first_pass(VP10_COMP *cpi) {
+void av1_end_first_pass(AV1_COMP *cpi) {
   output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
 }
 
-static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
+static aom_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
   switch (bsize) {
-    case BLOCK_8X8: return vpx_mse8x8;
-    case BLOCK_16X8: return vpx_mse16x8;
-    case BLOCK_8X16: return vpx_mse8x16;
-    default: return vpx_mse16x16;
+    case BLOCK_8X8: return aom_mse8x8;
+    case BLOCK_16X8: return aom_mse16x8;
+    case BLOCK_8X16: return aom_mse8x16;
+    default: return aom_mse16x16;
   }
 }
 
@@ -296,37 +296,37 @@
                                          const struct buf_2d *src,
                                          const struct buf_2d *ref) {
   unsigned int sse;
-  const vpx_variance_fn_t fn = get_block_variance_fn(bsize);
+  const aom_variance_fn_t fn = get_block_variance_fn(bsize);
   fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
   return sse;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
+#if CONFIG_AOM_HIGHBITDEPTH
+static aom_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
                                                       int bd) {
   switch (bd) {
     default:
       switch (bsize) {
-        case BLOCK_8X8: return vpx_highbd_8_mse8x8;
-        case BLOCK_16X8: return vpx_highbd_8_mse16x8;
-        case BLOCK_8X16: return vpx_highbd_8_mse8x16;
-        default: return vpx_highbd_8_mse16x16;
+        case BLOCK_8X8: return aom_highbd_8_mse8x8;
+        case BLOCK_16X8: return aom_highbd_8_mse16x8;
+        case BLOCK_8X16: return aom_highbd_8_mse8x16;
+        default: return aom_highbd_8_mse16x16;
       }
       break;
     case 10:
       switch (bsize) {
-        case BLOCK_8X8: return vpx_highbd_10_mse8x8;
-        case BLOCK_16X8: return vpx_highbd_10_mse16x8;
-        case BLOCK_8X16: return vpx_highbd_10_mse8x16;
-        default: return vpx_highbd_10_mse16x16;
+        case BLOCK_8X8: return aom_highbd_10_mse8x8;
+        case BLOCK_16X8: return aom_highbd_10_mse16x8;
+        case BLOCK_8X16: return aom_highbd_10_mse8x16;
+        default: return aom_highbd_10_mse16x16;
       }
       break;
     case 12:
       switch (bsize) {
-        case BLOCK_8X8: return vpx_highbd_12_mse8x8;
-        case BLOCK_16X8: return vpx_highbd_12_mse16x8;
-        case BLOCK_8X16: return vpx_highbd_12_mse8x16;
-        default: return vpx_highbd_12_mse16x16;
+        case BLOCK_8X8: return aom_highbd_12_mse8x8;
+        case BLOCK_16X8: return aom_highbd_12_mse16x8;
+        case BLOCK_8X16: return aom_highbd_12_mse8x16;
+        default: return aom_highbd_12_mse16x16;
       }
       break;
   }
@@ -337,23 +337,23 @@
                                                 const struct buf_2d *ref,
                                                 int bd) {
   unsigned int sse;
-  const vpx_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd);
+  const aom_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd);
   fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
   return sse;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Refine the motion search range according to the frame dimension
 // for first pass test.
-static int get_search_range(const VP10_COMP *cpi) {
+static int get_search_range(const AV1_COMP *cpi) {
   int sr = 0;
-  const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
+  const int dim = AOMMIN(cpi->initial_width, cpi->initial_height);
 
   while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
   return sr;
 }
 
-static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void first_pass_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
                                      const MV *ref_mv, MV *best_mv,
                                      int *best_motion_err) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -361,7 +361,7 @@
   MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
   int num00, tmp_err, n;
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
-  vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
+  aom_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
   const int new_mv_mode_penalty = NEW_MV_MODE_PENALTY;
 
   int step_param = 3;
@@ -372,18 +372,18 @@
 
   // Override the default variance function to use MSE.
   v_fn_ptr.vf = get_block_variance_fn(bsize);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd);
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Center the initial step/diamond search on best mv.
   tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
                                     step_param, x->sadperbit16, &num00,
                                     &v_fn_ptr, ref_mv);
   if (tmp_err < INT_MAX)
-    tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+    tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
   if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
 
   if (tmp_err < *best_motion_err) {
@@ -405,7 +405,7 @@
                                         step_param + n, x->sadperbit16, &num00,
                                         &v_fn_ptr, ref_mv);
       if (tmp_err < INT_MAX)
-        tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+        tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
       if (tmp_err < INT_MAX - new_mv_mode_penalty)
         tmp_err += new_mv_mode_penalty;
 
@@ -417,7 +417,7 @@
   }
 }
 
-static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
+static BLOCK_SIZE get_bsize(const AV1_COMMON *cm, int mb_row, int mb_col) {
   if (2 * mb_col + 1 < cm->mi_cols) {
     return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
   } else {
@@ -425,19 +425,19 @@
   }
 }
 
-static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
+static int find_fp_qindex(aom_bit_depth_t bit_depth) {
   int i;
 
   for (i = 0; i < QINDEX_RANGE; ++i)
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
 
   if (i == QINDEX_RANGE) i--;
 
   return i;
 }
 
-static void set_first_pass_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_first_pass_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   if (!cpi->refresh_alt_ref_frame &&
       (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
     cm->frame_type = KEY_FRAME;
@@ -450,10 +450,10 @@
 
 #define UL_INTRA_THRESH 50
 #define INVALID_ROW -1
-void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
+void av1_first_pass(AV1_COMP *cpi, const struct lookahead_entry *source) {
   int mb_row, mb_col;
   MACROBLOCK *const x = &cpi->td.mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   TileInfo tile;
   struct macroblock_plane *const p = x->plane;
@@ -498,32 +498,32 @@
 
 #if CONFIG_FP_MB_STATS
   if (cpi->use_fp_mb_stats) {
-    vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
+    av1_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
   }
 #endif
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   intra_factor = 0.0;
   brightness_factor = 0.0;
   neutral_count = 0.0;
 
   set_first_pass_params(cpi);
-  vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
+  av1_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
 
-  vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
 
-  vp10_setup_src_planes(x, cpi->Source, 0, 0);
-  vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0);
+  av1_setup_src_planes(x, cpi->Source, 0, 0);
+  av1_setup_dst_planes(xd->plane, new_yv12, 0, 0);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
+    av1_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
   }
 
   xd->mi = cm->mi_grid_visible;
   xd->mi[0] = cm->mi;
 
-  vp10_frame_init_quantizer(cpi);
+  av1_frame_init_quantizer(cpi);
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     p[i].coeff = ctx->coeff[i][1];
@@ -532,11 +532,11 @@
     p[i].eobs = ctx->eobs[i][1];
   }
 
-  vp10_init_mv_probs(cm);
-  vp10_initialize_rd_consts(cpi);
+  av1_init_mv_probs(cm);
+  av1_initialize_rd_consts(cpi);
 
   // Tiling is ignored in the first pass.
-  vp10_tile_init(&tile, cm, 0, 0);
+  av1_tile_init(&tile, cm, 0, 0);
 
   recon_y_stride = new_yv12->y_stride;
   recon_uv_stride = new_yv12->uv_stride;
@@ -566,7 +566,7 @@
       const int mb_index = mb_row * cm->mb_cols + mb_col;
 #endif
 
-      vpx_clear_system_state();
+      aom_clear_system_state();
 
       xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
       xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
@@ -586,8 +586,8 @@
       xd->mi[0]->mbmi.mode = DC_PRED;
       xd->mi[0]->mbmi.tx_size =
           use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
-      vp10_encode_intra_block_plane(x, bsize, 0, 0);
-      this_error = vpx_get_mb_ss(x->plane[0].src_diff);
+      av1_encode_intra_block_plane(x, bsize, 0, 0);
+      this_error = aom_get_mb_ss(x->plane[0].src_diff);
 
       // Keep a record of blocks that have almost no intra error residual
       // (i.e. are in effect completely flat and untextured in the intra
@@ -600,29 +600,29 @@
         image_data_start_row = mb_row;
       }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         switch (cm->bit_depth) {
-          case VPX_BITS_8: break;
-          case VPX_BITS_10: this_error >>= 4; break;
-          case VPX_BITS_12: this_error >>= 8; break;
+          case AOM_BITS_8: break;
+          case AOM_BITS_10: this_error >>= 4; break;
+          case AOM_BITS_12: this_error >>= 8; break;
           default:
             assert(0 &&
-                   "cm->bit_depth should be VPX_BITS_8, "
-                   "VPX_BITS_10 or VPX_BITS_12");
+                   "cm->bit_depth should be AOM_BITS_8, "
+                   "AOM_BITS_10 or AOM_BITS_12");
             return;
         }
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-      vpx_clear_system_state();
+      aom_clear_system_state();
       log_intra = log(this_error + 1.0);
       if (log_intra < 10.0)
         intra_factor += 1.0 + ((10.0 - log_intra) * 0.05);
       else
         intra_factor += 1.0;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth)
         level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0];
       else
@@ -667,7 +667,7 @@
         struct buf_2d unscaled_last_source_buf_2d;
 
         xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
           motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -678,7 +678,7 @@
 #else
         motion_error =
             get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // Compute the motion error of the 0,0 motion using the last source
         // frame as the reference. Skip the further motion search on
@@ -687,7 +687,7 @@
             cpi->unscaled_last_source->y_buffer + recon_yoffset;
         unscaled_last_source_buf_2d.stride =
             cpi->unscaled_last_source->y_stride;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
           raw_motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
@@ -698,7 +698,7 @@
 #else
         raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
                                                 &unscaled_last_source_buf_2d);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // TODO(pengchong): Replace the hard-coded threshold
         if (raw_motion_error > 25) {
@@ -724,7 +724,7 @@
             int gf_motion_error;
 
             xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
               gf_motion_error = highbd_get_prediction_error(
                   bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -735,7 +735,7 @@
 #else
             gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
                                                    &xd->plane[0].pre[0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
             first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
                                      &gf_motion_error);
@@ -782,7 +782,7 @@
 #endif
 
         if (motion_error <= this_error) {
-          vpx_clear_system_state();
+          aom_clear_system_state();
 
           // Keep a count of cases where the inter and intra were very close
           // and very low. This helps with scene cut detection for example in
@@ -806,8 +806,8 @@
           xd->mi[0]->mbmi.tx_size = TX_4X4;
           xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
           xd->mi[0]->mbmi.ref_frame[1] = NONE;
-          vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
-          vp10_encode_sby_pass1(x, bsize);
+          av1_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
+          av1_encode_sby_pass1(x, bsize);
           sum_mvr += mv.row;
           sum_mvr_abs += abs(mv.row);
           sum_mvc += mv.col;
@@ -916,7 +916,7 @@
     x->plane[2].src.buf +=
         uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
 
-    vpx_clear_system_state();
+    aom_clear_system_state();
   }
 
   // Clamp the image start to rows/2. This number of rows is discarded top
@@ -928,7 +928,7 @@
   // Exclude any image dead zone
   if (image_data_start_row > 0) {
     intra_skip_count =
-        VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2));
+        AOMMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2));
   }
 
   {
@@ -1021,7 +1021,7 @@
     ++twopass->sr_update_lag;
   }
 
-  vpx_extend_frame_borders(new_yv12);
+  aom_extend_frame_borders(new_yv12);
 
 // The frame we just compressed now becomes the last frame.
 #if CONFIG_EXT_REFS
@@ -1066,12 +1066,12 @@
 
 static double calc_correction_factor(double err_per_mb, double err_divisor,
                                      double pt_low, double pt_high, int q,
-                                     vpx_bit_depth_t bit_depth) {
+                                     aom_bit_depth_t bit_depth) {
   const double error_term = err_per_mb / err_divisor;
 
   // Adjustment based on actual quantizer to power term.
   const double power_term =
-      VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
+      AOMMIN(av1_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
 
   // Calculate correction factor.
   if (power_term < 1.0) assert(error_term >= 0.0);
@@ -1080,13 +1080,13 @@
 }
 
 #define ERR_DIVISOR 100.0
-static int get_twopass_worst_quality(const VP10_COMP *cpi,
+static int get_twopass_worst_quality(const AV1_COMP *cpi,
                                      const double section_err,
                                      double inactive_zone,
                                      int section_target_bandwidth,
                                      double group_weight_factor) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
 
@@ -1096,7 +1096,7 @@
     const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
                             ? cpi->initial_mbs
                             : cpi->common.MBs;
-    const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
+    const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
     const double av_err_per_mb = section_err / active_mbs;
     const double speed_term = 1.0 + 0.04 * oxcf->speed;
     double ediv_size_correction;
@@ -1110,7 +1110,7 @@
     // motion vectors. Some account of this is made through adjustment of
     // the error divisor.
     ediv_size_correction =
-        VPXMAX(0.2, VPXMIN(5.0, get_linear_size_factor(cpi)));
+        AOMMAX(0.2, AOMMIN(5.0, get_linear_size_factor(cpi)));
     if (ediv_size_correction < 1.0)
       ediv_size_correction = -(1.0 / ediv_size_correction);
     ediv_size_correction *= 4.0;
@@ -1121,29 +1121,29 @@
       const double factor = calc_correction_factor(
           av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
           FACTOR_PT_HIGH, q, cpi->common.bit_depth);
-      const int bits_per_mb = vp10_rc_bits_per_mb(
+      const int bits_per_mb = av1_rc_bits_per_mb(
           INTER_FRAME, q, factor * speed_term * group_weight_factor,
           cpi->common.bit_depth);
       if (bits_per_mb <= target_norm_bits_per_mb) break;
     }
 
     // Restriction on active max q for constrained quality mode.
-    if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
+    if (cpi->oxcf.rc_mode == AOM_CQ) q = AOMMAX(q, oxcf->cq_level);
     return q;
   }
 }
 
-static void setup_rf_level_maxq(VP10_COMP *cpi) {
+static void setup_rf_level_maxq(AV1_COMP *cpi) {
   int i;
   RATE_CONTROL *const rc = &cpi->rc;
   for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) {
-    int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality);
-    rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
+    int qdelta = av1_frame_type_qdelta(cpi, i, rc->worst_quality);
+    rc->rf_level_maxq[i] = AOMMAX(rc->worst_quality + qdelta, rc->best_quality);
   }
 }
 
-void vp10_init_subsampling(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_init_subsampling(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   const int w = cm->width;
   const int h = cm->height;
@@ -1158,15 +1158,15 @@
   setup_rf_level_maxq(cpi);
 }
 
-void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
-                               int *scaled_frame_height) {
+void av1_calculate_coded_size(AV1_COMP *cpi, int *scaled_frame_width,
+                              int *scaled_frame_height) {
   RATE_CONTROL *const rc = &cpi->rc;
   *scaled_frame_width = rc->frame_width[rc->frame_size_selector];
   *scaled_frame_height = rc->frame_height[rc->frame_size_selector];
 }
 
-void vp10_init_second_pass(VP10_COMP *cpi) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_init_second_pass(AV1_COMP *cpi) {
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   double frame_rate;
   FIRSTPASS_STATS *stats;
@@ -1187,7 +1187,7 @@
   // encoded in the second pass is a guess. However, the sum duration is not.
   // It is calculated based on the actual durations of all frames from the
   // first pass.
-  vp10_new_framerate(cpi, frame_rate);
+  av1_new_framerate(cpi, frame_rate);
   twopass->bits_left =
       (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
 
@@ -1223,7 +1223,7 @@
   twopass->last_kfgroup_zeromotion_pct = 100;
 
   if (oxcf->resize_mode != RESIZE_NONE) {
-    vp10_init_subsampling(cpi);
+    av1_init_subsampling(cpi);
   }
 }
 
@@ -1234,7 +1234,7 @@
 #define LOW_SR_DIFF_TRHESH 0.1
 #define SR_DIFF_MAX 128.0
 
-static double get_sr_decay_rate(const VP10_COMP *cpi,
+static double get_sr_decay_rate(const AV1_COMP *cpi,
                                 const FIRSTPASS_STATS *frame) {
   const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
                                                              : cpi->common.MBs;
@@ -1253,40 +1253,40 @@
   modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
 
   if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
-    sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
+    sr_diff = AOMMIN(sr_diff, SR_DIFF_MAX);
     sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
                (MOTION_AMP_PART * motion_amplitude_factor) -
                (INTRA_PART * modified_pcnt_intra);
   }
-  return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
+  return AOMMAX(sr_decay, AOMMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
 }
 
 // This function gives an estimate of how badly we believe the prediction
 // quality is decaying from frame to frame.
-static double get_zero_motion_factor(const VP10_COMP *cpi,
+static double get_zero_motion_factor(const AV1_COMP *cpi,
                                      const FIRSTPASS_STATS *frame) {
   const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
   double sr_decay = get_sr_decay_rate(cpi, frame);
-  return VPXMIN(sr_decay, zero_motion_pct);
+  return AOMMIN(sr_decay, zero_motion_pct);
 }
 
 #define ZM_POWER_FACTOR 0.75
 
-static double get_prediction_decay_rate(const VP10_COMP *cpi,
+static double get_prediction_decay_rate(const AV1_COMP *cpi,
                                         const FIRSTPASS_STATS *next_frame) {
   const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
   const double zero_motion_factor =
       (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
                   ZM_POWER_FACTOR));
 
-  return VPXMAX(zero_motion_factor,
+  return AOMMAX(zero_motion_factor,
                 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
 }
 
 // Function to test for a condition where a complex transition is followed
 // by a static section. For example in slide shows where there is a fade
 // between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+static int detect_transition_to_still(AV1_COMP *cpi, int frame_interval,
                                       int still_interval,
                                       double loop_decay_rate,
                                       double last_decay_rate) {
@@ -1360,18 +1360,17 @@
 }
 
 #define BASELINE_ERR_PER_MB 1000.0
-static double calc_frame_boost(VP10_COMP *cpi,
-                               const FIRSTPASS_STATS *this_frame,
+static double calc_frame_boost(AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame,
                                double this_frame_mv_in_out, double max_boost) {
   double frame_boost;
-  const double lq = vp10_convert_qindex_to_q(
+  const double lq = av1_convert_qindex_to_q(
       cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
-  const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
+  const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
   int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
                                                        : cpi->common.MBs;
 
   // Correct for any inactive region in the image
-  num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
+  num_mbs = (int)AOMMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
 
   // Underlying boost factor is based on inter error ratio.
   frame_boost = (BASELINE_ERR_PER_MB * num_mbs) /
@@ -1387,11 +1386,11 @@
   else
     frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
 
-  return VPXMIN(frame_boost, max_boost * boost_q_correction);
+  return AOMMIN(frame_boost, max_boost * boost_q_correction);
 }
 
-static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
-                          int b_frames, int *f_boost, int *b_boost) {
+static int calc_arf_boost(AV1_COMP *cpi, int offset, int f_frames, int b_frames,
+                          int *f_boost, int *b_boost) {
   TWO_PASS *const twopass = &cpi->twopass;
   int i;
   double boost_score = 0.0;
@@ -1473,7 +1472,7 @@
   arf_boost = (*f_boost + *b_boost);
   if (arf_boost < ((b_frames + f_frames) * 20))
     arf_boost = ((b_frames + f_frames) * 20);
-  arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST);
+  arf_boost = AOMMAX(arf_boost, MIN_ARF_GF_BOOST);
 
   return arf_boost;
 }
@@ -1498,7 +1497,7 @@
 }
 
 // Calculate the total bits to allocate in this GF/ARF group.
-static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
+static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
                                              double gf_group_err) {
   const RATE_CONTROL *const rc = &cpi->rc;
   const TWO_PASS *const twopass = &cpi->twopass;
@@ -1544,7 +1543,7 @@
   }
 
   // Calculate the number of extra bits for use in the boosted frame or frames.
-  return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
+  return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
                 0);
 }
 
@@ -1562,10 +1561,10 @@
 }
 #endif
 
-static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
+static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
                                    double group_error, int gf_arf_bits) {
   RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
   FIRSTPASS_STATS frame_stats;
@@ -1606,7 +1605,7 @@
 #endif  // CONFIG_EXT_REFS
 
 #if CONFIG_EXT_REFS
-  vp10_zero_array(ext_arf_boost, MAX_EXT_ARFS);
+  av1_zero_array(ext_arf_boost, MAX_EXT_ARFS);
 #endif
 
   key_frame = cpi->common.frame_type == KEY_FRAME;
@@ -1761,7 +1760,7 @@
     gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
 #endif  // CONFIG_EXT_REFS
     target_frame_size =
-        clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
+        clamp(target_frame_size, 0, AOMMIN(max_bits, (int)total_group_bits));
 
 #if CONFIG_EXT_REFS
     // If we are going to have ARFs, check if we can have BWDREF in this
@@ -1862,7 +1861,7 @@
 // Note:
 // We need to configure the frame at the end of the sequence + 1 that will be
 // the start frame for the next group. Otherwise prior to the call to
-// vp10_rc_get_second_pass_params() the data will be undefined.
+// av1_rc_get_second_pass_params() the data will be undefined.
 #if CONFIG_EXT_REFS
   gf_group->arf_update_idx[frame_index] = 0;
   gf_group->arf_ref_idx[frame_index] = 0;
@@ -1908,10 +1907,10 @@
   cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled;
 }
 // Analyse and define a gf/arf group.
-static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
-  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   FIRSTPASS_STATS next_frame;
   const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
@@ -1955,11 +1954,11 @@
   // Reset the GF group data structures unless this is a key
   // frame in which case it will already have been done.
   if (is_key_frame == 0) {
-    vp10_zero(twopass->gf_group);
+    av1_zero(twopass->gf_group);
   }
 
-  vpx_clear_system_state();
-  vp10_zero(next_frame);
+  aom_clear_system_state();
+  av1_zero(next_frame);
 
   // Load stats for the current frame.
   mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
@@ -1986,12 +1985,12 @@
   // Set a maximum and minimum interval for the GF group.
   // If the image appears almost completely static we can extend beyond this.
   {
-    int int_max_q = (int)(vp10_convert_qindex_to_q(
-        twopass->active_worst_quality, cpi->common.bit_depth));
-    int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
-                                                 cpi->common.bit_depth));
+    int int_max_q = (int)(av1_convert_qindex_to_q(twopass->active_worst_quality,
+                                                  cpi->common.bit_depth));
+    int int_lbq = (int)(av1_convert_qindex_to_q(rc->last_boosted_qindex,
+                                                cpi->common.bit_depth));
 
-    active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
+    active_min_gf_interval = rc->min_gf_interval + AOMMIN(2, int_max_q / 200);
     if (active_min_gf_interval > rc->max_gf_interval)
       active_min_gf_interval = rc->max_gf_interval;
 
@@ -2002,7 +2001,7 @@
       // bits to spare and are better with a smaller interval and smaller boost.
       // At high Q when there are few bits to spare we are better with a longer
       // interval to spread the cost of the GF.
-      active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6));
+      active_max_gf_interval = 12 + AOMMIN(4, (int_lbq / 6));
 
       // We have: active_min_gf_interval <= rc->max_gf_interval
       if (active_max_gf_interval < active_min_gf_interval)
@@ -2044,7 +2043,7 @@
       decay_accumulator = decay_accumulator * loop_decay_rate;
 
       // Monitor for static sections.
-      zero_motion_accumulator = VPXMIN(
+      zero_motion_accumulator = AOMMIN(
           zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
 
       // Break clause to detect very still sections after motion. For example,
@@ -2102,7 +2101,7 @@
             ? 1
             : 0;
   } else {
-    rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
+    rc->gfu_boost = AOMMAX((int)boost_score, MIN_ARF_GF_BOOST);
     rc->source_alt_ref_pending = 0;
   }
 
@@ -2137,7 +2136,7 @@
   // where there could be significant overshoot than for easier
   // sections where we do not wish to risk creating an overshoot
   // of the allocated bit budget.
-  if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
+  if ((cpi->oxcf.rc_mode != AOM_Q) && (rc->baseline_gf_interval > 1)) {
     const int vbr_group_bits_per_frame =
         (int)(gf_group_bits / rc->baseline_gf_interval);
     const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
@@ -2151,17 +2150,17 @@
     // rc factor is a weight factor that corrects for local rate control drift.
     double rc_factor = 1.0;
     if (rc->rate_error_estimate > 0) {
-      rc_factor = VPXMAX(RC_FACTOR_MIN,
+      rc_factor = AOMMAX(RC_FACTOR_MIN,
                          (double)(100 - rc->rate_error_estimate) / 100.0);
     } else {
-      rc_factor = VPXMIN(RC_FACTOR_MAX,
+      rc_factor = AOMMIN(RC_FACTOR_MAX,
                          (double)(100 - rc->rate_error_estimate) / 100.0);
     }
     tmp_q = get_twopass_worst_quality(
         cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
         vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor);
     twopass->active_worst_quality =
-        VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
+        AOMMAX(tmp_q, twopass->active_worst_quality >> 1);
   }
 #endif
 
@@ -2315,12 +2314,12 @@
 
 #define FRAMES_TO_CHECK_DECAY 8
 
-static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   int i, j;
   RATE_CONTROL *const rc = &cpi->rc;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const FIRSTPASS_STATS first_frame = *this_frame;
   const FIRSTPASS_STATS *const start_position = twopass->stats_in;
   FIRSTPASS_STATS next_frame;
@@ -2335,12 +2334,12 @@
   double kf_group_err = 0.0;
   double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
 
-  vp10_zero(next_frame);
+  av1_zero(next_frame);
 
   cpi->common.frame_type = KEY_FRAME;
 
   // Reset the GF group data structures.
-  vp10_zero(*gf_group);
+  av1_zero(*gf_group);
 
   // Is this a forced key frame by interval.
   rc->this_key_frame_forced = rc->next_key_frame_forced;
@@ -2465,7 +2464,7 @@
   } else {
     twopass->kf_group_bits = 0;
   }
-  twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits);
+  twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);
 
   // Reset the first pass file position.
   reset_fpf_position(twopass, start_position);
@@ -2478,7 +2477,7 @@
     if (EOF == input_stats(twopass, &next_frame)) break;
 
     // Monitor for static sections.
-    zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+    zero_motion_accumulator = AOMMIN(zero_motion_accumulator,
                                      get_zero_motion_factor(cpi, &next_frame));
 
     // Not all frames in the group are necessarily used in calculating boost.
@@ -2492,7 +2491,7 @@
         const double loop_decay_rate =
             get_prediction_decay_rate(cpi, &next_frame);
         decay_accumulator *= loop_decay_rate;
-        decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
+        decay_accumulator = AOMMAX(decay_accumulator, MIN_DECAY_FACTOR);
         av_decay_accumulator += decay_accumulator;
         ++loop_decay_counter;
       }
@@ -2512,8 +2511,8 @@
 
   // Apply various clamps for min and max boost
   rc->kf_boost = (int)(av_decay_accumulator * boost_score);
-  rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3));
-  rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
+  rc->kf_boost = AOMMAX(rc->kf_boost, (rc->frames_to_key * 3));
+  rc->kf_boost = AOMMAX(rc->kf_boost, MIN_KF_BOOST);
 
   // Work out how many bits to allocate for the key frame itself.
   kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
@@ -2551,7 +2550,7 @@
 }
 
 // Define the reference buffers that will be updated post encode.
-static void configure_buffer_updates(VP10_COMP *cpi) {
+static void configure_buffer_updates(AV1_COMP *cpi) {
   TWO_PASS *const twopass = &cpi->twopass;
 
   // Wei-Ting: Should we define another function to take care of
@@ -2634,7 +2633,7 @@
         // Allow BRF use the farthest ALT_REF (ALT0) as BWD_REF by swapping
         // the virtual indices.
         // NOTE: The indices will be swapped back after this frame is encoded
-        //       (in vp10_update_reference_frames()).
+        //       (in av1_update_reference_frames()).
         int tmp = cpi->bwd_fb_idx;
         cpi->bwd_fb_idx = cpi->alt_fb_idx;
         cpi->alt_fb_idx = cpi->arf_map[0];
@@ -2671,7 +2670,7 @@
   }
 }
 
-static int is_skippable_frame(const VP10_COMP *cpi) {
+static int is_skippable_frame(const AV1_COMP *cpi) {
   // If the current frame does not have non-zero motion vector detected in the
   // first  pass, and so do its previous and forward frames, then this frame
   // can be skipped for partition check, and the partition size is assigned
@@ -2690,8 +2689,8 @@
           twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
 }
 
-void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_second_pass_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
@@ -2710,7 +2709,7 @@
     int target_rate;
     configure_buffer_updates(cpi);
     target_rate = gf_group->bit_allocation[gf_group->index];
-    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
     rc->base_frame_target = target_rate;
 
     cm->frame_type = INTER_FRAME;
@@ -2724,9 +2723,9 @@
     return;
   }
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
-  if (cpi->oxcf.rc_mode == VPX_Q) {
+  if (cpi->oxcf.rc_mode == AOM_Q) {
     twopass->active_worst_quality = cpi->oxcf.cq_level;
   } else if (cm->current_video_frame == 0) {
     // Special case code for first frame.
@@ -2748,13 +2747,13 @@
     twopass->baseline_active_worst_quality = tmp_q;
     rc->ni_av_qi = tmp_q;
     rc->last_q[INTER_FRAME] = tmp_q;
-    rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth);
+    rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->bit_depth);
     rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
     rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2;
     rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
   }
 
-  vp10_zero(this_frame);
+  av1_zero(this_frame);
   if (EOF == input_stats(twopass, &this_frame)) return;
 
   // Set the frame content type flag.
@@ -2805,9 +2804,9 @@
   target_rate = gf_group->bit_allocation[gf_group->index];
 
   if (cpi->common.frame_type == KEY_FRAME)
-    target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_iframe_target_size(cpi, target_rate);
   else
-    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
 
   rc->base_frame_target = target_rate;
 
@@ -2828,7 +2827,7 @@
 #define MINQ_ADJ_LIMIT 48
 #define MINQ_ADJ_LIMIT_CQ 20
 #define HIGH_UNDERSHOOT_RATIO 2
-void vp10_twopass_postencode_update(VP10_COMP *cpi) {
+void av1_twopass_postencode_update(AV1_COMP *cpi) {
   TWO_PASS *const twopass = &cpi->twopass;
   RATE_CONTROL *const rc = &cpi->rc;
   const int bits_used = rc->base_frame_target;
@@ -2839,7 +2838,7 @@
   // is designed to prevent extreme behaviour at the end of a clip
   // or group of frames.
   rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
-  twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0);
+  twopass->bits_left = AOMMAX(twopass->bits_left - bits_used, 0);
 
   // Calculate the pct rc error.
   if (rc->total_actual_bits) {
@@ -2854,19 +2853,19 @@
     twopass->kf_group_bits -= bits_used;
     twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
   }
-  twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0);
+  twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
 
   // Increment the gf group index ready for the next frame.
   ++twopass->gf_group.index;
 
   // If the rate control is drifting consider adjustment to min or maxq.
-  if ((cpi->oxcf.rc_mode != VPX_Q) &&
+  if ((cpi->oxcf.rc_mode != AOM_Q) &&
       (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) &&
       !cpi->rc.is_src_frame_alt_ref) {
     const int maxq_adj_limit =
         rc->worst_quality - twopass->active_worst_quality;
     const int minq_adj_limit =
-        (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
+        (cpi->oxcf.rc_mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
 
     // Undershoot.
     if (rc->rate_error_estimate > cpi->oxcf.under_shoot_pct) {
@@ -2904,17 +2903,17 @@
         rc->vbr_bits_off_target_fast +=
             fast_extra_thresh - rc->projected_frame_size;
         rc->vbr_bits_off_target_fast =
-            VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+            AOMMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
 
         // Fast adaptation of minQ if necessary to use up the extra bits.
         if (rc->avg_frame_bandwidth) {
           twopass->extend_minq_fast =
               (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
         }
-        twopass->extend_minq_fast = VPXMIN(
+        twopass->extend_minq_fast = AOMMIN(
             twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
       } else if (rc->vbr_bits_off_target_fast) {
-        twopass->extend_minq_fast = VPXMIN(
+        twopass->extend_minq_fast = AOMMIN(
             twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
       } else {
         twopass->extend_minq_fast = 0;
diff --git a/av1/encoder/firstpass.h b/av1/encoder/firstpass.h
index 5623540..2b161a1 100644
--- a/av1/encoder/firstpass.h
+++ b/av1/encoder/firstpass.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_FIRSTPASS_H_
-#define VP10_ENCODER_FIRSTPASS_H_
+#ifndef AV1_ENCODER_FIRSTPASS_H_
+#define AV1_ENCODER_FIRSTPASS_H_
 
 #include "av1/encoder/lookahead.h"
 #include "av1/encoder/ratectrl.h"
@@ -162,25 +162,24 @@
   GF_GROUP gf_group;
 } TWO_PASS;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_init_first_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi);
-void vp10_first_pass(struct VP10_COMP *cpi,
-                     const struct lookahead_entry *source);
-void vp10_end_first_pass(struct VP10_COMP *cpi);
+void av1_init_first_pass(struct AV1_COMP *cpi);
+void av1_rc_get_first_pass_params(struct AV1_COMP *cpi);
+void av1_first_pass(struct AV1_COMP *cpi, const struct lookahead_entry *source);
+void av1_end_first_pass(struct AV1_COMP *cpi);
 
-void vp10_init_second_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi);
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_init_second_pass(struct AV1_COMP *cpi);
+void av1_rc_get_second_pass_params(struct AV1_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
 
 // Post encode update of the rate control parameters for 2-pass
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
 
-void vp10_init_subsampling(struct VP10_COMP *cpi);
+void av1_init_subsampling(struct AV1_COMP *cpi);
 
-void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
-                               int *scaled_frame_height);
+void av1_calculate_coded_size(struct AV1_COMP *cpi, int *scaled_frame_width,
+                              int *scaled_frame_height);
 
 #if CONFIG_EXT_REFS
 static inline int get_number_of_extra_arfs(int interval, int arf_pending) {
@@ -199,4 +198,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_FIRSTPASS_H_
+#endif  // AV1_ENCODER_FIRSTPASS_H_
diff --git a/av1/encoder/global_motion.h b/av1/encoder/global_motion.h
index ed088d6..aad8cc4 100644
--- a/av1/encoder/global_motion.h
+++ b/av1/encoder/global_motion.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_GLOBAL_MOTION_H_
-#define VP10_ENCODER_GLOBAL_MOTION_H_
+#ifndef AV1_ENCODER_GLOBAL_MOTION_H_
+#define AV1_ENCODER_GLOBAL_MOTION_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -25,4 +25,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VP10_ENCODER_GLOBAL_MOTION_H_
+#endif  // AV1_ENCODER_GLOBAL_MOTION_H_
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index ccfab0a..a0e37a3 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "av1/common/idct.h"
 #include "av1/encoder/hybrid_fwd_txfm.h"
@@ -18,16 +18,16 @@
 static INLINE void fdct32x32(int rd_transform, const int16_t *src,
                              tran_low_t *dst, int src_stride) {
   if (rd_transform)
-    vpx_fdct32x32_rd(src, dst, src_stride);
+    aom_fdct32x32_rd(src, dst, src_stride);
   else
-    vpx_fdct32x32(src, dst, src_stride);
+    aom_fdct32x32(src, dst, src_stride);
 }
 
 static void fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                          int diff_stride, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_fwht4x4(src_diff, coeff, diff_stride);
+    av1_fwht4x4(src_diff, coeff, diff_stride);
     return;
   }
 
@@ -35,7 +35,7 @@
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+    case ADST_ADST: av1_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
@@ -47,8 +47,8 @@
     case V_ADST:
     case H_ADST:
     case V_FLIPADST:
-    case H_FLIPADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
-    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+    case H_FLIPADST: av1_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -59,42 +59,42 @@
                          int diff_stride, TX_TYPE tx_type,
                          FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht4x8(src_diff, coeff, diff_stride, tx_type);
+  av1_fht4x8(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
                          int diff_stride, TX_TYPE tx_type,
                          FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht8x4(src_diff, coeff, diff_stride, tx_type);
+  av1_fht8x4(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TX_TYPE tx_type,
                           FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht8x16(src_diff, coeff, diff_stride, tx_type);
+  av1_fht8x16(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TX_TYPE tx_type,
                           FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht16x8(src_diff, coeff, diff_stride, tx_type);
+  av1_fht16x8(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
                            int diff_stride, TX_TYPE tx_type,
                            FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht16x32(src_diff, coeff, diff_stride, tx_type);
+  av1_fht16x32(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
                            int diff_stride, TX_TYPE tx_type,
                            FWD_TXFM_OPT fwd_txfm_opt) {
   (void)fwd_txfm_opt;
-  vp10_fht32x16(src_diff, coeff, diff_stride, tx_type);
+  av1_fht32x16(src_diff, coeff, diff_stride, tx_type);
 }
 #endif  // CONFIG_EXT_TX
 
@@ -107,9 +107,9 @@
     case DCT_ADST:
     case ADST_ADST:
       if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
-        vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
+        av1_fht8x8(src_diff, coeff, diff_stride, tx_type);
       else  // FWD_TXFM_OPT_DC
-        vpx_fdct8x8_1(src_diff, coeff, diff_stride);
+        aom_fdct8x8_1(src_diff, coeff, diff_stride);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -122,8 +122,8 @@
     case V_ADST:
     case H_ADST:
     case V_FLIPADST:
-    case H_FLIPADST: vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
-    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+    case H_FLIPADST: av1_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -138,9 +138,9 @@
     case DCT_ADST:
     case ADST_ADST:
       if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
-        vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+        av1_fht16x16(src_diff, coeff, diff_stride, tx_type);
       else  // FWD_TXFM_OPT_DC
-        vpx_fdct16x16_1(src_diff, coeff, diff_stride);
+        aom_fdct16x16_1(src_diff, coeff, diff_stride);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -153,12 +153,8 @@
     case V_ADST:
     case H_ADST:
     case V_FLIPADST:
-    case H_FLIPADST:
-      vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
-      break;
-    case IDTX:
-      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
-      break;
+    case H_FLIPADST: av1_fht16x16(src_diff, coeff, diff_stride, tx_type); break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -172,7 +168,7 @@
       if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
         fdct32x32(rd_transform, src_diff, coeff, diff_stride);
       else  // FWD_TXFM_OPT_DC
-        vpx_fdct32x32_1(src_diff, coeff, diff_stride);
+        aom_fdct32x32_1(src_diff, coeff, diff_stride);
       break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
@@ -183,7 +179,7 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      av1_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
       break;
     case V_DCT:
     case H_DCT:
@@ -191,23 +187,21 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      av1_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
       break;
-    case IDTX:
-      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
-      break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                                 int diff_stride, TX_TYPE tx_type, int lossless,
                                 const int bd) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+    av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
     return;
   }
 
@@ -216,7 +210,7 @@
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -224,7 +218,7 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -232,9 +226,9 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
       break;
-    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -246,7 +240,7 @@
                                 FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht4x8(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht4x8(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void highbd_fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
@@ -254,7 +248,7 @@
                                 FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht8x4(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht8x4(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void highbd_fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
@@ -262,7 +256,7 @@
                                  FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht8x16(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht8x16(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void highbd_fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
@@ -270,7 +264,7 @@
                                  FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht16x8(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht16x8(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void highbd_fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
@@ -278,7 +272,7 @@
                                   FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht16x32(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht16x32(src_diff, coeff, diff_stride, tx_type);
 }
 
 static void highbd_fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
@@ -286,7 +280,7 @@
                                   FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
   (void)fwd_txfm_opt;
   (void)bd;
-  vp10_highbd_fht32x16(src_diff, coeff, diff_stride, tx_type);
+  av1_highbd_fht32x16(src_diff, coeff, diff_stride, tx_type);
 }
 #endif  // CONFIG_EXT_TX
 
@@ -299,7 +293,7 @@
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -307,7 +301,7 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -316,9 +310,9 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST exists only in C
-      vp10_highbd_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
       break;
-    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -333,7 +327,7 @@
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -341,7 +335,7 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -350,11 +344,9 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST exists only in C
-      vp10_highbd_fht16x16_c(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht16x16_c(src_diff, coeff, diff_stride, tx_type);
       break;
-    case IDTX:
-      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
-      break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0);
   }
@@ -368,7 +360,7 @@
   (void)fwd_txfm_opt;
   switch (tx_type) {
     case DCT_DCT:
-      vp10_fwd_txfm2d_32x32(src_diff, coeff, diff_stride, tx_type, bd);
+      av1_fwd_txfm2d_32x32(src_diff, coeff, diff_stride, tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
@@ -385,16 +377,14 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_highbd_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
       break;
-    case IDTX:
-      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
-      break;
+    case IDTX: av1_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type); break;
 #endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
               FWD_TXFM_PARAM *fwd_txfm_param) {
@@ -441,7 +431,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
                      int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param) {
   const int fwd_txfm_opt = fwd_txfm_param->fwd_txfm_opt;
@@ -495,4 +485,4 @@
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/hybrid_fwd_txfm.h b/av1/encoder/hybrid_fwd_txfm.h
index 07b832c..3ab4fd1 100644
--- a/av1/encoder/hybrid_fwd_txfm.h
+++ b/av1/encoder/hybrid_fwd_txfm.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_HYBRID_FWD_TXFM_H_
-#define VP10_ENCODER_HYBRID_FWD_TXFM_H_
+#ifndef AV1_ENCODER_HYBRID_FWD_TXFM_H_
+#define AV1_ENCODER_HYBRID_FWD_TXFM_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 typedef enum FWD_TXFM_OPT { FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC } FWD_TXFM_OPT;
 
@@ -21,9 +21,9 @@
   FWD_TXFM_OPT fwd_txfm_opt;
   int rd_transform;
   int lossless;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int bd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 } FWD_TXFM_PARAM;
 
 #ifdef __cplusplus
@@ -33,13 +33,13 @@
 void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
               FWD_TXFM_PARAM *fwd_txfm_param);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
                      int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_HYBRID_FWD_TXFM_H_
+#endif  // AV1_ENCODER_HYBRID_FWD_TXFM_H_
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
index 3c4ff7d..094fb62 100644
--- a/av1/encoder/lookahead.c
+++ b/av1/encoder/lookahead.c
@@ -10,7 +10,7 @@
 #include <assert.h>
 #include <stdlib.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/common.h"
 
@@ -29,26 +29,26 @@
   return buf;
 }
 
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
+void av1_lookahead_destroy(struct lookahead_ctx *ctx) {
   if (ctx) {
     if (ctx->buf) {
       int i;
 
-      for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
+      for (i = 0; i < ctx->max_sz; i++) aom_free_frame_buffer(&ctx->buf[i].img);
       free(ctx->buf);
     }
     free(ctx);
   }
 }
 
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
-                                          unsigned int height,
-                                          unsigned int subsampling_x,
-                                          unsigned int subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
-                                          int use_highbitdepth,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
+                                         unsigned int height,
+                                         unsigned int subsampling_x,
+                                         unsigned int subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+                                         int use_highbitdepth,
 #endif
-                                          unsigned int depth) {
+                                         unsigned int depth) {
   struct lookahead_ctx *ctx = NULL;
 
   // Clamp the lookahead queue depth
@@ -66,28 +66,28 @@
     ctx->buf = calloc(depth, sizeof(*ctx->buf));
     if (!ctx->buf) goto bail;
     for (i = 0; i < depth; i++)
-      if (vpx_alloc_frame_buffer(
+      if (aom_alloc_frame_buffer(
               &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
               use_highbitdepth,
 #endif
-              VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
+              AOM_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
         goto bail;
   }
   return ctx;
 bail:
-  vp10_lookahead_destroy(ctx);
+  av1_lookahead_destroy(ctx);
   return NULL;
 }
 
 #define USE_PARTIAL_COPY 0
 
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
-                        int64_t ts_start, int64_t ts_end,
-#if CONFIG_VP9_HIGHBITDEPTH
-                        int use_highbitdepth,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+                       int64_t ts_start, int64_t ts_end,
+#if CONFIG_AOM_HIGHBITDEPTH
+                       int use_highbitdepth,
 #endif
-                        unsigned int flags) {
+                       unsigned int flags) {
   struct lookahead_entry *buf;
 #if USE_PARTIAL_COPY
   int row, col, active_end;
@@ -117,7 +117,7 @@
 
 #if USE_PARTIAL_COPY
   // TODO(jkoleszar): This is disabled for now, as
-  // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+  // av1_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
 
   // Only do this partial copy if the following conditions are all met:
   // 1. Lookahead queue has has size of 1.
@@ -144,8 +144,8 @@
         }
 
         // Only copy this active region.
-        vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
-                                             16, (active_end - col) << 4);
+        av1_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+                                            16, (active_end - col) << 4);
 
         // Start again from the end of this active region.
         col = active_end;
@@ -158,14 +158,14 @@
     if (larger_dimensions) {
       YV12_BUFFER_CONFIG new_img;
       memset(&new_img, 0, sizeof(new_img));
-      if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+      if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
                                  subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                  use_highbitdepth,
 #endif
-                                 VPX_ENC_BORDER_IN_PIXELS, 0))
+                                 AOM_ENC_BORDER_IN_PIXELS, 0))
         return 1;
-      vpx_free_frame_buffer(&buf->img);
+      aom_free_frame_buffer(&buf->img);
       buf->img = new_img;
     } else if (new_dimensions) {
       buf->img.y_crop_width = src->y_crop_width;
@@ -176,7 +176,7 @@
       buf->img.subsampling_y = src->subsampling_y;
     }
     // Partial copy not implemented yet
-    vp10_copy_and_extend_frame(src, &buf->img);
+    av1_copy_and_extend_frame(src, &buf->img);
 #if USE_PARTIAL_COPY
   }
 #endif
@@ -187,8 +187,8 @@
   return 0;
 }
 
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
-                                           int drain) {
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
+                                          int drain) {
   struct lookahead_entry *buf = NULL;
 
   if (ctx && ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
@@ -198,8 +198,8 @@
   return buf;
 }
 
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
-                                            int index) {
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
+                                           int index) {
   struct lookahead_entry *buf = NULL;
 
   if (index >= 0) {
@@ -221,4 +221,4 @@
   return buf;
 }
 
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
index 4b26068..35b1e0c 100644
--- a/av1/encoder/lookahead.h
+++ b/av1/encoder/lookahead.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_LOOKAHEAD_H_
-#define VP10_ENCODER_LOOKAHEAD_H_
+#ifndef AV1_ENCODER_LOOKAHEAD_H_
+#define AV1_ENCODER_LOOKAHEAD_H_
 
 #include "aom_scale/yv12config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -43,18 +43,18 @@
  * The lookahead stage is a queue of frame buffers on which some analysis
  * may be done when buffers are enqueued.
  */
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
-                                          unsigned int height,
-                                          unsigned int subsampling_x,
-                                          unsigned int subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
-                                          int use_highbitdepth,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
+                                         unsigned int height,
+                                         unsigned int subsampling_x,
+                                         unsigned int subsampling_y,
+#if CONFIG_AOM_HIGHBITDEPTH
+                                         int use_highbitdepth,
 #endif
-                                          unsigned int depth);
+                                         unsigned int depth);
 
 /**\brief Destroys the lookahead stage
  */
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
+void av1_lookahead_destroy(struct lookahead_ctx *ctx);
 
 /**\brief Enqueue a source buffer
  *
@@ -71,12 +71,12 @@
  * \param[in] flags       Flags set on this frame
  * \param[in] active_map  Map that specifies which macroblock is active
  */
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
-                        int64_t ts_start, int64_t ts_end,
-#if CONFIG_VP9_HIGHBITDEPTH
-                        int use_highbitdepth,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+                       int64_t ts_start, int64_t ts_end,
+#if CONFIG_AOM_HIGHBITDEPTH
+                       int use_highbitdepth,
 #endif
-                        unsigned int flags);
+                       unsigned int flags);
 
 /**\brief Get the next source buffer to encode
  *
@@ -88,8 +88,7 @@
  * \retval NULL, if drain set and queue is empty
  * \retval NULL, if drain not set and queue not of the configured depth
  */
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
-                                           int drain);
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx, int drain);
 
 /**\brief Get a future source buffer to encode
  *
@@ -98,17 +97,17 @@
  *
  * \retval NULL, if no buffer exists at the specified index
  */
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
-                                            int index);
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
+                                           int index);
 
 /**\brief Get the number of frames currently in the lookahead queue
  *
  * \param[in] ctx       Pointer to the lookahead context
  */
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx);
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_LOOKAHEAD_H_
+#endif  // AV1_ENCODER_LOOKAHEAD_H_
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index 43f0f87..f973e87 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -10,11 +10,11 @@
 
 #include <limits.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/system_state.h"
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/mcomp.h"
@@ -22,12 +22,12 @@
 #include "av1/common/reconinter.h"
 #include "av1/common/reconintra.h"
 
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,
                                               int mb_row, int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
-  const vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+  const aom_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
 
   const int tmp_col_min = x->mv_col_min;
   const int tmp_col_max = x->mv_col_max;
@@ -38,16 +38,16 @@
 
   // Further step/diamond searches as necessary
   int step_param = mv_sf->reduce_first_step_size;
-  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+  step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);
 
-  vp10_set_mv_search_range(x, ref_mv);
+  av1_set_mv_search_range(x, ref_mv);
 
   ref_full.col = ref_mv->col >> 3;
   ref_full.row = ref_mv->row >> 3;
 
   /*cpi->sf.search_method == HEX*/
-  vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
-                  cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
+  av1_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
+                 cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
 
   // Try sub-pixel MC
   // if (bestsme > error_thresh && bestsme < INT_MAX)
@@ -73,7 +73,7 @@
   xd->mi[0]->mbmi.ref_frame[1] = NONE;
 #endif  // CONFIG_EXT_INTER
 
-  vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+  av1_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
 
   /* restore UMV window */
   x->mv_col_min = tmp_col_min;
@@ -81,11 +81,11 @@
   x->mv_row_min = tmp_row_min;
   x->mv_row_max = tmp_row_max;
 
-  return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+  return aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride);
 }
 
-static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv, int mb_row,
+static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv, int mb_row,
                                   int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -94,7 +94,7 @@
 
   // Try zero MV first
   // FIXME should really use something like near/nearest MV and/or MV prediction
-  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+  err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
   best_mv.col = best_mv.row = 0;
 
@@ -123,21 +123,21 @@
   return err;
 }
 
-static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
+static int do_16x16_zerozero_search(AV1_COMP *cpi, int_mv *dst_mv) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   unsigned int err;
 
   // Try zero MV first
   // FIXME should really use something like near/nearest MV and/or MV prediction
-  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+  err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
 
   dst_mv->as_int = 0;
 
   return err;
 }
-static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
+static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   PREDICTION_MODE best_mode = -1, mode;
@@ -149,10 +149,10 @@
     unsigned int err;
 
     xd->mi[0]->mbmi.mode = mode;
-    vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
-                             x->plane[0].src.stride, xd->plane[0].dst.buf,
-                             xd->plane[0].dst.stride, 0, 0, 0);
-    err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+    av1_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+                            x->plane[0].src.stride, xd->plane[0].dst.buf,
+                            xd->plane[0].dst.stride, 0, 0, 0);
+    err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                        xd->plane[0].dst.buf, xd->plane[0].dst.stride);
 
     // find best
@@ -167,7 +167,7 @@
   return best_err;
 }
 
-static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+static void update_mbgraph_mb_stats(AV1_COMP *cpi, MBGRAPH_MB_STATS *stats,
                                     YV12_BUFFER_CONFIG *buf, int mb_y_offset,
                                     YV12_BUFFER_CONFIG *golden_ref,
                                     const MV *prev_golden_ref_mv,
@@ -176,7 +176,7 @@
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   int intra_error;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 
   // FIXME in practice we're completely ignoring chroma here
   x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
@@ -220,21 +220,21 @@
   }
 }
 
-static void update_mbgraph_frame_stats(VP10_COMP *cpi,
+static void update_mbgraph_frame_stats(AV1_COMP *cpi,
                                        MBGRAPH_FRAME_STATS *stats,
                                        YV12_BUFFER_CONFIG *buf,
                                        YV12_BUFFER_CONFIG *golden_ref,
                                        YV12_BUFFER_CONFIG *alt_ref) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   int mb_col, mb_row, offset = 0;
   int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
   MV gld_top_mv = { 0, 0 };
   MODE_INFO mi_local;
 
-  vp10_zero(mi_local);
+  av1_zero(mi_local);
   // Set up limit values for motion vectors to prevent them extending outside
   // the UMV borders.
   x->mv_row_min = -BORDER_MV_PIXELS_B16;
@@ -287,8 +287,8 @@
 }
 
 // void separate_arf_mbs_byzz
-static void separate_arf_mbs(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void separate_arf_mbs(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int mb_col, mb_row, offset, i;
   int mi_row, mi_col;
   int ncnt[4] = { 0 };
@@ -298,7 +298,7 @@
 
   CHECK_MEM_ERROR(
       cm, arf_not_zz,
-      vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
+      aom_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
 
   // We are not interested in results beyond the alt ref itself.
   if (n_frames > cpi->rc.frames_till_gf_update_due)
@@ -354,19 +354,19 @@
     else
       cpi->static_mb_pct = 0;
 
-    vp10_enable_segmentation(&cm->seg);
+    av1_enable_segmentation(&cm->seg);
   } else {
     cpi->static_mb_pct = 0;
-    vp10_disable_segmentation(&cm->seg);
+    av1_disable_segmentation(&cm->seg);
   }
 
   // Free localy allocated storage
-  vpx_free(arf_not_zz);
+  aom_free(arf_not_zz);
 }
 
-void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
-  int i, n_frames = vp10_lookahead_depth(cpi->lookahead);
+void av1_update_mbgraph_stats(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
+  int i, n_frames = av1_lookahead_depth(cpi->lookahead);
   YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
 
   assert(golden_ref != NULL);
@@ -390,7 +390,7 @@
   // the ARF MC search backwards, to get optimal results for MV caching
   for (i = 0; i < n_frames; i++) {
     MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
-    struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i);
+    struct lookahead_entry *q_cur = av1_lookahead_peek(cpi->lookahead, i);
 
     assert(q_cur != NULL);
 
@@ -398,7 +398,7 @@
                                cpi->Source);
   }
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   separate_arf_mbs(cpi);
 }
diff --git a/av1/encoder/mbgraph.h b/av1/encoder/mbgraph.h
index 0b056af..a01e5de 100644
--- a/av1/encoder/mbgraph.h
+++ b/av1/encoder/mbgraph.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_MBGRAPH_H_
-#define VP10_ENCODER_MBGRAPH_H_
+#ifndef AV1_ENCODER_MBGRAPH_H_
+#define AV1_ENCODER_MBGRAPH_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,12 +27,12 @@
 
 typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_update_mbgraph_stats(struct VP10_COMP *cpi);
+void av1_update_mbgraph_stats(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_MBGRAPH_H_
+#endif  // AV1_ENCODER_MBGRAPH_H_
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index 9ee06e9..303c5d5 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -12,11 +12,11 @@
 #include <math.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/common.h"
@@ -33,16 +33,16 @@
   return &buf->buf[mv->row * buf->stride + mv->col];
 }
 
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
   int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
   int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
   int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
   int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;
 
-  col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1);
-  row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1);
-  col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1);
-  row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1);
+  col_min = AOMMAX(col_min, (MV_LOW >> 3) + 1);
+  row_min = AOMMAX(row_min, (MV_LOW >> 3) + 1);
+  col_max = AOMMIN(col_max, (MV_UPP >> 3) - 1);
+  row_max = AOMMIN(row_max, (MV_UPP >> 3) - 1);
 
   // Get intersection of UMV window and valid MV window to reduce # of checks
   // in diamond search.
@@ -52,25 +52,25 @@
   if (x->mv_row_max > row_max) x->mv_row_max = row_max;
 }
 
-int vp10_init_search_range(int size) {
+int av1_init_search_range(int size) {
   int sr = 0;
   // Minimum search size no matter what the passed in value.
-  size = VPXMAX(16, size);
+  size = AOMMAX(16, size);
 
   while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
 
-  sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
+  sr = AOMMIN(sr, MAX_MVSEARCH_STEPS - 2);
   return sr;
 }
 
 static INLINE int mv_cost(const MV *mv, const int *joint_cost,
                           int *const comp_cost[2]) {
-  return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+  return joint_cost[av1_get_mv_joint(mv)] + comp_cost[0][mv->row] +
          comp_cost[1][mv->col];
 }
 
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
-                     int *mvcost[2], int weight) {
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                    int *mvcost[2], int weight) {
   const MV diff = { mv->row - ref->row, mv->col - ref->col };
   return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
 }
@@ -84,7 +84,7 @@
     // accuracy in either bit cost or error cost will cause it to overflow.
     return ROUND_POWER_OF_TWO(
         (unsigned)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
-        RDDIV_BITS + VP10_PROB_COST_SHIFT - RD_EPB_SHIFT +
+        RDDIV_BITS + AV1_PROB_COST_SHIFT - RD_EPB_SHIFT +
             PIXEL_TRANSFORM_ERROR_SCALE);
   }
   return 0;
@@ -95,10 +95,10 @@
   const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
   return ROUND_POWER_OF_TWO(
       (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) * sad_per_bit,
-      VP10_PROB_COST_SHIFT);
+      AV1_PROB_COST_SHIFT);
 }
 
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
   int len, ss_count = 1;
 
   cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -119,7 +119,7 @@
   cfg->searches_per_step = 4;
 }
 
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
+void av1_init3smotion_compensation(search_site_config *cfg, int stride) {
   int len, ss_count = 1;
 
   cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -297,10 +297,10 @@
   int br = bestmv->row * 8;                                         \
   int bc = bestmv->col * 8;                                         \
   int hstep = 4;                                                    \
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+  const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+  const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+  const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+  const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
   int tr = br;                                                      \
   int tc = bc;                                                      \
                                                                     \
@@ -309,22 +309,22 @@
 
 static unsigned int setup_center_error(
     const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp,
     const uint8_t *const src, const int src_stride, const uint8_t *const y,
     int y_stride, const uint8_t *second_pred, int w, int h, int offset,
     int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
   unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (second_pred != NULL) {
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       DECLARE_ALIGNED(16, uint16_t, comp_pred16[MAX_SB_SQUARE]);
-      vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
+      aom_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
                                y_stride);
       besterr =
           vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
     } else {
       DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
-      vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+      aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
       besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
     }
   } else {
@@ -336,14 +336,14 @@
   (void)xd;
   if (second_pred != NULL) {
     DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
-    vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+    aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
     besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
   } else {
     besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
   }
   *distortion = besterr;
   besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   return besterr;
 }
 
@@ -371,9 +371,9 @@
                          (cost_list[4] - 2 * cost_list[0] + cost_list[2]));
 }
 
-int vp10_find_best_sub_pixel_tree_pruned_evenmore(
+int av1_find_best_sub_pixel_tree_pruned_evenmore(
     MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
     int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, const uint8_t *second_pred, int w, int h,
     int use_upsampled_ref) {
@@ -423,7 +423,7 @@
   tr = br;
   tc = bc;
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     hstep >>= 1;
     FIRST_LEVEL_CHECKS;
     if (eighthiters > 1) {
@@ -441,9 +441,9 @@
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned_more(
+int av1_find_best_sub_pixel_tree_pruned_more(
     MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
     int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, const uint8_t *second_pred, int w, int h,
     int use_upsampled_ref) {
@@ -483,7 +483,7 @@
     }
   }
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     tr = br;
     tc = bc;
     hstep >>= 1;
@@ -507,9 +507,9 @@
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned(
+int av1_find_best_sub_pixel_tree_pruned(
     MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
     int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, const uint8_t *second_pred, int w, int h,
     int use_upsampled_ref) {
@@ -571,7 +571,7 @@
     tc = bc;
   }
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     hstep >>= 1;
     FIRST_LEVEL_CHECKS;
     if (eighthiters > 1) {
@@ -605,20 +605,20 @@
 /* clang-format on */
 
 static int upsampled_pref_error(const MACROBLOCKD *xd,
-                                const vpx_variance_fn_ptr_t *vfp,
+                                const aom_variance_fn_ptr_t *vfp,
                                 const uint8_t *const src, const int src_stride,
                                 const uint8_t *const y, int y_stride,
                                 const uint8_t *second_pred, int w, int h,
                                 unsigned int *sse) {
   unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
     if (second_pred != NULL)
-      vpx_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
+      aom_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
                                          y_stride);
     else
-      vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+      aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
 
     besterr = vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, sse);
   } else {
@@ -626,14 +626,14 @@
 #else
   DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
   (void)xd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (second_pred != NULL)
-      vpx_comp_avg_upsampled_pred(pred, second_pred, w, h, y, y_stride);
+      aom_comp_avg_upsampled_pred(pred, second_pred, w, h, y, y_stride);
     else
-      vpx_upsampled_pred(pred, w, h, y, y_stride);
+      aom_upsampled_pred(pred, w, h, y, y_stride);
 
     besterr = vfp->vf(pred, w, src, src_stride, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
 #endif
   return besterr;
@@ -641,7 +641,7 @@
 
 static unsigned int upsampled_setup_center_error(
     const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp,
     const uint8_t *const src, const int src_stride, const uint8_t *const y,
     int y_stride, const uint8_t *second_pred, int w, int h, int offset,
     int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
@@ -652,14 +652,14 @@
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree(MACROBLOCK *x, const MV *ref_mv, int allow_hp,
-                                  int error_per_bit,
-                                  const vpx_variance_fn_ptr_t *vfp,
-                                  int forced_stop, int iters_per_step,
-                                  int *cost_list, int *mvjcost, int *mvcost[2],
-                                  int *distortion, unsigned int *sse1,
-                                  const uint8_t *second_pred, int w, int h,
-                                  int use_upsampled_ref) {
+int av1_find_best_sub_pixel_tree(MACROBLOCK *x, const MV *ref_mv, int allow_hp,
+                                 int error_per_bit,
+                                 const aom_variance_fn_ptr_t *vfp,
+                                 int forced_stop, int iters_per_step,
+                                 int *cost_list, int *mvjcost, int *mvcost[2],
+                                 int *distortion, unsigned int *sse1,
+                                 const uint8_t *second_pred, int w, int h,
+                                 int use_upsampled_ref) {
   const uint8_t *const src_address = x->plane[0].src.buf;
   const int src_stride = x->plane[0].src.stride;
   const MACROBLOCKD *xd = &x->e_mbd;
@@ -675,10 +675,10 @@
   int bc = bestmv->col * 8;
   int hstep = 4;
   int iter, round = 3 - forced_stop;
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
   int tr = br;
   int tc = bc;
   const MV *search_step = search_step_table;
@@ -686,7 +686,7 @@
   unsigned int cost_array[5];
   int kr, kc;
 
-  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+  if (!(allow_hp && av1_use_mv_hp(ref_mv)))
     if (round == 3) round = 2;
 
   bestmv->row *= 8;
@@ -852,7 +852,7 @@
 // Calculate and return a sad+mvcost list around an integer best pel.
 static INLINE void calc_int_cost_list(const MACROBLOCK *x,
                                       const MV *const ref_mv, int sadpb,
-                                      const vpx_variance_fn_ptr_t *fn_ptr,
+                                      const aom_variance_fn_ptr_t *fn_ptr,
                                       const MV *best_mv, int *cost_list) {
   static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
   const struct buf_2d *const what = &x->plane[0].src;
@@ -896,7 +896,7 @@
 
 static INLINE void calc_int_sad_list(const MACROBLOCK *x,
                                      const MV *const ref_mv, int sadpb,
-                                     const vpx_variance_fn_ptr_t *fn_ptr,
+                                     const aom_variance_fn_ptr_t *fn_ptr,
                                      const MV *best_mv, int *cost_list,
                                      const int use_mvcost, const int bestsad) {
   static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
@@ -946,7 +946,7 @@
 //
 static int pattern_search(
     MACROBLOCK *x, MV *start_mv, int search_param, int sad_per_bit,
-    int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+    int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
     int use_mvcost, const MV *center_mv,
     const int num_candidates[MAX_PATTERN_SCALES],
     const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
@@ -1198,9 +1198,9 @@
   return bestsad;
 }
 
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
-                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
-                        int use_mvcost) {
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                       const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
+                       int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
@@ -1214,9 +1214,9 @@
                      : 0);
 }
 
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
-                           const MV *center_mv, const uint8_t *second_pred,
-                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost) {
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                          const MV *center_mv, const uint8_t *second_pred,
+                          const aom_variance_fn_ptr_t *vfp, int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
@@ -1230,10 +1230,10 @@
                      : 0);
 }
 
-int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
-                    int sad_per_bit, int do_init_search, int *cost_list,
-                    const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
-                    const MV *center_mv) {
+int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                   int sad_per_bit, int do_init_search, int *cost_list,
+                   const aom_variance_fn_ptr_t *vfp, int use_mvcost,
+                   const MV *center_mv) {
   // First scale has 8-closest points, the rest have 6 points in hex shape
   // at increasing scales
   static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
@@ -1268,7 +1268,7 @@
 
 static int bigdia_search(MACROBLOCK *x, MV *start_mv, int search_param,
                          int sad_per_bit, int do_init_search, int *cost_list,
-                         const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                         const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                          const MV *center_mv) {
   // First scale has 4-closest points, the rest have 8 points in diamond
   // shape at increasing scales
@@ -1309,7 +1309,7 @@
 
 static int square_search(MACROBLOCK *x, MV *start_mv, int search_param,
                          int sad_per_bit, int do_init_search, int *cost_list,
-                         const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                         const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                          const MV *center_mv) {
   // All scales have 8 closest points in square shape
   static const int square_num_candidates[MAX_PATTERN_SCALES] = {
@@ -1351,18 +1351,18 @@
 static int fast_hex_search(MACROBLOCK *x, MV *ref_mv, int search_param,
                            int sad_per_bit,
                            int do_init_search,  // must be zero for fast_hex
-                           int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+                           int *cost_list, const aom_variance_fn_ptr_t *vfp,
                            int use_mvcost, const MV *center_mv) {
-  return vp10_hex_search(
-      x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
-      do_init_search, cost_list, vfp, use_mvcost, center_mv);
+  return av1_hex_search(x, ref_mv, AOMMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+                        sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
+                        center_mv);
 }
 
 static int fast_dia_search(MACROBLOCK *x, MV *ref_mv, int search_param,
                            int sad_per_bit, int do_init_search, int *cost_list,
-                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                           const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                            const MV *center_mv) {
-  return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+  return bigdia_search(x, ref_mv, AOMMAX(MAX_MVSEARCH_STEPS - 2, search_param),
                        sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
                        center_mv);
 }
@@ -1373,7 +1373,7 @@
 // step size.
 static int exhuastive_mesh_search(MACROBLOCK *x, MV *ref_mv, MV *best_mv,
                                   int range, int step, int sad_per_bit,
-                                  const vpx_variance_fn_ptr_t *fn_ptr,
+                                  const aom_variance_fn_ptr_t *fn_ptr,
                                   const MV *center_mv) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -1393,10 +1393,10 @@
       fn_ptr->sdf(what->buf, what->stride,
                   get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
       mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
-  start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
-  start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
-  end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
-  end_col = VPXMIN(range, x->mv_col_max - fcenter_mv.col);
+  start_row = AOMMAX(-range, x->mv_row_min - fcenter_mv.row);
+  start_col = AOMMAX(-range, x->mv_col_min - fcenter_mv.col);
+  end_row = AOMMIN(range, x->mv_row_max - fcenter_mv.row);
+  end_col = AOMMIN(range, x->mv_col_max - fcenter_mv.col);
 
   for (r = start_row; r <= end_row; r += step) {
     for (c = start_col; c <= end_col; c += col_step) {
@@ -1460,11 +1460,11 @@
   return best_sad;
 }
 
-int vp10_diamond_search_sad_c(MACROBLOCK *x, const search_site_config *cfg,
-                              MV *ref_mv, MV *best_mv, int search_param,
-                              int sad_per_bit, int *num00,
-                              const vpx_variance_fn_ptr_t *fn_ptr,
-                              const MV *center_mv) {
+int av1_diamond_search_sad_c(MACROBLOCK *x, const search_site_config *cfg,
+                             MV *ref_mv, MV *best_mv, int search_param,
+                             int sad_per_bit, int *num00,
+                             const aom_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv) {
   int i, j, step;
 
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1609,7 +1609,7 @@
   int center, offset = 0;
   int bw = 4 << bwl;  // redundant variable, to be changed in the experiments.
   for (d = 0; d <= bw; d += 16) {
-    this_sad = vpx_vector_var(&ref[d], src, bwl);
+    this_sad = aom_vector_var(&ref[d], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
       offset = d;
@@ -1621,7 +1621,7 @@
     int this_pos = offset + d;
     // check limit
     if (this_pos < 0 || this_pos > bw) continue;
-    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    this_sad = aom_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
       center = this_pos;
@@ -1633,7 +1633,7 @@
     int this_pos = offset + d;
     // check limit
     if (this_pos < 0 || this_pos > bw) continue;
-    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    this_sad = aom_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
       center = this_pos;
@@ -1645,7 +1645,7 @@
     int this_pos = offset + d;
     // check limit
     if (this_pos < 0 || this_pos > bw) continue;
-    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    this_sad = aom_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
       center = this_pos;
@@ -1657,7 +1657,7 @@
     int this_pos = offset + d;
     // check limit
     if (this_pos < 0 || this_pos > bw) continue;
-    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    this_sad = aom_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
       center = this_pos;
@@ -1671,9 +1671,9 @@
   { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
 };
 
-unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
-                                            BLOCK_SIZE bsize, int mi_row,
-                                            int mi_col) {
+unsigned int av1_int_pro_motion_estimation(const AV1_COMP *cpi, MACROBLOCK *x,
+                                           BLOCK_SIZE bsize, int mi_row,
+                                           int mi_col) {
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
@@ -1694,7 +1694,7 @@
   MV this_mv;
   const int norm_factor = 3 + (bw >> 5);
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+      av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
 
   if (scaled_ref_frame) {
     int i;
@@ -1702,10 +1702,10 @@
     // match the resolution of the current frame, allowing the existing
     // motion search code to be used without additional modifications.
     for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
-    vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   {
     unsigned int this_sad;
     tmp_mv->row = 0;
@@ -1724,25 +1724,25 @@
   // Set up prediction 1-D reference set
   ref_buf = xd->plane[0].pre[0].buf - (bw >> 1);
   for (idx = 0; idx < search_width; idx += 16) {
-    vpx_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
+    aom_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
     ref_buf += 16;
   }
 
   ref_buf = xd->plane[0].pre[0].buf - (bh >> 1) * ref_stride;
   for (idx = 0; idx < search_height; ++idx) {
-    vbuf[idx] = vpx_int_pro_col(ref_buf, bw) >> norm_factor;
+    vbuf[idx] = aom_int_pro_col(ref_buf, bw) >> norm_factor;
     ref_buf += ref_stride;
   }
 
   // Set up src 1-D reference set
   for (idx = 0; idx < bw; idx += 16) {
     src_buf = x->plane[0].src.buf + idx;
-    vpx_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
+    aom_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
   }
 
   src_buf = x->plane[0].src.buf;
   for (idx = 0; idx < bh; ++idx) {
-    src_vbuf[idx] = vpx_int_pro_col(src_buf, bw) >> norm_factor;
+    src_vbuf[idx] = aom_int_pro_col(src_buf, bw) >> norm_factor;
     src_buf += src_stride;
   }
 
@@ -1803,17 +1803,17 @@
 /* do_refine: If last step (1-away) of n-step search doesn't pick the center
               point as the best match, we will do a final 1-away diamond
               refining search  */
-static int full_pixel_diamond(VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+static int full_pixel_diamond(AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
                               int step_param, int sadpb, int further_steps,
                               int do_refine, int *cost_list,
-                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const aom_variance_fn_ptr_t *fn_ptr,
                               const MV *ref_mv) {
   MV temp_mv;
   int thissme, n, num00 = 0;
   int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
                                         step_param, sadpb, &n, fn_ptr, ref_mv);
   if (bestsme < INT_MAX)
-    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+    bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
   x->best_mv.as_mv = temp_mv;
 
   // If there won't be more n-step search, check to see if refining search is
@@ -1830,7 +1830,7 @@
                                         step_param + n, sadpb, &num00, fn_ptr,
                                         ref_mv);
       if (thissme < INT_MAX)
-        thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+        thissme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
 
       // check to see if refining search is needed.
       if (num00 > further_steps - n) do_refine = 0;
@@ -1846,10 +1846,10 @@
   if (do_refine) {
     const int search_range = 8;
     MV best_mv = x->best_mv.as_mv;
-    thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
-                                       ref_mv);
+    thissme = av1_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+                                      ref_mv);
     if (thissme < INT_MAX)
-      thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
+      thissme = av1_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
     if (thissme < bestsme) {
       bestsme = thissme;
       x->best_mv.as_mv = best_mv;
@@ -1868,10 +1868,10 @@
 #define MIN_INTERVAL 1
 // Runs an limited range exhaustive mesh search using a pattern set
 // according to the encode speed profile.
-static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
+static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
                                  const MV *centre_mv_full, int sadpb,
                                  int *cost_list,
-                                 const vpx_variance_fn_ptr_t *fn_ptr,
+                                 const aom_variance_fn_ptr_t *fn_ptr,
                                  const MV *ref_mv, MV *dst_mv) {
   const SPEED_FEATURES *const sf = &cpi->sf;
   MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
@@ -1894,9 +1894,9 @@
 
   // Check size of proposed first range against magnitude of the centre
   // value used as a starting point.
-  range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
-  range = VPXMIN(range, MAX_RANGE);
-  interval = VPXMAX(interval, range / baseline_interval_divisor);
+  range = AOMMAX(range, (5 * AOMMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
+  range = AOMMIN(range, MAX_RANGE);
+  interval = AOMMAX(interval, range / baseline_interval_divisor);
 
   // initial search
   bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
@@ -1916,7 +1916,7 @@
   }
 
   if (bestsme < INT_MAX)
-    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+    bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
   *dst_mv = temp_mv;
 
   // Return cost list.
@@ -1926,18 +1926,18 @@
   return bestsme;
 }
 
-int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
-                           int sad_per_bit, int distance,
-                           const vpx_variance_fn_ptr_t *fn_ptr,
-                           const MV *center_mv, MV *best_mv) {
+int av1_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
+                          int sad_per_bit, int distance,
+                          const aom_variance_fn_ptr_t *fn_ptr,
+                          const MV *center_mv, MV *best_mv) {
   int r, c;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
-  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
-  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
-  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
   const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   int best_sad =
       fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -1961,18 +1961,18 @@
   return best_sad;
 }
 
-int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
-                           int sad_per_bit, int distance,
-                           const vpx_variance_fn_ptr_t *fn_ptr,
-                           const MV *center_mv, MV *best_mv) {
+int av1_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
+                          int sad_per_bit, int distance,
+                          const aom_variance_fn_ptr_t *fn_ptr,
+                          const MV *center_mv, MV *best_mv) {
   int r;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
-  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
-  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
-  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
   const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   unsigned int best_sad =
       fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -2027,18 +2027,18 @@
   return best_sad;
 }
 
-int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
-                           int sad_per_bit, int distance,
-                           const vpx_variance_fn_ptr_t *fn_ptr,
-                           const MV *center_mv, MV *best_mv) {
+int av1_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
+                          int sad_per_bit, int distance,
+                          const aom_variance_fn_ptr_t *fn_ptr,
+                          const MV *center_mv, MV *best_mv) {
   int r;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
-  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
-  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
-  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const int row_min = AOMMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = AOMMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = AOMMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = AOMMIN(ref_mv->col + distance, x->mv_col_max);
   const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   unsigned int best_sad =
       fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
@@ -2117,10 +2117,10 @@
   return best_sad;
 }
 
-int vp10_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
-                             int search_range,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
-                             const MV *center_mv) {
+int av1_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+                            int search_range,
+                            const aom_variance_fn_ptr_t *fn_ptr,
+                            const MV *center_mv) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2193,10 +2193,9 @@
 
 // This function is called when we do joint motion search in comp_inter_inter
 // mode.
-int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
-                              int search_range,
-                              const vpx_variance_fn_ptr_t *fn_ptr,
-                              const MV *center_mv, const uint8_t *second_pred) {
+int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
+                             const aom_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv, const uint8_t *second_pred) {
   const MV neighbors[8] = { { -1, 0 },  { 0, -1 }, { 0, 1 },  { 1, 0 },
                             { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -2242,10 +2241,10 @@
 }
 
 #define MIN_EX_SEARCH_LIMIT 128
-static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
+static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
   const SPEED_FEATURES *const sf = &cpi->sf;
   const int max_ex =
-      VPXMAX(MIN_EX_SEARCH_LIMIT,
+      AOMMAX(MIN_EX_SEARCH_LIMIT,
              (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
 
   return sf->allow_exhaustive_searches &&
@@ -2253,13 +2252,13 @@
          (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
 }
 
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
-                           MV *mvp_full, int step_param, int error_per_bit,
-                           int *cost_list, const MV *ref_mv, int var_max,
-                           int rd) {
+int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                          MV *mvp_full, int step_param, int error_per_bit,
+                          int *cost_list, const MV *ref_mv, int var_max,
+                          int rd) {
   const SPEED_FEATURES *const sf = &cpi->sf;
   const SEARCH_METHODS method = sf->mv.search_method;
-  vpx_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
+  aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
   int var = 0;
 
   if (cost_list) {
@@ -2283,8 +2282,8 @@
                             cost_list, fn_ptr, 1, ref_mv);
       break;
     case HEX:
-      var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
-                            cost_list, fn_ptr, 1, ref_mv);
+      var = av1_hex_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+                           fn_ptr, 1, ref_mv);
       break;
     case SQUARE:
       var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
@@ -2326,7 +2325,7 @@
   }
 
   if (method != NSTEP && rd && var < var_max)
-    var = vp10_get_mvpred_var(x, &x->best_mv.as_mv, ref_mv, fn_ptr, 1);
+    var = av1_get_mvpred_var(x, &x->best_mv.as_mv, ref_mv, fn_ptr, 1);
 
   return var;
 }
@@ -2382,10 +2381,10 @@
     v = INT_MAX;                                                               \
   }
 
-int vp10_find_best_masked_sub_pixel_tree(
+int av1_find_best_masked_sub_pixel_tree(
     const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
     const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
     int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
     int is_second) {
   const uint8_t *const z = x->plane[0].src.buf;
@@ -2408,10 +2407,10 @@
   int br = bestmv->row * 8;
   int bc = bestmv->col * 8;
   int hstep = 4;
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
 
   int tr = br;
   int tc = bc;
@@ -2445,7 +2444,7 @@
     tc = bc;
   }
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     hstep >>= 1;
     FIRST_LEVEL_CHECKS;
     if (eighthiters > 1) {
@@ -2471,7 +2470,7 @@
 
 static unsigned int setup_masked_center_error(
     const uint8_t *mask, int mask_stride, const MV *bestmv, const MV *ref_mv,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp,
     const uint8_t *const src, const int src_stride, const uint8_t *const y,
     int y_stride, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
     int *distortion) {
@@ -2485,16 +2484,16 @@
 
 static int upsampled_masked_pref_error(const MACROBLOCKD *xd,
                                        const uint8_t *mask, int mask_stride,
-                                       const vpx_variance_fn_ptr_t *vfp,
+                                       const aom_variance_fn_ptr_t *vfp,
                                        const uint8_t *const src,
                                        const int src_stride,
                                        const uint8_t *const y, int y_stride,
                                        int w, int h, unsigned int *sse) {
   unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
-    vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+    aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
 
     besterr = vfp->mvf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, mask,
                        mask_stride, sse);
@@ -2503,11 +2502,11 @@
 #else
   DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
   (void)xd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    vpx_upsampled_pred(pred, w, h, y, y_stride);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+    aom_upsampled_pred(pred, w, h, y, y_stride);
 
     besterr = vfp->mvf(pred, w, src, src_stride, mask, mask_stride, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
 #endif
   return besterr;
@@ -2516,7 +2515,7 @@
 static unsigned int upsampled_setup_masked_center_error(
     const MACROBLOCKD *xd, const uint8_t *mask, int mask_stride,
     const MV *bestmv, const MV *ref_mv, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, const uint8_t *const src,
+    const aom_variance_fn_ptr_t *vfp, const uint8_t *const src,
     const int src_stride, const uint8_t *const y, int y_stride, int w, int h,
     int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
     int *distortion) {
@@ -2528,10 +2527,10 @@
   return besterr;
 }
 
-int vp10_find_best_masked_sub_pixel_tree_up(
-    VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+int av1_find_best_masked_sub_pixel_tree_up(
+    AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
     int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, int is_second, int use_upsampled_ref) {
   const uint8_t *const z = x->plane[0].src.buf;
@@ -2551,10 +2550,10 @@
   int hstep = 4;
   int iter;
   int round = 3 - forced_stop;
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
   int tr = br;
   int tc = bc;
   const MV *search_step = search_step_table;
@@ -2580,7 +2579,7 @@
   y_stride = pd->pre[is_second].stride;
   offset = bestmv->row * y_stride + bestmv->col;
 
-  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+  if (!(allow_hp && av1_use_mv_hp(ref_mv)))
     if (round == 3) round = 2;
 
   bestmv->row *= 8;
@@ -2717,7 +2716,7 @@
 static int get_masked_mvpred_var(const MACROBLOCK *x, const uint8_t *mask,
                                  int mask_stride, const MV *best_mv,
                                  const MV *center_mv,
-                                 const vpx_variance_fn_ptr_t *vfp,
+                                 const aom_variance_fn_ptr_t *vfp,
                                  int use_mvcost, int is_second) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2735,7 +2734,7 @@
 int masked_refining_search_sad(const MACROBLOCK *x, const uint8_t *mask,
                                int mask_stride, MV *ref_mv, int error_per_bit,
                                int search_range,
-                               const vpx_variance_fn_ptr_t *fn_ptr,
+                               const aom_variance_fn_ptr_t *fn_ptr,
                                const MV *center_mv, int is_second) {
   const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -2782,7 +2781,7 @@
                               const search_site_config *cfg,
                               const uint8_t *mask, int mask_stride, MV *ref_mv,
                               MV *best_mv, int search_param, int sad_per_bit,
-                              int *num00, const vpx_variance_fn_ptr_t *fn_ptr,
+                              int *num00, const aom_variance_fn_ptr_t *fn_ptr,
                               const MV *center_mv, int is_second) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2867,13 +2866,12 @@
   return best_sad;
 }
 
-int vp10_masked_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
-                                   const uint8_t *mask, int mask_stride,
-                                   MV *mvp_full, int step_param, int sadpb,
-                                   int further_steps, int do_refine,
-                                   const vpx_variance_fn_ptr_t *fn_ptr,
-                                   const MV *ref_mv, MV *dst_mv,
-                                   int is_second) {
+int av1_masked_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x,
+                                  const uint8_t *mask, int mask_stride,
+                                  MV *mvp_full, int step_param, int sadpb,
+                                  int further_steps, int do_refine,
+                                  const aom_variance_fn_ptr_t *fn_ptr,
+                                  const MV *ref_mv, MV *dst_mv, int is_second) {
   MV temp_mv;
   int thissme, n, num00 = 0;
   int bestsme = masked_diamond_search_sad(x, &cpi->ss_cfg, mask, mask_stride,
@@ -2980,7 +2978,7 @@
 
 static unsigned int setup_obmc_center_error(
     const int32_t *mask, const MV *bestmv, const MV *ref_mv, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
+    const aom_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
     const uint8_t *const y, int y_stride, int offset, int *mvjcost,
     int *mvcost[2], unsigned int *sse1, int *distortion) {
   unsigned int besterr;
@@ -2991,15 +2989,15 @@
 }
 
 static int upsampled_obmc_pref_error(const MACROBLOCKD *xd, const int32_t *mask,
-                                     const vpx_variance_fn_ptr_t *vfp,
+                                     const aom_variance_fn_ptr_t *vfp,
                                      const int32_t *const wsrc,
                                      const uint8_t *const y, int y_stride,
                                      int w, int h, unsigned int *sse) {
   unsigned int besterr;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
-    vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+    aom_highbd_upsampled_pred(pred16, w, h, y, y_stride);
 
     besterr = vfp->ovf(CONVERT_TO_BYTEPTR(pred16), w, wsrc, mask, sse);
   } else {
@@ -3007,11 +3005,11 @@
 #else
   DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
   (void)xd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    vpx_upsampled_pred(pred, w, h, y, y_stride);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+    aom_upsampled_pred(pred, w, h, y, y_stride);
 
     besterr = vfp->ovf(pred, w, wsrc, mask, sse);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
 #endif
   return besterr;
@@ -3019,7 +3017,7 @@
 
 static unsigned int upsampled_setup_obmc_center_error(
     const MACROBLOCKD *xd, const int32_t *mask, const MV *bestmv,
-    const MV *ref_mv, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const MV *ref_mv, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
     const int32_t *const wsrc, const uint8_t *const y, int y_stride, int w,
     int h, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
     int *distortion) {
@@ -3030,10 +3028,10 @@
   return besterr;
 }
 
-int vp10_find_best_obmc_sub_pixel_tree_up(
-    VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc, const int32_t *mask,
+int av1_find_best_obmc_sub_pixel_tree_up(
+    AV1_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc, const int32_t *mask,
     int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, int is_second, int use_upsampled_ref) {
   const int *const z = wsrc;
@@ -3052,10 +3050,10 @@
   int hstep = 4;
   int iter;
   int round = 3 - forced_stop;
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  const int minc = AOMMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = AOMMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
   int tr = br;
   int tc = bc;
   const MV *search_step = search_step_table;
@@ -3081,7 +3079,7 @@
   y_stride = pd->pre[is_second].stride;
   offset = bestmv->row * y_stride + bestmv->col;
 
-  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+  if (!(allow_hp && av1_use_mv_hp(ref_mv)))
     if (round == 3) round = 2;
 
   bestmv->row *= 8;
@@ -3213,7 +3211,7 @@
 static int get_obmc_mvpred_var(const MACROBLOCK *x, const int32_t *wsrc,
                                const int32_t *mask, const MV *best_mv,
                                const MV *center_mv,
-                               const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                               const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                                int is_second) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
@@ -3230,7 +3228,7 @@
 int obmc_refining_search_sad(const MACROBLOCK *x, const int32_t *wsrc,
                              const int32_t *mask, MV *ref_mv, int error_per_bit,
                              int search_range,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
+                             const aom_variance_fn_ptr_t *fn_ptr,
                              const MV *center_mv, int is_second) {
   const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -3274,7 +3272,7 @@
                             const int32_t *wsrc, const int32_t *mask,
                             MV *ref_mv, MV *best_mv, int search_param,
                             int sad_per_bit, int *num00,
-                            const vpx_variance_fn_ptr_t *fn_ptr,
+                            const aom_variance_fn_ptr_t *fn_ptr,
                             const MV *center_mv, int is_second) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
@@ -3355,12 +3353,12 @@
   return best_sad;
 }
 
-int vp10_obmc_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
-                                 const int32_t *wsrc, const int32_t *mask,
-                                 MV *mvp_full, int step_param, int sadpb,
-                                 int further_steps, int do_refine,
-                                 const vpx_variance_fn_ptr_t *fn_ptr,
-                                 const MV *ref_mv, MV *dst_mv, int is_second) {
+int av1_obmc_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x,
+                                const int32_t *wsrc, const int32_t *mask,
+                                MV *mvp_full, int step_param, int sadpb,
+                                int further_steps, int do_refine,
+                                const aom_variance_fn_ptr_t *fn_ptr,
+                                const MV *ref_mv, MV *dst_mv, int is_second) {
   MV temp_mv;
   int thissme, n, num00 = 0;
   int bestsme =
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index d26b9bd..3c57139 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_MCOMP_H_
-#define VP10_ENCODER_MCOMP_H_
+#ifndef AV1_ENCODER_MCOMP_H_
+#define AV1_ENCODER_MCOMP_H_
 
 #include "av1/encoder/block.h"
 #include "aom_dsp/variance.h"
@@ -28,7 +28,7 @@
 #define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
 // Allowed motion vector pixel distance outside image border
 // for Block_16x16
-#define BORDER_MV_PIXELS_B16 (16 + VPX_INTERP_EXTEND)
+#define BORDER_MV_PIXELS_B16 (16 + AOM_INTERP_EXTEND)
 
 // motion search site
 typedef struct search_site {
@@ -42,115 +42,114 @@
   int searches_per_step;
 } search_site_config;
 
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride);
+void av1_init3smotion_compensation(search_site_config *cfg, int stride);
 
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
-                     int *mvcost[2], int weight);
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv);
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                    int *mvcost[2], int weight);
 
 // Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
-                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
-                        int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
-                           const MV *center_mv, const uint8_t *second_pred,
-                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost);
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                       const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
+                       int use_mvcost);
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                          const MV *center_mv, const uint8_t *second_pred,
+                          const aom_variance_fn_ptr_t *vfp, int use_mvcost);
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct SPEED_FEATURES;
 
-int vp10_init_search_range(int size);
+int av1_init_search_range(int size);
 
-int vp10_refining_search_sad(struct macroblock *x, struct mv *ref_mv,
-                             int sad_per_bit, int distance,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
-                             const struct mv *center_mv);
+int av1_refining_search_sad(struct macroblock *x, struct mv *ref_mv,
+                            int sad_per_bit, int distance,
+                            const aom_variance_fn_ptr_t *fn_ptr,
+                            const struct mv *center_mv);
 
 // Runs sequence of diamond searches in smaller steps for RD.
-int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                            MV *mvp_full, int step_param, int sadpb,
-                            int further_steps, int do_refine, int *cost_list,
-                            const vpx_variance_fn_ptr_t *fn_ptr,
-                            const MV *ref_mv, MV *dst_mv);
+int av1_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+                           MV *mvp_full, int step_param, int sadpb,
+                           int further_steps, int do_refine, int *cost_list,
+                           const aom_variance_fn_ptr_t *fn_ptr,
+                           const MV *ref_mv, MV *dst_mv);
 
 // Perform integral projection based motion estimation.
-unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
-                                            MACROBLOCK *x, BLOCK_SIZE bsize,
-                                            int mi_row, int mi_col);
+unsigned int av1_int_pro_motion_estimation(const struct AV1_COMP *cpi,
+                                           MACROBLOCK *x, BLOCK_SIZE bsize,
+                                           int mi_row, int mi_col);
 
-int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
-                    int sad_per_bit, int do_init_search, int *cost_list,
-                    const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
-                    const MV *center_mv);
+int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                   int sad_per_bit, int do_init_search, int *cost_list,
+                   const aom_variance_fn_ptr_t *vfp, int use_mvcost,
+                   const MV *center_mv);
 
 typedef int(fractional_mv_step_fp)(
     MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp,
+    const aom_variance_fn_ptr_t *vfp,
     int forced_stop,  // 0 - full, 1 - qtr only, 2 - half only
     int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
     int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
     int h, int use_upsampled_ref);
 
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_more;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_evenmore;
 
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
-                                     int sad_per_bit, int distance,
-                                     const vpx_variance_fn_ptr_t *fn_ptr,
-                                     const MV *center_mv, MV *best_mv);
+typedef int (*av1_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+                                    int sad_per_bit, int distance,
+                                    const aom_variance_fn_ptr_t *fn_ptr,
+                                    const MV *center_mv, MV *best_mv);
 
-typedef int (*vp10_diamond_search_fn_t)(
+typedef int (*av1_diamond_search_fn_t)(
     MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
     int search_param, int sad_per_bit, int *num00,
-    const vpx_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
+    const aom_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
 
-int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
-                              int search_range,
-                              const vpx_variance_fn_ptr_t *fn_ptr,
-                              const MV *center_mv, const uint8_t *second_pred);
+int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
+                             const aom_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv, const uint8_t *second_pred);
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
-                           BLOCK_SIZE bsize, MV *mvp_full, int step_param,
-                           int error_per_bit, int *cost_list, const MV *ref_mv,
-                           int var_max, int rd);
+int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                          MV *mvp_full, int step_param, int error_per_bit,
+                          int *cost_list, const MV *ref_mv, int var_max,
+                          int rd);
 
 #if CONFIG_EXT_INTER
-int vp10_find_best_masked_sub_pixel_tree(
+int av1_find_best_masked_sub_pixel_tree(
     const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
     const MV *ref_mv, int allow_hp, int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
     int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
     int is_second);
-int vp10_find_best_masked_sub_pixel_tree_up(
-    struct VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+int av1_find_best_masked_sub_pixel_tree_up(
+    struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
     int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
-    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
     unsigned int *sse1, int is_second, int use_upsampled_ref);
-int vp10_masked_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                                   const uint8_t *mask, int mask_stride,
-                                   MV *mvp_full, int step_param, int sadpb,
-                                   int further_steps, int do_refine,
-                                   const vpx_variance_fn_ptr_t *fn_ptr,
-                                   const MV *ref_mv, MV *dst_mv, int is_second);
+int av1_masked_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+                                  const uint8_t *mask, int mask_stride,
+                                  MV *mvp_full, int step_param, int sadpb,
+                                  int further_steps, int do_refine,
+                                  const aom_variance_fn_ptr_t *fn_ptr,
+                                  const MV *ref_mv, MV *dst_mv, int is_second);
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_OBMC
-int vp10_obmc_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                                 const int32_t *wsrc, const int32_t *mask,
-                                 MV *mvp_full, int step_param, int sadpb,
-                                 int further_steps, int do_refine,
-                                 const vpx_variance_fn_ptr_t *fn_ptr,
-                                 const MV *ref_mv, MV *dst_mv, int is_second);
-int vp10_find_best_obmc_sub_pixel_tree_up(
-    struct VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc,
+int av1_obmc_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
+                                const int32_t *wsrc, const int32_t *mask,
+                                MV *mvp_full, int step_param, int sadpb,
+                                int further_steps, int do_refine,
+                                const aom_variance_fn_ptr_t *fn_ptr,
+                                const MV *ref_mv, MV *dst_mv, int is_second);
+int av1_find_best_obmc_sub_pixel_tree_up(
+    struct AV1_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc,
     const int32_t *mask, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
-    int allow_hp, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    int allow_hp, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
     int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
     int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
 #endif  // CONFIG_OBMC
@@ -158,4 +157,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_MCOMP_H_
+#endif  // AV1_ENCODER_MCOMP_H_
diff --git a/av1/encoder/mips/msa/error_msa.c b/av1/encoder/mips/msa/error_msa.c
index 71c5ad3..ad422f1 100644
--- a/av1/encoder/mips/msa/error_msa.c
+++ b/av1/encoder/mips/msa/error_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 #define BLOCK_ERROR_BLOCKSIZE_MSA(BSize)                                     \
@@ -86,9 +86,9 @@
 BLOCK_ERROR_BLOCKSIZE_MSA(1024)
 /* clang-format on */
 
-int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
-                             const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
-                             int64_t *ssz) {
+int64_t av1_block_error_msa(const tran_low_t *coeff_ptr,
+                            const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+                            int64_t *ssz) {
   int64_t err;
   const int16_t *coeff = (const int16_t *)coeff_ptr;
   const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
@@ -99,7 +99,7 @@
     case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
     case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
     default:
-      err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
+      err = av1_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
       break;
   }
 
diff --git a/av1/encoder/mips/msa/fdct16x16_msa.c b/av1/encoder/mips/msa/fdct16x16_msa.c
index cda2138..252d118 100644
--- a/av1/encoder/mips/msa/fdct16x16_msa.c
+++ b/av1/encoder/mips/msa/fdct16x16_msa.c
@@ -403,8 +403,8 @@
   ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
 }
 
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
-                       int32_t tx_type) {
+void av1_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+                      int32_t tx_type) {
   DECLARE_ALIGNED(32, int16_t, tmp[256]);
   DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
   DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
diff --git a/av1/encoder/mips/msa/fdct4x4_msa.c b/av1/encoder/mips/msa/fdct4x4_msa.c
index a3731c3..26087e4 100644
--- a/av1/encoder/mips/msa/fdct4x4_msa.c
+++ b/av1/encoder/mips/msa/fdct4x4_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "av1/encoder/mips/msa/fdct_msa.h"
 
-void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
-                      int32_t src_stride) {
+void av1_fwht4x4_msa(const int16_t *input, int16_t *output,
+                     int32_t src_stride) {
   v8i16 in0, in1, in2, in3, in4;
 
   LD_SH4(input, src_stride, in0, in1, in2, in3);
@@ -45,8 +45,8 @@
   ST4x2_UB(in2, output + 12, 4);
 }
 
-void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
-                     int32_t tx_type) {
+void av1_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
+                    int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
   LD_SH4(input, stride, in0, in1, in2, in3);
@@ -67,24 +67,24 @@
 
   switch (tx_type) {
     case DCT_DCT:
-      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_DCT:
-      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case DCT_ADST:
-      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_ADST:
-      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     default: assert(0); break;
   }
diff --git a/av1/encoder/mips/msa/fdct8x8_msa.c b/av1/encoder/mips/msa/fdct8x8_msa.c
index 3b6532a..aa759cc 100644
--- a/av1/encoder/mips/msa/fdct8x8_msa.c
+++ b/av1/encoder/mips/msa/fdct8x8_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "av1/encoder/mips/msa/fdct_msa.h"
 
-void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
-                     int32_t tx_type) {
+void av1_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
+                    int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
@@ -23,35 +23,35 @@
 
   switch (tx_type) {
     case DCT_DCT:
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     case ADST_DCT:
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     case DCT_ADST:
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     case ADST_ADST:
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     default: assert(0); break;
diff --git a/av1/encoder/mips/msa/fdct_msa.h b/av1/encoder/mips/msa/fdct_msa.h
index 07471d0..7373659 100644
--- a/av1/encoder/mips/msa/fdct_msa.h
+++ b/av1/encoder/mips/msa/fdct_msa.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
-#define VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#ifndef AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
+#define AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
 
 #include "aom_dsp/mips/fwd_txfm_msa.h"
 #include "aom_dsp/mips/txfm_macros_msa.h"
 #include "aom_ports/mem.h"
 
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+#define AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
                   out3, out4, out5, out6, out7)                              \
   {                                                                          \
     v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                       \
@@ -79,7 +79,7 @@
     out5 = -out5;                                                            \
   }
 
-#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3)              \
+#define AOM_FADST4(in0, in1, in2, in3, out0, out1, out2, out3)              \
   {                                                                         \
     v4i32 s0_m, s1_m, s2_m, s3_m, constant_m;                               \
     v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m;                               \
@@ -113,4 +113,4 @@
     PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
                 out0, out1, out2, out3);                                    \
   }
-#endif  // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#endif  // AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
diff --git a/av1/encoder/mips/msa/temporal_filter_msa.c b/av1/encoder/mips/msa/temporal_filter_msa.c
index 4d60d37..17b7b82 100644
--- a/av1/encoder/mips/msa/temporal_filter_msa.c
+++ b/av1/encoder/mips/msa/temporal_filter_msa.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_dsp/mips/macros_msa.h"
 
 static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
@@ -265,11 +265,11 @@
   }
 }
 
-void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
-                                    uint8_t *frame2_ptr, uint32_t blk_w,
-                                    uint32_t blk_h, int32_t strength,
-                                    int32_t filt_wgt, uint32_t *accu,
-                                    uint16_t *cnt) {
+void av1_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
+                                   uint8_t *frame2_ptr, uint32_t blk_w,
+                                   uint32_t blk_h, int32_t strength,
+                                   int32_t filt_wgt, uint32_t *accu,
+                                   uint16_t *cnt) {
   if (8 == (blk_w * blk_h)) {
     temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
                                     filt_wgt, accu, cnt);
@@ -277,7 +277,7 @@
     temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
                                      filt_wgt, accu, cnt);
   } else {
-    vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
-                                 strength, filt_wgt, accu, cnt);
+    av1_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
+                                strength, filt_wgt, accu, cnt);
   }
 }
diff --git a/av1/encoder/palette.c b/av1/encoder/palette.c
index 74f91b7..8da52f7 100644
--- a/av1/encoder/palette.c
+++ b/av1/encoder/palette.c
@@ -22,8 +22,8 @@
   return dist;
 }
 
-void vp10_calc_indices(const float *data, const float *centroids,
-                       uint8_t *indices, int n, int k, int dim) {
+void av1_calc_indices(const float *data, const float *centroids,
+                      uint8_t *indices, int n, int k, int dim) {
   int i, j;
   for (i = 0; i < n; ++i) {
     float min_dist = calc_dist(data + i * dim, centroids, dim);
@@ -93,14 +93,14 @@
   return dist;
 }
 
-void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
-                  int k, int dim, int max_itr) {
+void av1_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+                 int k, int dim, int max_itr) {
   int i;
   float this_dist;
   float pre_centroids[2 * PALETTE_MAX_SIZE];
   uint8_t pre_indices[MAX_SB_SQUARE];
 
-  vp10_calc_indices(data, centroids, indices, n, k, dim);
+  av1_calc_indices(data, centroids, indices, n, k, dim);
   this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
 
   for (i = 0; i < max_itr; ++i) {
@@ -109,7 +109,7 @@
     memcpy(pre_indices, indices, sizeof(pre_indices[0]) * n);
 
     calc_centroids(data, centroids, indices, n, k, dim);
-    vp10_calc_indices(data, centroids, indices, n, k, dim);
+    av1_calc_indices(data, centroids, indices, n, k, dim);
     this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
 
     if (this_dist > pre_dist) {
@@ -128,7 +128,7 @@
   return (fa > fb) - (fb < fa);
 }
 
-int vp10_remove_duplicates(float *centroids, int num_centroids) {
+int av1_remove_duplicates(float *centroids, int num_centroids) {
   int num_unique;  // number of unique centroids
   int i;
   qsort(centroids, num_centroids, sizeof(*centroids), float_comparer);
@@ -142,7 +142,7 @@
   return num_unique;
 }
 
-int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols) {
+int av1_count_colors(const uint8_t *src, int stride, int rows, int cols) {
   int n = 0, r, c, i, val_count[256];
   uint8_t val;
   memset(val_count, 0, sizeof(val_count));
@@ -163,9 +163,9 @@
   return n;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
-                             int cols, int bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
+int av1_count_colors_highbd(const uint8_t *src8, int stride, int rows, int cols,
+                            int bit_depth) {
   int n = 0, r, c, i;
   uint16_t val;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -188,4 +188,4 @@
 
   return n;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/palette.h b/av1/encoder/palette.h
index fbbb39c..e570e4d 100644
--- a/av1/encoder/palette.h
+++ b/av1/encoder/palette.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_PALETTE_H_
-#define VP10_ENCODER_PALETTE_H_
+#ifndef AV1_ENCODER_PALETTE_H_
+#define AV1_ENCODER_PALETTE_H_
 
 #include "av1/common/blockd.h"
 
@@ -17,30 +17,30 @@
 extern "C" {
 #endif
 
-void vp10_calc_indices(const float *data, const float *centroids,
-                       uint8_t *indices, int n, int k, int dim);
+void av1_calc_indices(const float *data, const float *centroids,
+                      uint8_t *indices, int n, int k, int dim);
 
 // Given 'data' of size 'n' and initial guess of 'centroids' of size 'k x dim',
 // runs up to 'max_itr' iterations of k-means algorithm to get updated
 // 'centroids' and the centroid 'indices' for elements in 'data'.
 // Note: the output centroids are rounded off to nearest integers.
-void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
-                  int k, int dim, int max_itr);
+void av1_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+                 int k, int dim, int max_itr);
 
 // Given a list of centroids, returns the unique number of centroids 'k', and
 // puts these unique centroids in first 'k' indices of 'centroids' array.
 // Ideally, the centroids should be rounded to integers before calling this
 // method.
-int vp10_remove_duplicates(float *centroids, int num_centroids);
+int av1_remove_duplicates(float *centroids, int num_centroids);
 
-int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols);
-#if CONFIG_VP9_HIGHBITDEPTH
-int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
-                             int cols, int bit_depth);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+int av1_count_colors(const uint8_t *src, int stride, int rows, int cols);
+#if CONFIG_AOM_HIGHBITDEPTH
+int av1_count_colors_highbd(const uint8_t *src8, int stride, int rows, int cols,
+                            int bit_depth);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif /* VP10_ENCODER_PALETTE_H_ */
+#endif /* AV1_ENCODER_PALETTE_H_ */
diff --git a/av1/encoder/pickdering.c b/av1/encoder/pickdering.c
index 91e9b54..b5db43c 100644
--- a/av1/encoder/pickdering.c
+++ b/av1/encoder/pickdering.c
@@ -10,12 +10,12 @@
 
 #include <string.h>
 
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
 #include "av1/common/dering.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/reconinter.h"
 #include "av1/encoder/encoder.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 static double compute_dist(int16_t *x, int xstride, int16_t *y, int ystride,
                            int nhb, int nvb, int coeff_shift) {
@@ -32,8 +32,8 @@
   return sum / (double)(1 << 2 * coeff_shift);
 }
 
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
-                       VP10_COMMON *cm, MACROBLOCKD *xd) {
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                      AV1_COMMON *cm, MACROBLOCKD *xd) {
   int r, c;
   int sbr, sbc;
   int nhsb, nvsb;
@@ -52,11 +52,11 @@
   int best_level;
   int global_level;
   double best_tot_mse = 1e15;
-  int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
-  src = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
-  ref_coeff = vpx_malloc(sizeof(*ref_coeff) * cm->mi_rows * cm->mi_cols * 64);
-  bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
-  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  int coeff_shift = AOMMAX(cm->bit_depth - 8, 0);
+  src = aom_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+  ref_coeff = aom_malloc(sizeof(*ref_coeff) * cm->mi_rows * cm->mi_cols * 64);
+  bskip = aom_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+  av1_setup_dst_planes(xd->plane, frame, 0, 0);
   for (pli = 0; pli < 3; pli++) {
     dec[pli] = xd->plane[pli].subsampling_x;
     bsize[pli] = 8 >> dec[pli];
@@ -64,7 +64,7 @@
   stride = bsize[0] * cm->mi_cols;
   for (r = 0; r < bsize[0] * cm->mi_rows; ++r) {
     for (c = 0; c < bsize[0] * cm->mi_cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         src[r * stride + c] = CONVERT_TO_SHORTPTR(
             xd->plane[0].dst.buf)[r * xd->plane[0].dst.stride + c];
@@ -75,7 +75,7 @@
         src[r * stride + c] =
             xd->plane[0].dst.buf[r * xd->plane[0].dst.stride + c];
         ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       }
 #endif
     }
@@ -89,15 +89,15 @@
   }
   nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
   nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
-  mse = vpx_malloc(nvsb * nhsb * sizeof(*mse));
+  mse = aom_malloc(nvsb * nhsb * sizeof(*mse));
   for (sbr = 0; sbr < nvsb; sbr++) {
     for (sbc = 0; sbc < nhsb; sbc++) {
       int best_mse = 1000000000;
       int nvb, nhb;
       int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
       best_level = 0;
-      nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
-      nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+      nhb = AOMMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+      nvb = AOMMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
       for (level = 0; level < 64; level++) {
         int threshold;
         threshold = level << coeff_shift;
@@ -169,9 +169,9 @@
       if (tot_mse[level] < tot_mse[best_level]) best_level = level;
     }
 #endif
-  vpx_free(src);
-  vpx_free(ref_coeff);
-  vpx_free(bskip);
-  vpx_free(mse);
+  aom_free(src);
+  aom_free(ref_coeff);
+  aom_free(bskip);
+  aom_free(mse);
   return best_level;
 }
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
index c4e9b7d..12254bd 100644
--- a/av1/encoder/picklpf.c
+++ b/av1/encoder/picklpf.c
@@ -11,11 +11,11 @@
 #include <assert.h>
 #include <limits.h>
 
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
 
 #include "aom_dsp/psnr.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/loopfilter.h"
@@ -26,7 +26,7 @@
 #include "av1/encoder/picklpf.h"
 #include "av1/encoder/quantize.h"
 
-int vp10_get_max_filter_level(const VP10_COMP *cpi) {
+int av1_get_max_filter_level(const AV1_COMP *cpi) {
   if (cpi->oxcf.pass == 2) {
     return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4
                                                  : MAX_LOOP_FILTER;
@@ -36,46 +36,46 @@
 }
 
 static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
-                                VP10_COMP *const cpi, int filt_level,
+                                AV1_COMP *const cpi, int filt_level,
                                 int partial_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int64_t filt_err;
 
 #if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
-  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
-                         1, partial_frame);
+  av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level, 1,
+                        partial_frame);
 #else
   if (cpi->num_workers > 1)
-    vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
-                              filt_level, 1, partial_frame, cpi->workers,
-                              cpi->num_workers, &cpi->lf_row_sync);
+    av1_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+                             filt_level, 1, partial_frame, cpi->workers,
+                             cpi->num_workers, &cpi->lf_row_sync);
   else
-    vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
-                           1, partial_frame);
+    av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+                          1, partial_frame);
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth) {
-    filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+    filt_err = aom_highbd_get_y_sse(sd, cm->frame_to_show);
   } else {
-    filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+    filt_err = aom_get_y_sse(sd, cm->frame_to_show);
   }
 #else
-  filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+  filt_err = aom_get_y_sse(sd, cm->frame_to_show);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Re-instate the unfiltered frame
-  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+  aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
 
   return filt_err;
 }
 
-int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                             int partial_frame, double *best_cost_ret) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+                            int partial_frame, double *best_cost_ret) {
+  const AV1_COMMON *const cm = &cpi->common;
   const struct loopfilter *const lf = &cm->lf;
   const int min_filter_level = 0;
-  const int max_filter_level = vp10_get_max_filter_level(cpi);
+  const int max_filter_level = av1_get_max_filter_level(cpi);
   int filt_direction = 0;
   int64_t best_err;
   int filt_best;
@@ -92,15 +92,15 @@
   memset(ss_err, 0xFF, sizeof(ss_err));
 
   //  Make a copy of the unfiltered / processed recon buffer
-  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+  aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
 
   best_err = try_filter_frame(sd, cpi, filt_mid, partial_frame);
   filt_best = filt_mid;
   ss_err[filt_mid] = best_err;
 
   while (filter_step > 0) {
-    const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
-    const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+    const int filt_high = AOMMIN(filt_mid + filter_step, max_filter_level);
+    const int filt_low = AOMMAX(filt_mid - filter_step, min_filter_level);
 
     // Bias against raising loop filter in favor of lowering it.
     int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
@@ -159,9 +159,9 @@
 }
 
 #if !CONFIG_LOOP_RESTORATION
-void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                            LPF_PICK_METHOD method) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+                           LPF_PICK_METHOD method) {
+  AV1_COMMON *const cm = &cpi->common;
   struct loopfilter *const lf = &cm->lf;
 
   lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
@@ -170,35 +170,35 @@
     lf->filter_level = 0;
   } else if (method >= LPF_PICK_FROM_Q) {
     const int min_filter_level = 0;
-    const int max_filter_level = vp10_get_max_filter_level(cpi);
-    const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+    const int max_filter_level = av1_get_max_filter_level(cpi);
+    const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
 // These values were determined by linear fitting the result of the
 // searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     int filt_guess;
     switch (cm->bit_depth) {
-      case VPX_BITS_8:
+      case AOM_BITS_8:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
         break;
-      case VPX_BITS_10:
+      case AOM_BITS_10:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
         break;
-      case VPX_BITS_12:
+      case AOM_BITS_12:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
         break;
       default:
         assert(0 &&
-               "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
-               "or VPX_BITS_12");
+               "bit_depth should be AOM_BITS_8, AOM_BITS_10 "
+               "or AOM_BITS_12");
         return;
     }
 #else
     int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
     lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
   } else {
-    lf->filter_level = vp10_search_filter_level(
+    lf->filter_level = av1_search_filter_level(
         sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, NULL);
   }
 
diff --git a/av1/encoder/picklpf.h b/av1/encoder/picklpf.h
index cd8afc6..75fdb24 100644
--- a/av1/encoder/picklpf.h
+++ b/av1/encoder/picklpf.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_PICKLPF_H_
-#define VP10_ENCODER_PICKLPF_H_
+#ifndef AV1_ENCODER_PICKLPF_H_
+#define AV1_ENCODER_PICKLPF_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -18,14 +18,14 @@
 #include "av1/encoder/encoder.h"
 
 struct yv12_buffer_config;
-struct VP10_COMP;
-int vp10_get_max_filter_level(const VP10_COMP *cpi);
-int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                             int partial_frame, double *err);
-void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
-                            struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+struct AV1_COMP;
+int av1_get_max_filter_level(const AV1_COMP *cpi);
+int av1_search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+                            int partial_frame, double *err);
+void av1_pick_filter_level(const struct yv12_buffer_config *sd,
+                           struct AV1_COMP *cpi, LPF_PICK_METHOD method);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_PICKLPF_H_
+#endif  // AV1_ENCODER_PICKLPF_H_
diff --git a/av1/encoder/pickrst.c b/av1/encoder/pickrst.c
index b6ee6f0..22bd019 100644
--- a/av1/encoder/pickrst.c
+++ b/av1/encoder/pickrst.c
@@ -13,11 +13,11 @@
 #include <limits.h>
 #include <math.h>
 
-#include "./vpx_scale_rtcd.h"
+#include "./aom_scale_rtcd.h"
 
 #include "aom_dsp/psnr.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/onyxc_int.h"
@@ -29,59 +29,59 @@
 #include "av1/encoder/quantize.h"
 
 static int64_t try_restoration_frame(const YV12_BUFFER_CONFIG *sd,
-                                     VP10_COMP *const cpi, RestorationInfo *rsi,
+                                     AV1_COMP *const cpi, RestorationInfo *rsi,
                                      int partial_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int64_t filt_err;
-  vp10_loop_restoration_frame(cm->frame_to_show, cm, rsi, 1, partial_frame);
-#if CONFIG_VP9_HIGHBITDEPTH
+  av1_loop_restoration_frame(cm->frame_to_show, cm, rsi, 1, partial_frame);
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth) {
-    filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+    filt_err = aom_highbd_get_y_sse(sd, cm->frame_to_show);
   } else {
-    filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+    filt_err = aom_get_y_sse(sd, cm->frame_to_show);
   }
 #else
-  filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+  filt_err = aom_get_y_sse(sd, cm->frame_to_show);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Re-instate the unfiltered frame
-  vpx_yv12_copy_y(&cpi->last_frame_db, cm->frame_to_show);
+  aom_yv12_copy_y(&cpi->last_frame_db, cm->frame_to_show);
   return filt_err;
 }
 
-static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
                                   int filter_level, int partial_frame,
                                   int *bilateral_level, double *best_cost_ret) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int i, j, tile_idx;
   int64_t err;
   int bits;
   double cost, best_cost, cost_norestore, cost_bilateral;
-  const int bilateral_level_bits = vp10_bilateral_level_bits(&cpi->common);
+  const int bilateral_level_bits = av1_bilateral_level_bits(&cpi->common);
   const int bilateral_levels = 1 << bilateral_level_bits;
   MACROBLOCK *x = &cpi->td.mb;
   RestorationInfo rsi;
   const int ntiles =
-      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+      av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
 
   //  Make a copy of the unfiltered / processed recon buffer
-  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
-  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
-                         1, partial_frame);
-  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+  aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+  av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+                        1, partial_frame);
+  aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
 
   // RD cost associated with no restoration
   rsi.restoration_type = RESTORE_NONE;
   err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
   bits = 0;
-  cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
-                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+  cost_norestore =
+      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
   best_cost = cost_norestore;
 
   // RD cost associated with bilateral filtering
   rsi.restoration_type = RESTORE_BILATERAL;
   rsi.bilateral_level =
-      (int *)vpx_malloc(sizeof(*rsi.bilateral_level) * ntiles);
+      (int *)aom_malloc(sizeof(*rsi.bilateral_level) * ntiles);
   assert(rsi.bilateral_level != NULL);
 
   for (j = 0; j < ntiles; ++j) bilateral_level[j] = -1;
@@ -98,7 +98,7 @@
       // when RDCOST is used.  However below we just scale both in the correct
       // ratios appropriately but not exactly by these values.
       cost = RDCOST_DBL(x->rdmult, x->rddiv,
-                        (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+                        (bits << (AV1_PROB_COST_SHIFT - 4)), err);
       if (cost < best_cost) {
         bilateral_level[tile_idx] = i;
         best_cost = cost;
@@ -116,12 +116,12 @@
     }
   }
   err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
-  cost_bilateral = RDCOST_DBL(x->rdmult, x->rddiv,
-                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+  cost_bilateral =
+      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
 
-  vpx_free(rsi.bilateral_level);
+  aom_free(rsi.bilateral_level);
 
-  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+  aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
   if (cost_bilateral < cost_norestore) {
     if (best_cost_ret) *best_cost_ret = cost_bilateral;
     return 1;
@@ -132,13 +132,13 @@
 }
 
 static int search_filter_bilateral_level(const YV12_BUFFER_CONFIG *sd,
-                                         VP10_COMP *cpi, int partial_frame,
+                                         AV1_COMP *cpi, int partial_frame,
                                          int *filter_best, int *bilateral_level,
                                          double *best_cost_ret) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const struct loopfilter *const lf = &cm->lf;
   const int min_filter_level = 0;
-  const int max_filter_level = vp10_get_max_filter_level(cpi);
+  const int max_filter_level = av1_get_max_filter_level(cpi);
   int filt_direction = 0;
   int filt_best;
   double best_err;
@@ -147,7 +147,7 @@
   int bilateral_success[MAX_LOOP_FILTER + 1];
 
   const int ntiles =
-      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+      av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
 
   // Start the search at the previous frame filter level unless it is now out of
   // range.
@@ -157,7 +157,7 @@
   // Set each entry to -1
   for (i = 0; i <= MAX_LOOP_FILTER; ++i) ss_err[i] = -1.0;
 
-  tmp_level = (int *)vpx_malloc(sizeof(*tmp_level) * ntiles);
+  tmp_level = (int *)aom_malloc(sizeof(*tmp_level) * ntiles);
 
   bilateral_success[filt_mid] = search_bilateral_level(
       sd, cpi, filt_mid, partial_frame, tmp_level, &best_err);
@@ -168,8 +168,8 @@
   }
 
   while (filter_step > 0) {
-    const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
-    const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+    const int filt_high = AOMMIN(filt_mid + filter_step, max_filter_level);
+    const int filt_low = AOMMAX(filt_mid - filter_step, min_filter_level);
 
     // Bias against raising loop filter in favor of lowering it.
     double bias = (best_err / (1 << (15 - (filt_mid / 8)))) * filter_step;
@@ -227,7 +227,7 @@
     }
   }
 
-  vpx_free(tmp_level);
+  aom_free(tmp_level);
 
   // Update best error
   best_err = ss_err[filt_best];
@@ -282,7 +282,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static double find_average_highbd(uint16_t *src, int h_start, int h_end,
                                   int v_start, int v_end, int stride) {
   uint64_t sum = 0;
@@ -329,7 +329,7 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Solves Ax = b, where x and b are column vectors
 static int linsolve(int n, double *A, int stride, double *b, double *x) {
@@ -544,12 +544,12 @@
   fi[2] = CLIP(fi[2], WIENER_FILT_TAP2_MINV, WIENER_FILT_TAP2_MAXV);
 }
 
-static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, VP10_COMP *cpi,
+static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi,
                                 int filter_level, int partial_frame,
                                 int (*vfilter)[RESTORATION_HALFWIN],
                                 int (*hfilter)[RESTORATION_HALFWIN],
                                 int *process_tile, double *best_cost_ret) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RestorationInfo rsi;
   int64_t err;
   int bits;
@@ -569,36 +569,36 @@
   int i, j;
 
   const int tilesize = WIENER_TILESIZE;
-  const int ntiles = vp10_get_restoration_ntiles(tilesize, width, height);
+  const int ntiles = av1_get_restoration_ntiles(tilesize, width, height);
 
   assert(width == dgd->y_crop_width);
   assert(height == dgd->y_crop_height);
   assert(width == src->y_crop_width);
   assert(height == src->y_crop_height);
 
-  vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
-                                 &tile_height, &nhtiles, &nvtiles);
+  av1_get_restoration_tile_size(tilesize, width, height, &tile_width,
+                                &tile_height, &nhtiles, &nvtiles);
 
   //  Make a copy of the unfiltered / processed recon buffer
-  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
-  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
-                         1, partial_frame);
-  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+  aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+  av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+                        1, partial_frame);
+  aom_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
 
   rsi.restoration_type = RESTORE_NONE;
   err = try_restoration_frame(src, cpi, &rsi, partial_frame);
   bits = 0;
-  cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
-                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+  cost_norestore =
+      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
 
   rsi.restoration_type = RESTORE_WIENER;
   rsi.vfilter =
-      (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.vfilter) * ntiles);
+      (int(*)[RESTORATION_HALFWIN])aom_malloc(sizeof(*rsi.vfilter) * ntiles);
   assert(rsi.vfilter != NULL);
   rsi.hfilter =
-      (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.hfilter) * ntiles);
+      (int(*)[RESTORATION_HALFWIN])aom_malloc(sizeof(*rsi.hfilter) * ntiles);
   assert(rsi.hfilter != NULL);
-  rsi.wiener_level = (int *)vpx_malloc(sizeof(*rsi.wiener_level) * ntiles);
+  rsi.wiener_level = (int *)aom_malloc(sizeof(*rsi.wiener_level) * ntiles);
   assert(rsi.wiener_level != NULL);
 
   // Compute best Wiener filters for each tile
@@ -614,12 +614,12 @@
     v_end = (vtile_idx < nvtiles - 1) ? ((vtile_idx + 1) * tile_height)
                                       : (height - RESTORATION_HALFWIN);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth)
       compute_stats_highbd(dgd->y_buffer, src->y_buffer, h_start, h_end,
                            v_start, v_end, dgd_stride, src_stride, M, H);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       compute_stats(dgd->y_buffer, src->y_buffer, h_start, h_end, v_start,
                     v_end, dgd_stride, src_stride, M, H);
 
@@ -650,7 +650,7 @@
     err = try_restoration_frame(src, cpi, &rsi, partial_frame);
     bits = 1 + WIENER_FILT_BITS;
     cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
-                             (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+                             (bits << (AV1_PROB_COST_SHIFT - 4)), err);
     if (cost_wiener >= cost_norestore) process_tile[tile_idx] = 0;
   }
   // Cost for Wiener filtering
@@ -660,8 +660,8 @@
     rsi.wiener_level[tile_idx] = process_tile[tile_idx];
   }
   err = try_restoration_frame(src, cpi, &rsi, partial_frame);
-  cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
-                           (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+  cost_wiener =
+      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (AV1_PROB_COST_SHIFT - 4)), err);
 
   for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
     if (process_tile[tile_idx] == 0) continue;
@@ -671,11 +671,11 @@
     }
   }
 
-  vpx_free(rsi.vfilter);
-  vpx_free(rsi.hfilter);
-  vpx_free(rsi.wiener_level);
+  aom_free(rsi.vfilter);
+  aom_free(rsi.hfilter);
+  aom_free(rsi.wiener_level);
 
-  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+  aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
   if (cost_wiener < cost_norestore) {
     if (best_cost_ret) *best_cost_ret = cost_wiener;
     return 1;
@@ -685,9 +685,9 @@
   }
 }
 
-void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                                  LPF_PICK_METHOD method) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+                                 LPF_PICK_METHOD method) {
+  AV1_COMMON *const cm = &cpi->common;
   struct loopfilter *const lf = &cm->lf;
   int wiener_success = 0;
   int bilateral_success = 0;
@@ -697,20 +697,20 @@
   int ntiles;
 
   ntiles =
-      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+      av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
   cm->rst_info.bilateral_level =
-      (int *)vpx_realloc(cm->rst_info.bilateral_level,
+      (int *)aom_realloc(cm->rst_info.bilateral_level,
                          sizeof(*cm->rst_info.bilateral_level) * ntiles);
   assert(cm->rst_info.bilateral_level != NULL);
 
-  ntiles = vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
-  cm->rst_info.wiener_level = (int *)vpx_realloc(
+  ntiles = av1_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+  cm->rst_info.wiener_level = (int *)aom_realloc(
       cm->rst_info.wiener_level, sizeof(*cm->rst_info.wiener_level) * ntiles);
   assert(cm->rst_info.wiener_level != NULL);
-  cm->rst_info.vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+  cm->rst_info.vfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
       cm->rst_info.vfilter, sizeof(*cm->rst_info.vfilter) * ntiles);
   assert(cm->rst_info.vfilter != NULL);
-  cm->rst_info.hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+  cm->rst_info.hfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
       cm->rst_info.hfilter, sizeof(*cm->rst_info.hfilter) * ntiles);
   assert(cm->rst_info.hfilter != NULL);
 
@@ -721,31 +721,31 @@
     cm->rst_info.restoration_type = RESTORE_NONE;
   } else if (method >= LPF_PICK_FROM_Q) {
     const int min_filter_level = 0;
-    const int max_filter_level = vp10_get_max_filter_level(cpi);
-    const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+    const int max_filter_level = av1_get_max_filter_level(cpi);
+    const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
 // These values were determined by linear fitting the result of the
 // searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     int filt_guess;
     switch (cm->bit_depth) {
-      case VPX_BITS_8:
+      case AOM_BITS_8:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
         break;
-      case VPX_BITS_10:
+      case AOM_BITS_10:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
         break;
-      case VPX_BITS_12:
+      case AOM_BITS_12:
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
         break;
       default:
         assert(0 &&
-               "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
-               "or VPX_BITS_12");
+               "bit_depth should be AOM_BITS_8, AOM_BITS_10 "
+               "or AOM_BITS_12");
         return;
     }
 #else
     int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
     lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
     bilateral_success = search_bilateral_level(
@@ -771,7 +771,7 @@
     bilateral_success = search_filter_bilateral_level(
         sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &blf_filter_level,
         cm->rst_info.bilateral_level, &cost_bilateral);
-    lf->filter_level = vp10_search_filter_level(
+    lf->filter_level = av1_search_filter_level(
         sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &cost_norestore);
     wiener_success = search_wiener_filter(
         sd, cpi, lf->filter_level, method == LPF_PICK_FROM_SUBIMAGE,
@@ -794,15 +794,15 @@
     //        wiener_success);
   }
   if (cm->rst_info.restoration_type != RESTORE_BILATERAL) {
-    vpx_free(cm->rst_info.bilateral_level);
+    aom_free(cm->rst_info.bilateral_level);
     cm->rst_info.bilateral_level = NULL;
   }
   if (cm->rst_info.restoration_type != RESTORE_WIENER) {
-    vpx_free(cm->rst_info.vfilter);
+    aom_free(cm->rst_info.vfilter);
     cm->rst_info.vfilter = NULL;
-    vpx_free(cm->rst_info.hfilter);
+    aom_free(cm->rst_info.hfilter);
     cm->rst_info.hfilter = NULL;
-    vpx_free(cm->rst_info.wiener_level);
+    aom_free(cm->rst_info.wiener_level);
     cm->rst_info.wiener_level = NULL;
   }
 }
diff --git a/av1/encoder/pickrst.h b/av1/encoder/pickrst.h
index 6d94cef..7ddda43 100644
--- a/av1/encoder/pickrst.h
+++ b/av1/encoder/pickrst.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_PICKRST_H_
-#define VP10_ENCODER_PICKRST_H_
+#ifndef AV1_ENCODER_PICKRST_H_
+#define AV1_ENCODER_PICKRST_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -18,13 +18,13 @@
 #include "av1/encoder/encoder.h"
 
 struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                                  LPF_PICK_METHOD method);
+void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
+                                 LPF_PICK_METHOD method);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_PICKRST_H_
+#endif  // AV1_ENCODER_PICKRST_H_
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
index ed8a04b..902a449 100644
--- a/av1/encoder/quantize.c
+++ b/av1/encoder/quantize.c
@@ -9,9 +9,9 @@
  */
 
 #include <math.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/quant_common.h"
@@ -44,7 +44,7 @@
     q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
   }
   if (q) {
-    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -77,8 +77,8 @@
   }
   if (q) {
     *dqcoeff_ptr = ROUND_POWER_OF_TWO(
-        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
-    // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+        av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    // *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
     // (1 + logsizeby32);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
@@ -109,7 +109,7 @@
         ((((int64_t)tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
   }
   if (q) {
-    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -143,8 +143,8 @@
   }
   if (q) {
     *dqcoeff_ptr = ROUND_POWER_OF_TWO(
-        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
-    // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+        av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    // *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
     // (1 + logsizeby32);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
@@ -332,34 +332,14 @@
 }
 #endif  // CONFIG_NEW_QUANT
 
-void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
-                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+void av1_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+                       tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
   memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
   memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
   *eob_ptr = 0;
 }
 
-void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
-                             const MACROBLOCKD_PLANE *pd,
-                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
-                             const scan_order *sc, const QUANT_PARAM *qparam) {
-  // obsolete skip_block
-  const int skip_block = 0;
-
-  if (qparam->log_scale == 0) {
-    vp10_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
-                     p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
-                     pd->dequant, eob_ptr, sc->scan, sc->iscan);
-  } else {
-    vp10_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin,
-                           p->round_fp, p->quant_fp, p->quant_shift, qcoeff_ptr,
-                           dqcoeff_ptr, pd->dequant, eob_ptr, sc->scan,
-                           sc->iscan);
-  }
-}
-
-void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
                             const MACROBLOCKD_PLANE *pd,
                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
@@ -368,50 +348,55 @@
   const int skip_block = 0;
 
   if (qparam->log_scale == 0) {
-    vpx_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round, p->quant,
+    av1_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+                    p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                    pd->dequant, eob_ptr, sc->scan, sc->iscan);
+  } else {
+    av1_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+                          p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                          pd->dequant, eob_ptr, sc->scan, sc->iscan);
+  }
+}
+
+void av1_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                           const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                           const MACROBLOCKD_PLANE *pd, tran_low_t *dqcoeff_ptr,
+                           uint16_t *eob_ptr, const scan_order *sc,
+                           const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  if (qparam->log_scale == 0) {
+    aom_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round, p->quant,
                    p->quant_shift, qcoeff_ptr, dqcoeff_ptr, pd->dequant,
                    eob_ptr, sc->scan, sc->iscan);
   } else {
-    vpx_quantize_b_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+    aom_quantize_b_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
                          p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
                          pd->dequant, eob_ptr, sc->scan, sc->iscan);
   }
 }
 
-void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
-                             const MACROBLOCKD_PLANE *pd,
-                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
-                             const scan_order *sc, const QUANT_PARAM *qparam) {
+void av1_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                            const MACROBLOCKD_PLANE *pd,
+                            tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                            const scan_order *sc, const QUANT_PARAM *qparam) {
   // obsolete skip_block
   const int skip_block = 0;
   (void)sc;
   if (qparam->log_scale == 0) {
-    vpx_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+    aom_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
                     p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr, pd->dequant[0],
                     eob_ptr);
   } else {
-    vpx_quantize_dc_32x32(coeff_ptr, skip_block, p->round, p->quant_fp[0],
+    aom_quantize_dc_32x32(coeff_ptr, skip_block, p->round, p->quant_fp[0],
                           qcoeff_ptr, dqcoeff_ptr, pd->dequant[0], eob_ptr);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_facade(
-    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
-    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
-    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
-    const QUANT_PARAM *qparam) {
-  // obsolete skip_block
-  const int skip_block = 0;
-
-  vp10_highbd_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
-                          p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
-                          pd->dequant, eob_ptr, sc->scan, sc->iscan,
-                          qparam->log_scale);
-}
-
-void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_facade(const tran_low_t *coeff_ptr,
                                    intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
                                    tran_low_t *qcoeff_ptr,
                                    const MACROBLOCKD_PLANE *pd,
@@ -421,25 +406,43 @@
   // obsolete skip_block
   const int skip_block = 0;
 
-  vp10_highbd_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
-                         p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+  av1_highbd_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+                         p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
                          pd->dequant, eob_ptr, sc->scan, sc->iscan,
                          qparam->log_scale);
 }
 
-void vp10_highbd_quantize_dc_facade(
-    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
-    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
-    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
-    const QUANT_PARAM *qparam) {
+void av1_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+                                  intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                  tran_low_t *qcoeff_ptr,
+                                  const MACROBLOCKD_PLANE *pd,
+                                  tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                  const scan_order *sc,
+                                  const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  av1_highbd_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+                        p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                        pd->dequant, eob_ptr, sc->scan, sc->iscan,
+                        qparam->log_scale);
+}
+
+void av1_highbd_quantize_dc_facade(const tran_low_t *coeff_ptr,
+                                   intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                   tran_low_t *qcoeff_ptr,
+                                   const MACROBLOCKD_PLANE *pd,
+                                   tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                   const scan_order *sc,
+                                   const QUANT_PARAM *qparam) {
   // obsolete skip_block
   const int skip_block = 0;
 
   (void)sc;
 
-  vp10_highbd_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
-                          p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr,
-                          pd->dequant[0], eob_ptr, qparam->log_scale);
+  av1_highbd_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+                         p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr,
+                         pd->dequant[0], eob_ptr, qparam->log_scale);
 }
 
 #if CONFIG_NEW_QUANT
@@ -464,7 +467,7 @@
     q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
   }
   if (q) {
-    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -493,7 +496,7 @@
     q = NUQ_KNOTS + (((tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
   }
   if (q) {
-    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *dqcoeff_ptr = av1_dequant_abscoeff_nuq(q, dequant, dequant_val);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -526,7 +529,7 @@
   }
   if (q) {
     *dqcoeff_ptr = ROUND_POWER_OF_TWO(
-        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+        av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -559,7 +562,7 @@
   }
   if (q) {
     *dqcoeff_ptr = ROUND_POWER_OF_TWO(
-        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+        av1_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
     *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
     *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
   } else {
@@ -748,20 +751,20 @@
   *eob_ptr = eob + 1;
 }
 #endif  // CONFIG_NEW_QUANT
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                        int skip_block, const int16_t *zbin_ptr,
-                        const int16_t *round_ptr, const int16_t *quant_ptr,
-                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
-                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-                        uint16_t *eob_ptr, const int16_t *scan,
-                        const int16_t *iscan
+void av1_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                       int skip_block, const int16_t *zbin_ptr,
+                       const int16_t *round_ptr, const int16_t *quant_ptr,
+                       const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                       tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                       uint16_t *eob_ptr, const int16_t *scan,
+                       const int16_t *iscan
 #if CONFIG_AOM_QM
-                        ,
-                        const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+                       ,
+                       const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
 #endif
-                        ) {
+                       ) {
   int i, eob = -1;
   // TODO(jingning) Decide the need of these arguments after the
   // quantization process is completed.
@@ -806,19 +809,19 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
-                               int skip_block, const int16_t *zbin_ptr,
-                               const int16_t *round_ptr,
-                               const int16_t *quant_ptr,
-                               const int16_t *quant_shift_ptr,
-                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
-                               const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+                              int skip_block, const int16_t *zbin_ptr,
+                              const int16_t *round_ptr,
+                              const int16_t *quant_ptr,
+                              const int16_t *quant_shift_ptr,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                              const int16_t *scan, const int16_t *iscan,
 #if CONFIG_AOM_QM
-                               const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
 #endif
-                               int log_scale) {
+                              int log_scale) {
   int i;
   int eob = -1;
   const int scale = 1 << log_scale;
@@ -865,23 +868,22 @@
   *eob_ptr = eob + 1;
 }
 
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // TODO(jingning) Refactor this file and combine functions with similar
 // operations.
-void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                              int skip_block, const int16_t *zbin_ptr,
-                              const int16_t *round_ptr,
-                              const int16_t *quant_ptr,
-                              const int16_t *quant_shift_ptr,
-                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
-                              const int16_t *scan, const int16_t *iscan
+void av1_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             int skip_block, const int16_t *zbin_ptr,
+                             const int16_t *round_ptr, const int16_t *quant_ptr,
+                             const int16_t *quant_shift_ptr,
+                             tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                             const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                             const int16_t *scan, const int16_t *iscan
 #if CONFIG_AOM_QM
-                              ,
-                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+                             ,
+                             const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
 #endif
-                              ) {
+                             ) {
   int i, eob = -1;
   (void)zbin_ptr;
   (void)quant_shift_ptr;
@@ -931,19 +933,18 @@
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                              int skip_block, const int16_t *zbin_ptr,
-                              const int16_t *round_ptr,
-                              const int16_t *quant_ptr,
-                              const int16_t *quant_shift_ptr,
-                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
-                              const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             int skip_block, const int16_t *zbin_ptr,
+                             const int16_t *round_ptr, const int16_t *quant_ptr,
+                             const int16_t *quant_shift_ptr,
+                             tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                             const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                             const int16_t *scan, const int16_t *iscan,
 #if CONFIG_AOM_QM
-                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+                             const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
 #endif
-                              int log_scale) {
+                             int log_scale) {
   int i, non_zero_count = (int)n_coeffs, eob = -1;
   int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
   int round[2] = { round_ptr[0], round_ptr[1] };
@@ -1021,12 +1022,12 @@
 }
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
-                             int skip_block, const int16_t *round_ptr,
-                             const int16_t quant, tran_low_t *qcoeff_ptr,
-                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
-                             uint16_t *eob_ptr, const int log_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                            int skip_block, const int16_t *round_ptr,
+                            const int16_t quant, tran_low_t *qcoeff_ptr,
+                            tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                            uint16_t *eob_ptr, const int log_scale) {
   int eob = -1;
 
   memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
@@ -1056,15 +1057,15 @@
   *shift = 1 << (16 - l);
 }
 
-static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) {
-  const int quant = vp10_dc_quant(q, 0, bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+static int get_qzbin_factor(int q, aom_bit_depth_t bit_depth) {
+  const int quant = av1_dc_quant(q, 0, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
-    case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
-    case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+    case AOM_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+    case AOM_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+    case AOM_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
@@ -1073,8 +1074,8 @@
 #endif
 }
 
-void vp10_init_quantizer(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_init_quantizer(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   QUANTS *const quants = &cpi->quants;
   int i, q, quant;
 #if CONFIG_NEW_QUANT
@@ -1088,8 +1089,8 @@
     for (i = 0; i < 2; ++i) {
       int qrounding_factor_fp = 64;
       // y
-      quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
-                     : vp10_ac_quant(q, 0, cm->bit_depth);
+      quant = i == 0 ? av1_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
+                     : av1_ac_quant(q, 0, cm->bit_depth);
       invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
       quants->y_quant_fp[q][i] = (1 << 16) / quant;
       quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
@@ -1098,8 +1099,8 @@
       cpi->y_dequant[q][i] = quant;
 
       // uv
-      quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
-                     : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
+      quant = i == 0 ? av1_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
+                     : av1_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
       invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
                    quant);
       quants->uv_quant_fp[q][i] = (1 << 16) / quant;
@@ -1114,11 +1115,11 @@
       for (i = 0; i < COEF_BANDS; i++) {
         const int quant = cpi->y_dequant[q][i != 0];
         const int uvquant = cpi->uv_dequant[q][i != 0];
-        vp10_get_dequant_val_nuq(quant, q, i, cpi->y_dequant_val_nuq[dq][q][i],
-                                 quants->y_cuml_bins_nuq[dq][q][i], dq);
-        vp10_get_dequant_val_nuq(uvquant, q, i,
-                                 cpi->uv_dequant_val_nuq[dq][q][i],
-                                 quants->uv_cuml_bins_nuq[dq][q][i], dq);
+        av1_get_dequant_val_nuq(quant, q, i, cpi->y_dequant_val_nuq[dq][q][i],
+                                quants->y_cuml_bins_nuq[dq][q][i], dq);
+        av1_get_dequant_val_nuq(uvquant, q, i,
+                                cpi->uv_dequant_val_nuq[dq][q][i],
+                                quants->uv_cuml_bins_nuq[dq][q][i], dq);
       }
     }
 #endif  // CONFIG_NEW_QUANT
@@ -1143,13 +1144,13 @@
   }
 }
 
-void vp10_init_plane_quantizers(const VP10_COMP *cpi, MACROBLOCK *x,
-                                int segment_id) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_init_plane_quantizers(const AV1_COMP *cpi, MACROBLOCK *x,
+                               int segment_id) {
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   const QUANTS *const quants = &cpi->quants;
-  const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
-  const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
+  const int qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  const int rdmult = av1_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
   int i;
 #if CONFIG_AOM_QM
   int minqm = cm->min_qmlevel;
@@ -1218,17 +1219,17 @@
 
   set_error_per_bit(x, rdmult);
 
-  vp10_initialize_me_consts(cpi, x, x->q_index);
+  av1_initialize_me_consts(cpi, x, x->q_index);
 }
 
-void vp10_frame_init_quantizer(VP10_COMP *cpi) {
+void av1_frame_init_quantizer(AV1_COMP *cpi) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+  av1_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
 }
 
-void vp10_set_quantizer(VP10_COMMON *cm, int q) {
-  // quantizer has to be reinitialized with vp10_init_quantizer() if any
+void av1_set_quantizer(AV1_COMMON *cm, int q) {
+  // quantizer has to be reinitialized with av1_init_quantizer() if any
   // delta_q changes.
   cm->base_qindex = q;
   cm->y_dc_delta_q = 0;
@@ -1246,11 +1247,11 @@
   208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
 };
 
-int vp10_quantizer_to_qindex(int quantizer) {
+int av1_quantizer_to_qindex(int quantizer) {
   return quantizer_to_qindex[quantizer];
 }
 
-int vp10_qindex_to_quantizer(int qindex) {
+int av1_qindex_to_quantizer(int qindex) {
   int quantizer;
 
   for (quantizer = 0; quantizer < 64; ++quantizer)
diff --git a/av1/encoder/quantize.h b/av1/encoder/quantize.h
index 6b1e739..faf26b0 100644
--- a/av1/encoder/quantize.h
+++ b/av1/encoder/quantize.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_QUANTIZE_H_
-#define VP10_ENCODER_QUANTIZE_H_
+#ifndef AV1_ENCODER_QUANTIZE_H_
+#define AV1_ENCODER_QUANTIZE_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/quant_common.h"
 #include "av1/common/scan.h"
 #include "av1/encoder/block.h"
@@ -22,13 +22,13 @@
 
 typedef struct QUANT_PARAM { int log_scale; } QUANT_PARAM;
 
-typedef void (*VP10_QUANT_FACADE)(const tran_low_t *coeff_ptr,
-                                  intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
-                                  tran_low_t *qcoeff_ptr,
-                                  const MACROBLOCKD_PLANE *pd,
-                                  tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
-                                  const scan_order *sc,
-                                  const QUANT_PARAM *qparam);
+typedef void (*AV1_QUANT_FACADE)(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                                 const MACROBLOCK_PLANE *p,
+                                 tran_low_t *qcoeff_ptr,
+                                 const MACROBLOCKD_PLANE *pd,
+                                 tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                 const scan_order *sc,
+                                 const QUANT_PARAM *qparam);
 
 typedef struct {
 #if CONFIG_NEW_QUANT
@@ -58,42 +58,42 @@
   DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
 } QUANTS;
 
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
 
-void vp10_frame_init_quantizer(struct VP10_COMP *cpi);
+void av1_frame_init_quantizer(struct AV1_COMP *cpi);
 
-void vp10_init_plane_quantizers(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                                int segment_id);
+void av1_init_plane_quantizers(const struct AV1_COMP *cpi, MACROBLOCK *x,
+                               int segment_id);
 
-void vp10_init_quantizer(struct VP10_COMP *cpi);
+void av1_init_quantizer(struct AV1_COMP *cpi);
 
-void vp10_set_quantizer(struct VP10Common *cm, int q);
+void av1_set_quantizer(struct AV1Common *cm, int q);
 
-int vp10_quantizer_to_qindex(int quantizer);
+int av1_quantizer_to_qindex(int quantizer);
 
-int vp10_qindex_to_quantizer(int qindex);
+int av1_qindex_to_quantizer(int qindex);
 
-void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
-                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+void av1_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+                       tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
 
-void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
-                             const MACROBLOCKD_PLANE *pd,
-                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
-                             const scan_order *sc, const QUANT_PARAM *qparam);
-
-void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
                             const MACROBLOCKD_PLANE *pd,
                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
                             const scan_order *sc, const QUANT_PARAM *qparam);
 
-void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
-                             const MACROBLOCKD_PLANE *pd,
-                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
-                             const scan_order *sc, const QUANT_PARAM *qparam);
+void av1_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                           const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                           const MACROBLOCKD_PLANE *pd, tran_low_t *dqcoeff_ptr,
+                           uint16_t *eob_ptr, const scan_order *sc,
+                           const QUANT_PARAM *qparam);
+
+void av1_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                            const MACROBLOCKD_PLANE *pd,
+                            tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                            const scan_order *sc, const QUANT_PARAM *qparam);
 
 #if CONFIG_NEW_QUANT
 void quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
@@ -123,14 +123,8 @@
                               uint16_t *eob_ptr);
 #endif  // CONFIG_NEW_QUANT
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_facade(
-    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
-    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
-    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
-    const QUANT_PARAM *qparam);
-
-void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_facade(const tran_low_t *coeff_ptr,
                                    intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
                                    tran_low_t *qcoeff_ptr,
                                    const MACROBLOCKD_PLANE *pd,
@@ -138,17 +132,27 @@
                                    const scan_order *sc,
                                    const QUANT_PARAM *qparam);
 
-void vp10_highbd_quantize_dc_facade(
-    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
-    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
-    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
-    const QUANT_PARAM *qparam);
+void av1_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+                                  intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                  tran_low_t *qcoeff_ptr,
+                                  const MACROBLOCKD_PLANE *pd,
+                                  tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                  const scan_order *sc,
+                                  const QUANT_PARAM *qparam);
 
-void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
-                             int skip_block, const int16_t *round_ptr,
-                             const int16_t quant, tran_low_t *qcoeff_ptr,
-                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
-                             uint16_t *eob_ptr, const int log_scale);
+void av1_highbd_quantize_dc_facade(const tran_low_t *coeff_ptr,
+                                   intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                   tran_low_t *qcoeff_ptr,
+                                   const MACROBLOCKD_PLANE *pd,
+                                   tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                   const scan_order *sc,
+                                   const QUANT_PARAM *qparam);
+
+void av1_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                            int skip_block, const int16_t *round_ptr,
+                            const int16_t quant, tran_low_t *qcoeff_ptr,
+                            tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                            uint16_t *eob_ptr, const int log_scale);
 #if CONFIG_NEW_QUANT
 void highbd_quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                             int skip_block, const int16_t quant,
@@ -176,10 +180,10 @@
     tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
 
 #endif  // CONFIG_NEW_QUANT
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_QUANTIZE_H_
+#endif  // AV1_ENCODER_QUANTIZE_H_
diff --git a/av1/encoder/ransac.c b/av1/encoder/ransac.c
index e925068..0beaab8 100644
--- a/av1/encoder/ransac.c
+++ b/av1/encoder/ransac.c
@@ -15,7 +15,7 @@
 #include <stdlib.h>
 #include <assert.h>
 
-#include "vp10/encoder/ransac.h"
+#include "av1/encoder/ransac.h"
 
 #define MAX_PARAMDIM 9
 #define MAX_MINPTS 4
@@ -468,7 +468,7 @@
       corners1_int[2 * i + 1] = (int)corners1[i * 2 + 1];
     }
 
-    vp10_integerize_model(H, type, &wm);
+    av1_integerize_model(H, type, &wm);
     projectpoints(wm.wmmat, corners1_int, image1_coord, npoints, 2, 2, 0, 0);
 
     for (i = 0; i < npoints; ++i) {
diff --git a/av1/encoder/ransac.h b/av1/encoder/ransac.h
index 0b14ecf..c8fbdc8 100644
--- a/av1/encoder/ransac.h
+++ b/av1/encoder/ransac.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_RANSAC_H_
-#define VP10_ENCODER_RANSAC_H_
+#ifndef AV1_ENCODER_RANSAC_H_
+#define AV1_ENCODER_RANSAC_H_
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
 #include <memory.h>
 
-#include "vp10/common/warped_motion.h"
+#include "av1/common/warped_motion.h"
 
 typedef int (*RansacType)(double *matched_points, int npoints,
                           int *number_of_inliers, int *best_inlier_mask,
@@ -34,4 +34,4 @@
 int ransacTranslation(double *matched_points, int npoints,
                       int *number_of_inliers, int *best_inlier_indices,
                       double *bestH);
-#endif  // VP10_ENCODER_RANSAC_H
+#endif  // AV1_ENCODER_RANSAC_H_
diff --git a/av1/encoder/ratectrl.c b/av1/encoder/ratectrl.c
index ddd5762..e94571c 100644
--- a/av1/encoder/ratectrl.c
+++ b/av1/encoder/ratectrl.c
@@ -15,8 +15,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
 
@@ -42,17 +42,17 @@
 #define MAX_BPB_FACTOR 50
 
 #define FRAME_OVERHEAD_BITS 200
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define ASSIGN_MINQ_TABLE(bit_depth, name)                   \
   do {                                                       \
     switch (bit_depth) {                                     \
-      case VPX_BITS_8: name = name##_8; break;               \
-      case VPX_BITS_10: name = name##_10; break;             \
-      case VPX_BITS_12: name = name##_12; break;             \
+      case AOM_BITS_8: name = name##_8; break;               \
+      case AOM_BITS_10: name = name##_10; break;             \
+      case AOM_BITS_12: name = name##_12; break;             \
       default:                                               \
         assert(0 &&                                          \
-               "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
-               " or VPX_BITS_12");                           \
+               "bit_depth should be AOM_BITS_8, AOM_BITS_10" \
+               " or AOM_BITS_12");                           \
         name = NULL;                                         \
     }                                                        \
   } while (0)
@@ -72,7 +72,7 @@
 static int inter_minq_8[QINDEX_RANGE];
 static int rtc_minq_8[QINDEX_RANGE];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int kf_low_motion_minq_10[QINDEX_RANGE];
 static int kf_high_motion_minq_10[QINDEX_RANGE];
 static int arfgf_low_motion_minq_10[QINDEX_RANGE];
@@ -97,16 +97,16 @@
 // The formulae were derived from computing a 3rd order polynomial best
 // fit to the original data (after plotting real maxq vs minq (not q index))
 static int get_minq_index(double maxq, double x3, double x2, double x1,
-                          vpx_bit_depth_t bit_depth) {
+                          aom_bit_depth_t bit_depth) {
   int i;
-  const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
+  const double minqtarget = AOMMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
 
   // Special case handling to deal with the step from q2.0
   // down to lossless mode represented by q 1.0.
   if (minqtarget <= 2.0) return 0;
 
   for (i = 0; i < QINDEX_RANGE; i++) {
-    if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
+    if (minqtarget <= av1_convert_qindex_to_q(i, bit_depth)) return i;
   }
 
   return QINDEX_RANGE - 1;
@@ -114,10 +114,10 @@
 
 static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
                            int *arfgf_high, int *inter, int *rtc,
-                           vpx_bit_depth_t bit_depth) {
+                           aom_bit_depth_t bit_depth) {
   int i;
   for (i = 0; i < QINDEX_RANGE; i++) {
-    const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
+    const double maxq = av1_convert_qindex_to_q(i, bit_depth);
     kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth);
     kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
     arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth);
@@ -127,42 +127,42 @@
   }
 }
 
-void vp10_rc_init_minq_luts(void) {
+void av1_rc_init_minq_luts(void) {
   init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8,
                  arfgf_low_motion_minq_8, arfgf_high_motion_minq_8,
-                 inter_minq_8, rtc_minq_8, VPX_BITS_8);
-#if CONFIG_VP9_HIGHBITDEPTH
+                 inter_minq_8, rtc_minq_8, AOM_BITS_8);
+#if CONFIG_AOM_HIGHBITDEPTH
   init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10,
                  arfgf_low_motion_minq_10, arfgf_high_motion_minq_10,
-                 inter_minq_10, rtc_minq_10, VPX_BITS_10);
+                 inter_minq_10, rtc_minq_10, AOM_BITS_10);
   init_minq_luts(kf_low_motion_minq_12, kf_high_motion_minq_12,
                  arfgf_low_motion_minq_12, arfgf_high_motion_minq_12,
-                 inter_minq_12, rtc_minq_12, VPX_BITS_12);
+                 inter_minq_12, rtc_minq_12, AOM_BITS_12);
 #endif
 }
 
 // These functions use formulaic calculations to make playing with the
 // quantizer tables easier. If necessary they can be replaced by lookup
 // tables if and when things settle down in the experimental bitstream
-double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
 // Convert the index to a real Q value (scaled down to match old Q values)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
-    case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
-    case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+    case AOM_BITS_8: return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
+    case AOM_BITS_10: return av1_ac_quant(qindex, 0, bit_depth) / 16.0;
+    case AOM_BITS_12: return av1_ac_quant(qindex, 0, bit_depth) / 64.0;
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1.0;
   }
 #else
-  return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+  return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
 #endif
 }
 
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
-                        double correction_factor, vpx_bit_depth_t bit_depth) {
-  const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+                       double correction_factor, aom_bit_depth_t bit_depth) {
+  const double q = av1_convert_qindex_to_q(qindex, bit_depth);
   int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
 
   assert(correction_factor <= MAX_BPB_FACTOR &&
@@ -173,20 +173,20 @@
   return (int)(enumerator * correction_factor / q);
 }
 
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
-                            double correction_factor,
-                            vpx_bit_depth_t bit_depth) {
+int av1_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
+                           double correction_factor,
+                           aom_bit_depth_t bit_depth) {
   const int bpm =
-      (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
-  return VPXMAX(FRAME_OVERHEAD_BITS,
+      (int)(av1_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
+  return AOMMAX(FRAME_OVERHEAD_BITS,
                 (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
 }
 
-int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_pframe_target_size(const AV1_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   const int min_frame_target =
-      VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+      AOMMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
 // Clip the frame target to the minimum setup value.
 #if CONFIG_EXT_REFS
   if (cpi->rc.is_src_frame_alt_ref) {
@@ -207,27 +207,27 @@
   if (oxcf->rc_max_inter_bitrate_pct) {
     const int max_rate =
         rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
-    target = VPXMIN(target, max_rate);
+    target = AOMMIN(target, max_rate);
   }
 
   return target;
 }
 
-int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_iframe_target_size(const AV1_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   if (oxcf->rc_max_intra_bitrate_pct) {
     const int max_rate =
         rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
-    target = VPXMIN(target, max_rate);
+    target = AOMMIN(target, max_rate);
   }
   if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
   return target;
 }
 
 // Update the buffer level: leaky bucket model.
-static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
-  const VP10_COMMON *const cm = &cpi->common;
+static void update_buffer_level(AV1_COMP *cpi, int encoded_frame_size) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
 // Non-viewable frames are a special case and are treated as pure overhead.
@@ -243,12 +243,12 @@
     rc->bits_off_target += rc->avg_frame_bandwidth - encoded_frame_size;
 
   // Clip the buffer level to the maximum specified buffer size.
-  rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
+  rc->bits_off_target = AOMMIN(rc->bits_off_target, rc->maximum_buffer_size);
   rc->buffer_level = rc->bits_off_target;
 }
 
-int vp10_rc_get_default_min_gf_interval(int width, int height,
-                                        double framerate) {
+int av1_rc_get_default_min_gf_interval(int width, int height,
+                                       double framerate) {
   // Assume we do not need any constraint lower than 4K 20 fps
   static const double factor_safe = 3840 * 2160 * 20.0;
   const double factor = width * height * framerate;
@@ -258,7 +258,7 @@
   if (factor <= factor_safe)
     return default_interval;
   else
-    return VPXMAX(default_interval,
+    return AOMMAX(default_interval,
                   (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5));
   // Note this logic makes:
   // 4K24: 5
@@ -266,16 +266,16 @@
   // 4K60: 12
 }
 
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
-  int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
+int av1_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
+  int interval = AOMMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
   interval += (interval & 0x01);  // Round to even value
-  return VPXMAX(interval, min_gf_interval);
+  return AOMMAX(interval, min_gf_interval);
 }
 
-void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
+void av1_rc_init(const AV1EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
   int i;
 
-  if (pass == 0 && oxcf->rc_mode == VPX_CBR) {
+  if (pass == 0 && oxcf->rc_mode == AOM_CBR) {
     rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
     rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
   } else {
@@ -312,7 +312,7 @@
   rc->ni_frames = 0;
 
   rc->tot_q = 0.0;
-  rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
+  rc->avg_q = av1_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
 
   for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
     rc->rate_correction_factors[i] = 1.0;
@@ -321,16 +321,16 @@
   rc->min_gf_interval = oxcf->min_gf_interval;
   rc->max_gf_interval = oxcf->max_gf_interval;
   if (rc->min_gf_interval == 0)
-    rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+    rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
         oxcf->width, oxcf->height, oxcf->init_framerate);
   if (rc->max_gf_interval == 0)
-    rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+    rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
         oxcf->init_framerate, rc->min_gf_interval);
   rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
 }
 
-int vp10_rc_drop_frame(VP10_COMP *cpi) {
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+int av1_rc_drop_frame(AV1_COMP *cpi) {
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
 
   if (!oxcf->drop_frames_water_mark) {
@@ -365,7 +365,7 @@
   }
 }
 
-static double get_rate_correction_factor(const VP10_COMP *cpi) {
+static double get_rate_correction_factor(const AV1_COMP *cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
   double rcf;
 
@@ -378,7 +378,7 @@
   } else {
     if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
         !rc->is_src_frame_alt_ref &&
-        (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+        (cpi->oxcf.rc_mode != AOM_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
       rcf = rc->rate_correction_factors[GF_ARF_STD];
     else
       rcf = rc->rate_correction_factors[INTER_NORMAL];
@@ -387,7 +387,7 @@
   return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
 }
 
-static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
+static void set_rate_correction_factor(AV1_COMP *cpi, double factor) {
   RATE_CONTROL *const rc = &cpi->rc;
 
   // Normalize RCF to account for the size-dependent scaling factor.
@@ -404,15 +404,15 @@
   } else {
     if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
         !rc->is_src_frame_alt_ref &&
-        (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+        (cpi->oxcf.rc_mode != AOM_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
       rc->rate_correction_factors[GF_ARF_STD] = factor;
     else
       rc->rate_correction_factors[INTER_NORMAL] = factor;
   }
 }
 
-void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_update_rate_correction_factors(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   int correction_factor = 100;
   double rate_correction_factor = get_rate_correction_factor(cpi);
   double adjustment_limit;
@@ -423,18 +423,18 @@
   if (cpi->rc.is_src_frame_alt_ref) return;
 
   // Clear down mmx registers to allow floating point in what follows
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   // Work out how big we would have expected the frame to be at this Q given
   // the current correction factor.
   // Stay in double to avoid int overflow when values are large
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) {
     projected_size_based_on_q =
-        vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
+        av1_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
   } else {
     projected_size_based_on_q =
-        vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
-                                cm->MBs, rate_correction_factor, cm->bit_depth);
+        av1_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex, cm->MBs,
+                               rate_correction_factor, cm->bit_depth);
   }
   // Work out a size correction factor.
   if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
@@ -444,7 +444,7 @@
   // More heavily damped adjustment used if we have been oscillating either side
   // of target.
   adjustment_limit =
-      0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
+      0.25 + 0.5 * AOMMIN(1, fabs(log10(0.01 * correction_factor)));
 
   cpi->rc.q_2_frame = cpi->rc.q_1_frame;
   cpi->rc.q_1_frame = cm->base_qindex;
@@ -478,9 +478,9 @@
   set_rate_correction_factor(cpi, rate_correction_factor);
 }
 
-int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
-                       int active_best_quality, int active_worst_quality) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_rc_regulate_q(const AV1_COMP *cpi, int target_bits_per_frame,
+                      int active_best_quality, int active_worst_quality) {
+  const AV1_COMMON *const cm = &cpi->common;
   int q = active_worst_quality;
   int last_error = INT_MAX;
   int i, target_bits_per_mb, bits_per_mb_at_this_q;
@@ -496,9 +496,9 @@
   do {
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
       bits_per_mb_at_this_q =
-          (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
+          (int)av1_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
     } else {
-      bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+      bits_per_mb_at_this_q = (int)av1_rc_bits_per_mb(
           cm->frame_type, i, correction_factor, cm->bit_depth);
     }
 
@@ -516,11 +516,11 @@
 
   // In CBR mode, this makes sure q is between oscillating Qs to prevent
   // resonance.
-  if (cpi->oxcf.rc_mode == VPX_CBR &&
+  if (cpi->oxcf.rc_mode == AOM_CBR &&
       (cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) &&
       cpi->rc.q_1_frame != cpi->rc.q_2_frame) {
-    q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame),
-              VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame));
+    q = clamp(q, AOMMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame),
+              AOMMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame));
   }
   return q;
 }
@@ -541,7 +541,7 @@
 }
 
 static int get_kf_active_quality(const RATE_CONTROL *const rc, int q,
-                                 vpx_bit_depth_t bit_depth) {
+                                 aom_bit_depth_t bit_depth) {
   int *kf_low_motion_minq;
   int *kf_high_motion_minq;
   ASSIGN_MINQ_TABLE(bit_depth, kf_low_motion_minq);
@@ -551,7 +551,7 @@
 }
 
 static int get_gf_active_quality(const RATE_CONTROL *const rc, int q,
-                                 vpx_bit_depth_t bit_depth) {
+                                 aom_bit_depth_t bit_depth) {
   int *arfgf_low_motion_minq;
   int *arfgf_high_motion_minq;
   ASSIGN_MINQ_TABLE(bit_depth, arfgf_low_motion_minq);
@@ -560,7 +560,7 @@
                             arfgf_low_motion_minq, arfgf_high_motion_minq);
 }
 
-static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_vbr(const AV1_COMP *cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
   const unsigned int curr_frame = cpi->common.current_video_frame;
   int active_worst_quality;
@@ -578,17 +578,17 @@
                                              : rc->last_q[INTER_FRAME] * 2;
     }
   }
-  return VPXMIN(active_worst_quality, rc->worst_quality);
+  return AOMMIN(active_worst_quality, rc->worst_quality);
 }
 
 // Adjust active_worst_quality level based on buffer level.
-static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_cbr(const AV1_COMP *cpi) {
   // Adjust active_worst_quality: If buffer is above the optimal/target level,
   // bring active_worst_quality down depending on fullness of buffer.
   // If buffer is below the optimal level, let the active_worst_quality go from
   // ambient Q (at buffer = optimal level) to worst_quality level
   // (at buffer = critical level).
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *rc = &cpi->rc;
   // Buffer level below which we push active_worst to worst_quality.
   int64_t critical_level = rc->optimal_buffer_level >> 3;
@@ -603,10 +603,10 @@
   // So for first few frames following key, the qp of that key frame is weighted
   // into the active_worst_quality setting.
   ambient_qp = (cm->current_video_frame < 5)
-                   ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+                   ? AOMMIN(rc->avg_frame_qindex[INTER_FRAME],
                             rc->avg_frame_qindex[KEY_FRAME])
                    : rc->avg_frame_qindex[INTER_FRAME];
-  active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4);
+  active_worst_quality = AOMMIN(rc->worst_quality, ambient_qp * 5 / 4);
   if (rc->buffer_level > rc->optimal_buffer_level) {
     // Adjust down.
     // Maximum limit for down adjustment, ~30%.
@@ -637,10 +637,10 @@
   return active_worst_quality;
 }
 
-static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_cbr(const AV1_COMP *cpi,
                                              int *bottom_index,
                                              int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   int active_best_quality;
   int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
@@ -655,10 +655,10 @@
     // based on the ambient Q to reduce the risk of popping.
     if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
-      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(
+      double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(
           rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
-      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+      active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
     } else if (cm->current_video_frame > 0) {
       // not first frame of one pass and kf_boost is set
       double q_adj_factor = 1.0;
@@ -674,9 +674,9 @@
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -718,19 +718,19 @@
   if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
       !(cm->current_video_frame == 0)) {
     int qdelta = 0;
-    vpx_clear_system_state();
-    qdelta = vp10_compute_qdelta_by_rate(
+    aom_clear_system_state();
+    qdelta = av1_compute_qdelta_by_rate(
         &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     *top_index = active_worst_quality + qdelta;
-    *top_index = VPXMAX(*top_index, *bottom_index);
+    *top_index = AOMMAX(*top_index, *bottom_index);
   }
 
   // Special case code to try and match quality with forced key frames
   if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
-                           active_worst_quality);
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                          active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -748,10 +748,10 @@
 }
 
 static int get_active_cq_level(const RATE_CONTROL *rc,
-                               const VP10EncoderConfig *const oxcf) {
+                               const AV1EncoderConfig *const oxcf) {
   static const double cq_adjust_threshold = 0.1;
   int active_cq_level = oxcf->cq_level;
-  if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
+  if (oxcf->rc_mode == AOM_CQ && rc->total_target_bits > 0) {
     const double x = (double)rc->total_actual_bits / rc->total_target_bits;
     if (x < cq_adjust_threshold) {
       active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
@@ -760,12 +760,12 @@
   return active_cq_level;
 }
 
-static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_vbr(const AV1_COMP *cpi,
                                              int *bottom_index,
                                              int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int cq_level = get_active_cq_level(rc, oxcf);
   int active_best_quality;
   int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
@@ -774,17 +774,17 @@
   ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
 
   if (frame_is_intra_only(cm)) {
-    if (oxcf->rc_mode == VPX_Q) {
+    if (oxcf->rc_mode == AOM_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
-      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
+      active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
     } else if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
-      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(
+      double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(
           rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
-      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+      active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       // not first frame of one pass and kf_boost is set
       double q_adj_factor = 1.0;
@@ -800,9 +800,9 @@
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -816,7 +816,7 @@
       q = rc->avg_frame_qindex[KEY_FRAME];
     }
     // For constrained quality dont allow Q less than the cq level
-    if (oxcf->rc_mode == VPX_CQ) {
+    if (oxcf->rc_mode == AOM_CQ) {
       if (q < cq_level) q = cq_level;
 
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -824,28 +824,28 @@
       // Constrained quality use slightly lower active best.
       active_best_quality = active_best_quality * 15 / 16;
 
-    } else if (oxcf->rc_mode == VPX_Q) {
+    } else if (oxcf->rc_mode == AOM_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
       int delta_qindex;
       if (cpi->refresh_alt_ref_frame)
-        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
       else
-        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
-      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+        delta_qindex = av1_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
+      active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
     }
   } else {
-    if (oxcf->rc_mode == VPX_Q) {
+    if (oxcf->rc_mode == AOM_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
       double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
                                                0.70, 1.0, 0.85, 1.0 };
-      int delta_qindex = vp10_compute_qdelta(
+      int delta_qindex = av1_compute_qdelta(
           rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
           cm->bit_depth);
-      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+      active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       // Use the lower of active_worst_quality and recent/average Q.
       if (cm->current_video_frame > 1)
@@ -854,7 +854,7 @@
         active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
       // For the constrained quality mode we don't want
       // q to fall below the cq level.
-      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+      if ((oxcf->rc_mode == AOM_CQ) && (active_best_quality < cq_level)) {
         active_best_quality = cq_level;
       }
     }
@@ -872,28 +872,28 @@
   // Limit Q range for the adaptive loop.
   {
     int qdelta = 0;
-    vpx_clear_system_state();
+    aom_clear_system_state();
     if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
         !(cm->current_video_frame == 0)) {
-      qdelta = vp10_compute_qdelta_by_rate(
+      qdelta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     } else if (!rc->is_src_frame_alt_ref &&
                (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-      qdelta = vp10_compute_qdelta_by_rate(
+      qdelta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
     }
     *top_index = active_worst_quality + qdelta;
-    *top_index = VPXMAX(*top_index, *bottom_index);
+    *top_index = AOMMAX(*top_index, *bottom_index);
   }
 
-  if (oxcf->rc_mode == VPX_Q) {
+  if (oxcf->rc_mode == AOM_Q) {
     q = active_best_quality;
     // Special case code to try and match quality with forced key frames
   } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
-                           active_worst_quality);
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                          active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -910,7 +910,7 @@
   return q;
 }
 
-int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
+int av1_frame_type_qdelta(const AV1_COMP *cpi, int rf_level, int q) {
   static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = {
     1.00,  // INTER_NORMAL
 #if CONFIG_EXT_REFS
@@ -931,19 +931,19 @@
 #else
       { INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME };
 #endif  // CONFIG_EXT_REFS
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   int qdelta =
-      vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
-                                  rate_factor_deltas[rf_level], cm->bit_depth);
+      av1_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+                                 rate_factor_deltas[rf_level], cm->bit_depth);
   return qdelta;
 }
 
 #define STATIC_MOTION_THRESH 95
-static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
-                                         int *bottom_index, int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+static int rc_pick_q_and_bounds_two_pass(const AV1_COMP *cpi, int *bottom_index,
+                                         int *top_index) {
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const GF_GROUP *gf_group = &cpi->twopass.gf_group;
   const int cq_level = get_active_cq_level(rc, oxcf);
   int active_best_quality;
@@ -962,19 +962,19 @@
       int qindex;
 
       if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
-        qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+        qindex = AOMMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
         active_best_quality = qindex;
-        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(
-            rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
+        last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(rc, last_boosted_q,
+                                          last_boosted_q * 1.25, cm->bit_depth);
         active_worst_quality =
-            VPXMIN(qindex + delta_qindex, active_worst_quality);
+            AOMMIN(qindex + delta_qindex, active_worst_quality);
       } else {
         qindex = rc->last_boosted_qindex;
-        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(
-            rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
-        active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+        last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(rc, last_boosted_q,
+                                          last_boosted_q * 0.75, cm->bit_depth);
+        active_best_quality = AOMMAX(qindex + delta_qindex, rc->best_quality);
       }
     } else {
       // Not forced keyframe.
@@ -995,9 +995,9 @@
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1011,7 +1011,7 @@
       q = active_worst_quality;
     }
     // For constrained quality dont allow Q less than the cq level
-    if (oxcf->rc_mode == VPX_CQ) {
+    if (oxcf->rc_mode == AOM_CQ) {
       if (q < cq_level) q = cq_level;
 
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -1019,13 +1019,13 @@
       // Constrained quality use slightly lower active best.
       active_best_quality = active_best_quality * 15 / 16;
 
-    } else if (oxcf->rc_mode == VPX_Q) {
+    } else if (oxcf->rc_mode == AOM_Q) {
       if (!cpi->refresh_alt_ref_frame) {
         active_best_quality = cq_level;
       } else {
         active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
 
-        // Modify best quality for second level arfs. For mode VPX_Q this
+        // Modify best quality for second level arfs. For mode AOM_Q this
         // becomes the baseline frame q.
         if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)
           active_best_quality = (active_best_quality + cq_level + 1) / 2;
@@ -1034,14 +1034,14 @@
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
     }
   } else {
-    if (oxcf->rc_mode == VPX_Q) {
+    if (oxcf->rc_mode == AOM_Q) {
       active_best_quality = cq_level;
     } else {
       active_best_quality = inter_minq[active_worst_quality];
 
       // For the constrained quality mode we don't want
       // q to fall below the cq level.
-      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+      if ((oxcf->rc_mode == AOM_CQ) && (active_best_quality < cq_level)) {
         active_best_quality = cq_level;
       }
     }
@@ -1049,7 +1049,7 @@
 
   // Extension to max or min Q if undershoot or overshoot is outside
   // the permitted range.
-  if ((cpi->oxcf.rc_mode != VPX_Q) &&
+  if ((cpi->oxcf.rc_mode != AOM_Q) &&
       (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD)) {
     if (frame_is_intra_only(cm) ||
         (!rc->is_src_frame_alt_ref &&
@@ -1064,22 +1064,22 @@
     }
   }
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
   // Static forced key frames Q restrictions dealt with elsewhere.
   if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
       (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
-    int qdelta = vp10_frame_type_qdelta(
-        cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
+    int qdelta = av1_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index],
+                                       active_worst_quality);
     active_worst_quality =
-        VPXMAX(active_worst_quality + qdelta, active_best_quality);
+        AOMMAX(active_worst_quality + qdelta, active_best_quality);
   }
 
   // Modify active_best_quality for downscaled normal frames.
   if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
-    int qdelta = vp10_compute_qdelta_by_rate(
+    int qdelta = av1_compute_qdelta_by_rate(
         rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
     active_best_quality =
-        VPXMAX(active_best_quality + qdelta, rc->best_quality);
+        AOMMAX(active_best_quality + qdelta, rc->best_quality);
   }
 
   active_best_quality =
@@ -1087,19 +1087,19 @@
   active_worst_quality =
       clamp(active_worst_quality, active_best_quality, rc->worst_quality);
 
-  if (oxcf->rc_mode == VPX_Q) {
+  if (oxcf->rc_mode == AOM_Q) {
     q = active_best_quality;
     // Special case code to try and match quality with forced key frames.
   } else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
     // If static since last kf use better of last boosted and last kf q.
     if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
-      q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+      q = AOMMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
     } else {
       q = rc->last_boosted_qindex;
     }
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
-                           active_worst_quality);
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                          active_worst_quality);
     if (q > active_worst_quality) {
       // Special case when we are targeting the max allowed rate.
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1120,11 +1120,11 @@
   return q;
 }
 
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
-                              int *top_index) {
+int av1_rc_pick_q_and_bounds(const AV1_COMP *cpi, int *bottom_index,
+                             int *top_index) {
   int q;
   if (cpi->oxcf.pass == 0) {
-    if (cpi->oxcf.rc_mode == VPX_CBR)
+    if (cpi->oxcf.rc_mode == AOM_CBR)
       q = rc_pick_q_and_bounds_one_pass_cbr(cpi, bottom_index, top_index);
     else
       q = rc_pick_q_and_bounds_one_pass_vbr(cpi, bottom_index, top_index);
@@ -1135,24 +1135,24 @@
   return q;
 }
 
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
-                                       int *frame_under_shoot_limit,
-                                       int *frame_over_shoot_limit) {
-  if (cpi->oxcf.rc_mode == VPX_Q) {
+void av1_rc_compute_frame_size_bounds(const AV1_COMP *cpi, int frame_target,
+                                      int *frame_under_shoot_limit,
+                                      int *frame_over_shoot_limit) {
+  if (cpi->oxcf.rc_mode == AOM_Q) {
     *frame_under_shoot_limit = 0;
     *frame_over_shoot_limit = INT_MAX;
   } else {
     // For very small rate targets where the fractional adjustment
     // may be tiny make sure there is at least a minimum range.
     const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
-    *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
+    *frame_under_shoot_limit = AOMMAX(frame_target - tolerance - 200, 0);
     *frame_over_shoot_limit =
-        VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
+        AOMMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
   }
 }
 
-void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_set_frame_target(AV1_COMP *cpi, int target) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
   rc->this_frame_target = target;
@@ -1168,7 +1168,7 @@
       ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
 }
 
-static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
+static void update_alt_ref_frame_stats(AV1_COMP *cpi) {
   // this frame refreshes means next frames don't unless specified by user
   RATE_CONTROL *const rc = &cpi->rc;
   rc->frames_since_golden = 0;
@@ -1180,7 +1180,7 @@
   rc->source_alt_ref_active = 1;
 }
 
-static void update_golden_frame_stats(VP10_COMP *cpi) {
+static void update_golden_frame_stats(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
 
 #if CONFIG_EXT_REFS
@@ -1221,21 +1221,21 @@
   }
 }
 
-void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
-  const VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_postencode_update(AV1_COMP *cpi, uint64_t bytes_used) {
+  const AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
   const int qindex = cm->base_qindex;
 
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
-    vp10_cyclic_refresh_postencode(cpi);
+    av1_cyclic_refresh_postencode(cpi);
   }
 
   // Update rate control heuristics
   rc->projected_frame_size = (int)(bytes_used << 3);
 
   // Post encode loop adjustment of Q prediction.
-  vp10_rc_update_rate_correction_factors(cpi);
+  av1_rc_update_rate_correction_factors(cpi);
 
   // Keep a record of last Q and ambient average Q.
   if (cm->frame_type == KEY_FRAME) {
@@ -1249,7 +1249,7 @@
       rc->avg_frame_qindex[INTER_FRAME] =
           ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
       rc->ni_frames++;
-      rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      rc->tot_q += av1_convert_qindex_to_q(qindex, cm->bit_depth);
       rc->avg_q = rc->tot_q / rc->ni_frames;
       // Calculate the average Q for normal inter frames (not key or GFU
       // frames).
@@ -1324,7 +1324,7 @@
   }
 }
 
-void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
+void av1_rc_postencode_update_drop_frame(AV1_COMP *cpi) {
   // Update buffer level with zero size, update frame counters, and return.
   update_buffer_level(cpi, 0);
   cpi->rc.frames_since_key++;
@@ -1336,7 +1336,7 @@
 // Use this macro to turn on/off use of alt-refs in one-pass mode.
 #define USE_ALTREF_FOR_ONE_PASS 1
 
-static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_pframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
   static const int af_ratio = 10;
   const RATE_CONTROL *const rc = &cpi->rc;
   int target;
@@ -1351,18 +1351,18 @@
 #else
   target = rc->avg_frame_bandwidth;
 #endif
-  return vp10_rc_clamp_pframe_target_size(cpi, target);
+  return av1_rc_clamp_pframe_target_size(cpi, target);
 }
 
-static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_iframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
   static const int kf_ratio = 25;
   const RATE_CONTROL *rc = &cpi->rc;
   const int target = rc->avg_frame_bandwidth * kf_ratio;
-  return vp10_rc_clamp_iframe_target_size(cpi, target);
+  return av1_rc_clamp_iframe_target_size(cpi, target);
 }
 
-void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_vbr_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1396,16 +1396,16 @@
     target = calc_iframe_target_size_one_pass_vbr(cpi);
   else
     target = calc_pframe_target_size_one_pass_vbr(cpi);
-  vp10_rc_set_frame_target(cpi, target);
+  av1_rc_set_frame_target(cpi, target);
 }
 
-static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static int calc_pframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   const RATE_CONTROL *rc = &cpi->rc;
   const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
   const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
   int min_frame_target =
-      VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
+      AOMMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
   int target;
 
   if (oxcf->gf_cbr_boost_pct) {
@@ -1422,23 +1422,23 @@
 
   if (diff > 0) {
     // Lower the target bandwidth for this frame.
-    const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct);
+    const int pct_low = (int)AOMMIN(diff / one_pct_bits, oxcf->under_shoot_pct);
     target -= (target * pct_low) / 200;
   } else if (diff < 0) {
     // Increase the target bandwidth for this frame.
     const int pct_high =
-        (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct);
+        (int)AOMMIN(-diff / one_pct_bits, oxcf->over_shoot_pct);
     target += (target * pct_high) / 200;
   }
   if (oxcf->rc_max_inter_bitrate_pct) {
     const int max_rate =
         rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
-    target = VPXMIN(target, max_rate);
+    target = AOMMIN(target, max_rate);
   }
-  return VPXMAX(min_frame_target, target);
+  return AOMMAX(min_frame_target, target);
 }
 
-static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_iframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
   const RATE_CONTROL *rc = &cpi->rc;
   int target;
   if (cpi->common.current_video_frame == 0) {
@@ -1449,17 +1449,17 @@
     int kf_boost = 32;
     double framerate = cpi->framerate;
 
-    kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
+    kf_boost = AOMMAX(kf_boost, (int)(2 * framerate - 16));
     if (rc->frames_since_key < framerate / 2) {
       kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
     }
     target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
   }
-  return vp10_rc_clamp_iframe_target_size(cpi, target);
+  return av1_rc_clamp_iframe_target_size(cpi, target);
 }
 
-void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_cbr_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1476,7 +1476,7 @@
   }
   if (rc->frames_till_gf_update_due == 0) {
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
-      vp10_cyclic_refresh_set_golden_update(cpi);
+      av1_cyclic_refresh_set_golden_update(cpi);
     else
       rc->baseline_gf_interval =
           (rc->min_gf_interval + rc->max_gf_interval) / 2;
@@ -1491,22 +1491,22 @@
   // Any update/change of global cyclic refresh parameters (amount/delta-qp)
   // should be done here, before the frame qp is selected.
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
-    vp10_cyclic_refresh_update_parameters(cpi);
+    av1_cyclic_refresh_update_parameters(cpi);
 
   if (cm->frame_type == KEY_FRAME)
     target = calc_iframe_target_size_one_pass_cbr(cpi);
   else
     target = calc_pframe_target_size_one_pass_cbr(cpi);
 
-  vp10_rc_set_frame_target(cpi, target);
+  av1_rc_set_frame_target(cpi, target);
   if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
-    cpi->resize_pending = vp10_resize_one_pass_cbr(cpi);
+    cpi->resize_pending = av1_resize_one_pass_cbr(cpi);
   else
     cpi->resize_pending = 0;
 }
 
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
-                        vpx_bit_depth_t bit_depth) {
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+                       aom_bit_depth_t bit_depth) {
   int start_index = rc->worst_quality;
   int target_index = rc->worst_quality;
   int i;
@@ -1514,34 +1514,34 @@
   // Convert the average q value to an index.
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     start_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= qstart) break;
   }
 
   // Convert the q target to an index
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     target_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
   }
 
   return target_index - start_index;
 }
 
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
-                                int qindex, double rate_target_ratio,
-                                vpx_bit_depth_t bit_depth) {
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+                               int qindex, double rate_target_ratio,
+                               aom_bit_depth_t bit_depth) {
   int target_index = rc->worst_quality;
   int i;
 
   // Look up the current projected bits per block for the base index
   const int base_bits_per_mb =
-      vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
+      av1_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
 
   // Find the target bits per mb based on the base value and given ratio.
   const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
 
   // Convert the q target to an index
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
-    if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
+    if (av1_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
         target_bits_per_mb) {
       target_index = i;
       break;
@@ -1550,12 +1550,12 @@
   return target_index - qindex;
 }
 
-void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
-                                   RATE_CONTROL *const rc) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_set_gf_interval_range(const AV1_COMP *const cpi,
+                                  RATE_CONTROL *const rc) {
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   // Special case code for 1 pass fixed Q mode tests
-  if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+  if ((oxcf->pass == 0) && (oxcf->rc_mode == AOM_Q)) {
     rc->max_gf_interval = FIXED_GF_INTERVAL;
     rc->min_gf_interval = FIXED_GF_INTERVAL;
     rc->static_scene_max_gf_interval = FIXED_GF_INTERVAL;
@@ -1564,10 +1564,10 @@
     rc->max_gf_interval = oxcf->max_gf_interval;
     rc->min_gf_interval = oxcf->min_gf_interval;
     if (rc->min_gf_interval == 0)
-      rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+      rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
           oxcf->width, oxcf->height, cpi->framerate);
     if (rc->max_gf_interval == 0)
-      rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+      rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
           cpi->framerate, rc->min_gf_interval);
 
     // Extended interval for genuinely static scenes
@@ -1582,13 +1582,13 @@
       rc->max_gf_interval = rc->static_scene_max_gf_interval;
 
     // Clamp min to max
-    rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval);
+    rc->min_gf_interval = AOMMIN(rc->min_gf_interval, rc->max_gf_interval);
   }
 }
 
-void vp10_rc_update_framerate(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_update_framerate(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
   int vbr_max_bits;
 
@@ -1597,7 +1597,7 @@
       (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
 
   rc->min_frame_bandwidth =
-      VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
+      AOMMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
 
   // A maximum bitrate for a frame is defined.
   // The baseline for this aligns with HW implementations that
@@ -1610,14 +1610,14 @@
       (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
             100);
   rc->max_frame_bandwidth =
-      VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
+      AOMMAX(AOMMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
 
-  vp10_rc_set_gf_interval_range(cpi, rc);
+  av1_rc_set_gf_interval_range(cpi, rc);
 }
 
 #define VBR_PCT_ADJUSTMENT_LIMIT 50
 // For VBR...adjustment to the frame target based on error from previous frames
-static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
+static void vbr_rate_correction(AV1_COMP *cpi, int *this_frame_target) {
   RATE_CONTROL *const rc = &cpi->rc;
   int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
   int max_delta;
@@ -1648,31 +1648,31 @@
   // Dont do it for kf,arf,gf or overlay frames.
   if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
       rc->vbr_bits_off_target_fast) {
-    int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target);
+    int one_frame_bits = AOMMAX(rc->avg_frame_bandwidth, *this_frame_target);
     int fast_extra_bits;
-    fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits);
-    fast_extra_bits = (int)VPXMIN(
+    fast_extra_bits = (int)AOMMIN(rc->vbr_bits_off_target_fast, one_frame_bits);
+    fast_extra_bits = (int)AOMMIN(
         fast_extra_bits,
-        VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
+        AOMMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
     *this_frame_target += (int)fast_extra_bits;
     rc->vbr_bits_off_target_fast -= fast_extra_bits;
   }
 }
 
-void vp10_set_target_rate(VP10_COMP *cpi) {
+void av1_set_target_rate(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   int target_rate = rc->base_frame_target;
 
   // Correction to rate target based on prior over or under shoot.
-  if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
+  if (cpi->oxcf.rc_mode == AOM_VBR || cpi->oxcf.rc_mode == AOM_CQ)
     vbr_rate_correction(cpi, &target_rate);
-  vp10_rc_set_frame_target(cpi, target_rate);
+  av1_rc_set_frame_target(cpi, target_rate);
 }
 
 // Check if we should resize, based on average QP from past x frames.
 // Only allow for resize at most one scale down for now, scaling factor is 2.
-int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_resize_one_pass_cbr(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int resize_now = 0;
   cpi->resize_scale_num = 1;
@@ -1731,15 +1731,15 @@
     rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
     // Reset cyclic refresh parameters.
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
-      vp10_cyclic_refresh_reset_resize(cpi);
+      av1_cyclic_refresh_reset_resize(cpi);
     // Get the projected qindex, based on the scaled target frame size (scaled
-    // so target_bits_per_mb in vp10_rc_regulate_q will be correct target).
+    // so target_bits_per_mb in av1_rc_regulate_q will be correct target).
     target_bits_per_frame = (resize_now == 1)
                                 ? rc->this_frame_target * tot_scale_change
                                 : rc->this_frame_target / tot_scale_change;
     active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
-    qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
-                                active_worst_quality);
+    qindex = av1_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+                               active_worst_quality);
     // If resize is down, check if projected q index is close to worst_quality,
     // and if so, reduce the rate correction factor (since likely can afford
     // lower q for resized frame).
diff --git a/av1/encoder/ratectrl.h b/av1/encoder/ratectrl.h
index 88a14bc..b690918 100644
--- a/av1/encoder/ratectrl.h
+++ b/av1/encoder/ratectrl.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_RATECTRL_H_
-#define VP10_ENCODER_RATECTRL_H_
+#ifndef AV1_ENCODER_RATECTRL_H_
+#define AV1_ENCODER_RATECTRL_H_
 
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 
 #include "av1/common/blockd.h"
 
@@ -168,118 +168,116 @@
   int rf_level_maxq[RATE_FACTOR_LEVELS];
 } RATE_CONTROL;
 
-struct VP10_COMP;
-struct VP10EncoderConfig;
+struct AV1_COMP;
+struct AV1EncoderConfig;
 
-void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
-                  RATE_CONTROL *rc);
+void av1_rc_init(const struct AV1EncoderConfig *oxcf, int pass,
+                 RATE_CONTROL *rc);
 
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
-                            double correction_factor,
-                            vpx_bit_depth_t bit_depth);
+int av1_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+                           double correction_factor, aom_bit_depth_t bit_depth);
 
-double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
 
-void vp10_rc_init_minq_luts(void);
+void av1_rc_init_minq_luts(void);
 
-int vp10_rc_get_default_min_gf_interval(int width, int height,
-                                        double framerate);
-// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
+int av1_rc_get_default_min_gf_interval(int width, int height, double framerate);
+// Note av1_rc_get_default_max_gf_interval() requires the min_gf_interval to
 // be passed in to ensure that the max_gf_interval returned is at least as bis
 // as that.
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
+int av1_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
 
 // Generally at the high level, the following flow is expected
 // to be enforced for rate control:
 // First call per frame, one of:
-//   vp10_rc_get_one_pass_vbr_params()
-//   vp10_rc_get_one_pass_cbr_params()
-//   vp10_rc_get_first_pass_params()
-//   vp10_rc_get_second_pass_params()
+//   av1_rc_get_one_pass_vbr_params()
+//   av1_rc_get_one_pass_cbr_params()
+//   av1_rc_get_first_pass_params()
+//   av1_rc_get_second_pass_params()
 // depending on the usage to set the rate control encode parameters desired.
 //
 // Then, call encode_frame_to_data_rate() to perform the
 // actual encode. This function will in turn call encode_frame()
 // one or more times, followed by one of:
-//   vp10_rc_postencode_update()
-//   vp10_rc_postencode_update_drop_frame()
+//   av1_rc_postencode_update()
+//   av1_rc_postencode_update_drop_frame()
 //
 // The majority of rate control parameters are only expected
-// to be set in the vp10_rc_get_..._params() functions and
-// updated during the vp10_rc_postencode_update...() functions.
-// The only exceptions are vp10_rc_drop_frame() and
-// vp10_rc_update_rate_correction_factors() functions.
+// to be set in the av1_rc_get_..._params() functions and
+// updated during the av1_rc_postencode_update...() functions.
+// The only exceptions are av1_rc_drop_frame() and
+// av1_rc_update_rate_correction_factors() functions.
 
 // Functions to set parameters for encoding before the actual
 // encode_frame_to_data_rate() function.
-void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi);
-void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi);
+void av1_rc_get_one_pass_vbr_params(struct AV1_COMP *cpi);
+void av1_rc_get_one_pass_cbr_params(struct AV1_COMP *cpi);
 
 // Post encode update of the rate control parameters based
 // on bytes used
-void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used);
+void av1_rc_postencode_update(struct AV1_COMP *cpi, uint64_t bytes_used);
 // Post encode update of the rate control parameters for dropped frames
-void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi);
+void av1_rc_postencode_update_drop_frame(struct AV1_COMP *cpi);
 
 // Updates rate correction factors
 // Changes only the rate correction factors in the rate control structure.
-void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi);
+void av1_rc_update_rate_correction_factors(struct AV1_COMP *cpi);
 
 // Decide if we should drop this frame: For 1-pass CBR.
 // Changes only the decimation count in the rate control structure
-int vp10_rc_drop_frame(struct VP10_COMP *cpi);
+int av1_rc_drop_frame(struct AV1_COMP *cpi);
 
 // Computes frame size bounds.
-void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
-                                       int this_frame_target,
-                                       int *frame_under_shoot_limit,
-                                       int *frame_over_shoot_limit);
+void av1_rc_compute_frame_size_bounds(const struct AV1_COMP *cpi,
+                                      int this_frame_target,
+                                      int *frame_under_shoot_limit,
+                                      int *frame_over_shoot_limit);
 
 // Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
-                              int *top_index);
+int av1_rc_pick_q_and_bounds(const struct AV1_COMP *cpi, int *bottom_index,
+                             int *top_index);
 
 // Estimates q to achieve a target bits per frame
-int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
-                       int active_best_quality, int active_worst_quality);
+int av1_rc_regulate_q(const struct AV1_COMP *cpi, int target_bits_per_frame,
+                      int active_best_quality, int active_worst_quality);
 
 // Estimates bits per mb for a given qindex and correction factor.
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
-                        double correction_factor, vpx_bit_depth_t bit_depth);
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+                       double correction_factor, aom_bit_depth_t bit_depth);
 
 // Clamping utilities for bitrate targets for iframes and pframes.
-int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
-                                     int target);
-int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
-                                     int target);
+int av1_rc_clamp_iframe_target_size(const struct AV1_COMP *const cpi,
+                                    int target);
+int av1_rc_clamp_pframe_target_size(const struct AV1_COMP *const cpi,
+                                    int target);
 // Utility to set frame_target into the RATE_CONTROL structure
-// This function is called only from the vp10_rc_get_..._params() functions.
-void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
+// This function is called only from the av1_rc_get_..._params() functions.
+void av1_rc_set_frame_target(struct AV1_COMP *cpi, int target);
 
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a target q value
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
-                        vpx_bit_depth_t bit_depth);
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+                       aom_bit_depth_t bit_depth);
 
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a value that should equate to the given rate ratio.
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
-                                int qindex, double rate_target_ratio,
-                                vpx_bit_depth_t bit_depth);
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+                               int qindex, double rate_target_ratio,
+                               aom_bit_depth_t bit_depth);
 
-int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
+int av1_frame_type_qdelta(const struct AV1_COMP *cpi, int rf_level, int q);
 
-void vp10_rc_update_framerate(struct VP10_COMP *cpi);
+void av1_rc_update_framerate(struct AV1_COMP *cpi);
 
-void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
-                                   RATE_CONTROL *const rc);
+void av1_rc_set_gf_interval_range(const struct AV1_COMP *const cpi,
+                                  RATE_CONTROL *const rc);
 
-void vp10_set_target_rate(struct VP10_COMP *cpi);
+void av1_set_target_rate(struct AV1_COMP *cpi);
 
-int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi);
+int av1_resize_one_pass_cbr(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RATECTRL_H_
+#endif  // AV1_ENCODER_RATECTRL_H_
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index a8a8691..2379db0 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -12,10 +12,10 @@
 #include <math.h>
 #include <stdio.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/bitops.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
@@ -45,13 +45,13 @@
 // Factor to weigh the rate for switchable interp filters.
 #define SWITCHABLE_INTERP_RATE_FACTOR 1
 
-void vp10_rd_cost_reset(RD_COST *rd_cost) {
+void av1_rd_cost_reset(RD_COST *rd_cost) {
   rd_cost->rate = INT_MAX;
   rd_cost->dist = INT64_MAX;
   rd_cost->rdcost = INT64_MAX;
 }
 
-void vp10_rd_cost_init(RD_COST *rd_cost) {
+void av1_rd_cost_init(RD_COST *rd_cost) {
   rd_cost->rate = 0;
   rd_cost->dist = 0;
   rd_cost->rdcost = 0;
@@ -68,94 +68,90 @@
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static void fill_mode_costs(VP10_COMP *cpi) {
+static void fill_mode_costs(AV1_COMP *cpi) {
   const FRAME_CONTEXT *const fc = cpi->common.fc;
   int i, j;
 
   for (i = 0; i < INTRA_MODES; ++i)
     for (j = 0; j < INTRA_MODES; ++j)
-      vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
-                       vp10_intra_mode_tree);
+      av1_cost_tokens(cpi->y_mode_costs[i][j], av1_kf_y_mode_prob[i][j],
+                      av1_intra_mode_tree);
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
-    vp10_cost_tokens(cpi->mbmode_cost[i], fc->y_mode_prob[i],
-                     vp10_intra_mode_tree);
+    av1_cost_tokens(cpi->mbmode_cost[i], fc->y_mode_prob[i],
+                    av1_intra_mode_tree);
 
   for (i = 0; i < INTRA_MODES; ++i)
-    vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
-                     vp10_intra_mode_tree);
+    av1_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+                    av1_intra_mode_tree);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
-    vp10_cost_tokens(cpi->switchable_interp_costs[i],
-                     fc->switchable_interp_prob[i],
-                     vp10_switchable_interp_tree);
+    av1_cost_tokens(cpi->switchable_interp_costs[i],
+                    fc->switchable_interp_prob[i], av1_switchable_interp_tree);
 
   for (i = 0; i < PALETTE_BLOCK_SIZES; ++i) {
-    vp10_cost_tokens(cpi->palette_y_size_cost[i],
-                     vp10_default_palette_y_size_prob[i],
-                     vp10_palette_size_tree);
-    vp10_cost_tokens(cpi->palette_uv_size_cost[i],
-                     vp10_default_palette_uv_size_prob[i],
-                     vp10_palette_size_tree);
+    av1_cost_tokens(cpi->palette_y_size_cost[i],
+                    av1_default_palette_y_size_prob[i], av1_palette_size_tree);
+    av1_cost_tokens(cpi->palette_uv_size_cost[i],
+                    av1_default_palette_uv_size_prob[i], av1_palette_size_tree);
   }
 
   for (i = 0; i < PALETTE_MAX_SIZE - 1; ++i)
     for (j = 0; j < PALETTE_COLOR_CONTEXTS; ++j) {
-      vp10_cost_tokens(cpi->palette_y_color_cost[i][j],
-                       vp10_default_palette_y_color_prob[i][j],
-                       vp10_palette_color_tree[i]);
-      vp10_cost_tokens(cpi->palette_uv_color_cost[i][j],
-                       vp10_default_palette_uv_color_prob[i][j],
-                       vp10_palette_color_tree[i]);
+      av1_cost_tokens(cpi->palette_y_color_cost[i][j],
+                      av1_default_palette_y_color_prob[i][j],
+                      av1_palette_color_tree[i]);
+      av1_cost_tokens(cpi->palette_uv_color_cost[i][j],
+                      av1_default_palette_uv_color_prob[i][j],
+                      av1_palette_color_tree[i]);
     }
 
   for (i = 0; i < TX_SIZES - 1; ++i)
     for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
-      vp10_cost_tokens(cpi->tx_size_cost[i][j], fc->tx_size_probs[i][j],
-                       vp10_tx_size_tree[i]);
+      av1_cost_tokens(cpi->tx_size_cost[i][j], fc->tx_size_probs[i][j],
+                      av1_tx_size_tree[i]);
 
 #if CONFIG_EXT_TX
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     int s;
     for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
       if (use_inter_ext_tx_for_txsize[s][i]) {
-        vp10_cost_tokens(cpi->inter_tx_type_costs[s][i],
-                         fc->inter_ext_tx_prob[s][i],
-                         vp10_ext_tx_inter_tree[s]);
+        av1_cost_tokens(cpi->inter_tx_type_costs[s][i],
+                        fc->inter_ext_tx_prob[s][i], av1_ext_tx_inter_tree[s]);
       }
     }
     for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
       if (use_intra_ext_tx_for_txsize[s][i]) {
         for (j = 0; j < INTRA_MODES; ++j)
-          vp10_cost_tokens(cpi->intra_tx_type_costs[s][i][j],
-                           fc->intra_ext_tx_prob[s][i][j],
-                           vp10_ext_tx_intra_tree[s]);
+          av1_cost_tokens(cpi->intra_tx_type_costs[s][i][j],
+                          fc->intra_ext_tx_prob[s][i][j],
+                          av1_ext_tx_intra_tree[s]);
       }
     }
   }
 #else
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
-      vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
-                       fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
+      av1_cost_tokens(cpi->intra_tx_type_costs[i][j],
+                      fc->intra_ext_tx_prob[i][j], av1_ext_tx_tree);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
-                     vp10_ext_tx_tree);
+    av1_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
+                    av1_ext_tx_tree);
   }
 #endif  // CONFIG_EXT_TX
 #if CONFIG_EXT_INTRA
   for (i = 0; i < INTRA_FILTERS + 1; ++i)
-    vp10_cost_tokens(cpi->intra_filter_cost[i], fc->intra_filter_probs[i],
-                     vp10_intra_filter_tree);
+    av1_cost_tokens(cpi->intra_filter_cost[i], fc->intra_filter_probs[i],
+                    av1_intra_filter_tree);
 #endif  // CONFIG_EXT_INTRA
 }
 
-void vp10_fill_token_costs(vp10_coeff_cost *c,
+void av1_fill_token_costs(av1_coeff_cost *c,
 #if CONFIG_ANS
-                           coeff_cdf_model (*cdf)[PLANE_TYPES],
+                          coeff_cdf_model (*cdf)[PLANE_TYPES],
 #endif  // CONFIG_ANS
-                           vp10_coeff_probs_model (*p)[PLANE_TYPES]) {
+                          av1_coeff_probs_model (*p)[PLANE_TYPES]) {
   int i, j, k, l;
   TX_SIZE t;
   for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -164,17 +160,17 @@
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
 #if CONFIG_ANS
-            const vpx_prob *const tree_probs = p[t][i][j][k][l];
-            vp10_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
-                                 cdf[t][i][j][k][l], 0);
-            vp10_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
-                                 cdf[t][i][j][k][l], 1);
+            const aom_prob *const tree_probs = p[t][i][j][k][l];
+            av1_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
+                                cdf[t][i][j][k][l], 0);
+            av1_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
+                                cdf[t][i][j][k][l], 1);
 #else
-            vpx_prob probs[ENTROPY_NODES];
-            vp10_model_to_full_probs(p[t][i][j][k][l], probs);
-            vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
-            vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
-                                  vp10_coef_tree);
+            aom_prob probs[ENTROPY_NODES];
+            av1_model_to_full_probs(p[t][i][j][k][l], probs);
+            av1_cost_tokens((int *)c[t][i][j][k][0][l], probs, av1_coef_tree);
+            av1_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+                                 av1_coef_tree);
 #endif  // CONFIG_ANS
             assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
                    c[t][i][j][k][1][l][EOB_TOKEN]);
@@ -185,7 +181,7 @@
 static int sad_per_bit16lut_8[QINDEX_RANGE];
 static int sad_per_bit4lut_8[QINDEX_RANGE];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int sad_per_bit16lut_10[QINDEX_RANGE];
 static int sad_per_bit4lut_10[QINDEX_RANGE];
 static int sad_per_bit16lut_12[QINDEX_RANGE];
@@ -193,26 +189,26 @@
 #endif
 
 static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range,
-                            vpx_bit_depth_t bit_depth) {
+                            aom_bit_depth_t bit_depth) {
   int i;
   // Initialize the sad lut tables using a formulaic calculation for now.
   // This is to make it easier to resolve the impact of experimental changes
   // to the quantizer tables.
   for (i = 0; i < range; i++) {
-    const double q = vp10_convert_qindex_to_q(i, bit_depth);
+    const double q = av1_convert_qindex_to_q(i, bit_depth);
     bit16lut[i] = (int)(0.0418 * q + 2.4107);
     bit4lut[i] = (int)(0.063 * q + 2.742);
   }
 }
 
-void vp10_init_me_luts(void) {
+void av1_init_me_luts(void) {
   init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
-                  VPX_BITS_8);
-#if CONFIG_VP9_HIGHBITDEPTH
+                  AOM_BITS_8);
+#if CONFIG_AOM_HIGHBITDEPTH
   init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
-                  VPX_BITS_10);
+                  AOM_BITS_10);
   init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
-                  VPX_BITS_12);
+                  AOM_BITS_12);
 #endif
 }
 
@@ -230,25 +226,25 @@
 #endif  // CONFIG_EXT_REFS
 };
 
-int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
-  const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_compute_rd_mult(const AV1_COMP *cpi, int qindex) {
+  const int64_t q = av1_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   int64_t rdmult = 0;
   switch (cpi->common.bit_depth) {
-    case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
-    case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
-    case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
+    case AOM_BITS_8: rdmult = 88 * q * q / 24; break;
+    case AOM_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+    case AOM_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
   int64_t rdmult = 88 * q * q / 24;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
     const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
-    const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100));
+    const int boost_index = AOMMIN(15, (cpi->rc.gfu_boost / 100));
 
     rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
     rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
@@ -257,57 +253,56 @@
   return (int)rdmult;
 }
 
-static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
+static int compute_rd_thresh_factor(int qindex, aom_bit_depth_t bit_depth) {
   double q;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
-    case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
-    case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
+    case AOM_BITS_8: q = av1_dc_quant(qindex, 0, AOM_BITS_8) / 4.0; break;
+    case AOM_BITS_10: q = av1_dc_quant(qindex, 0, AOM_BITS_10) / 16.0; break;
+    case AOM_BITS_12: q = av1_dc_quant(qindex, 0, AOM_BITS_12) / 64.0; break;
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
   (void)bit_depth;
-  q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+  q = av1_dc_quant(qindex, 0, AOM_BITS_8) / 4.0;
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   // TODO(debargha): Adjust the function below.
-  return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
+  return AOMMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
 }
 
-void vp10_initialize_me_consts(const VP10_COMP *cpi, MACROBLOCK *x,
-                               int qindex) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_initialize_me_consts(const AV1_COMP *cpi, MACROBLOCK *x, int qindex) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (cpi->common.bit_depth) {
-    case VPX_BITS_8:
+    case AOM_BITS_8:
       x->sadperbit16 = sad_per_bit16lut_8[qindex];
       x->sadperbit4 = sad_per_bit4lut_8[qindex];
       break;
-    case VPX_BITS_10:
+    case AOM_BITS_10:
       x->sadperbit16 = sad_per_bit16lut_10[qindex];
       x->sadperbit4 = sad_per_bit4lut_10[qindex];
       break;
-    case VPX_BITS_12:
+    case AOM_BITS_12:
       x->sadperbit16 = sad_per_bit16lut_12[qindex];
       x->sadperbit4 = sad_per_bit4lut_12[qindex];
       break;
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
   }
 #else
   (void)cpi;
   x->sadperbit16 = sad_per_bit16lut_8[qindex];
   x->sadperbit4 = sad_per_bit4lut_8[qindex];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
-static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
+static void set_block_thresholds(const AV1_COMMON *cm, RD_OPT *rd) {
   int i, bsize, segment_id;
 
   for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
     const int qindex =
-        clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+        clamp(av1_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
                   cm->y_dc_delta_q,
               0, MAXQ);
     const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
@@ -335,10 +330,10 @@
 }
 
 #if CONFIG_REF_MV
-void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
+void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
   MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
-  int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[ref_frame],
-                             mbmi_ext->ref_mv_stack[ref_frame]);
+  int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[ref_frame],
+                            mbmi_ext->ref_mv_stack[ref_frame]);
   x->mvcost = x->mv_cost_stack[nmv_ctx];
   x->nmvjointcost = x->nmv_vec_cost[nmv_ctx];
   x->mvsadcost = x->mvcost;
@@ -349,16 +344,16 @@
 }
 #endif
 
-void vp10_initialize_rd_consts(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_initialize_rd_consts(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->td.mb;
   RD_OPT *const rd = &cpi->rd;
   int i;
 
-  vpx_clear_system_state();
+  aom_clear_system_state();
 
   rd->RDDIV = RDDIV_BITS;  // In bits (to multiply D by 128).
-  rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+  rd->RDMULT = av1_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
 
   set_error_per_bit(x, rd->RDMULT);
 
@@ -369,10 +364,10 @@
     int nmv_ctx;
 
     for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
-      vpx_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
+      aom_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
       cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = 1;
 
-      vp10_build_nmv_cost_table(
+      av1_build_nmv_cost_table(
           x->nmv_vec_cost[nmv_ctx],
           cm->allow_high_precision_mv ? x->nmvcost_hp[nmv_ctx]
                                       : x->nmvcost[nmv_ctx],
@@ -381,40 +376,40 @@
 
       x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] = 0;
       x->zero_rmv_cost[nmv_ctx][0] =
-          vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
+          av1_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
       x->zero_rmv_cost[nmv_ctx][1] =
-          vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
+          av1_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
     }
     x->mvcost = x->mv_cost_stack[0];
     x->nmvjointcost = x->nmv_vec_cost[0];
     x->mvsadcost = x->mvcost;
     x->nmvjointsadcost = x->nmvjointcost;
 #else
-    vp10_build_nmv_cost_table(
+    av1_build_nmv_cost_table(
         x->nmvjointcost,
         cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
         cm->allow_high_precision_mv);
 #endif
   }
   if (cpi->oxcf.pass != 1) {
-    vp10_fill_token_costs(x->token_costs,
+    av1_fill_token_costs(x->token_costs,
 #if CONFIG_ANS
-                          cm->fc->coef_cdfs,
+                         cm->fc->coef_cdfs,
 #endif  // CONFIG_ANS
-                          cm->fc->coef_probs);
+                         cm->fc->coef_probs);
 
     if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
         cm->frame_type == KEY_FRAME) {
 #if CONFIG_EXT_PARTITION_TYPES
-      vp10_cost_tokens(cpi->partition_cost[0], cm->fc->partition_prob[0],
-                       vp10_partition_tree);
+      av1_cost_tokens(cpi->partition_cost[0], cm->fc->partition_prob[0],
+                      av1_partition_tree);
       for (i = 1; i < PARTITION_CONTEXTS; ++i)
-        vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
-                         vp10_ext_partition_tree);
+        av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+                        av1_ext_partition_tree);
 #else
       for (i = 0; i < PARTITION_CONTEXTS; ++i)
-        vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
-                         vp10_partition_tree);
+        av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+                        av1_partition_tree);
 #endif  // CONFIG_EXT_PARTITION_TYPES
     }
 
@@ -423,47 +418,47 @@
     if (!frame_is_intra_only(cm)) {
 #if CONFIG_REF_MV
       for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) {
-        cpi->newmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->newmv_prob[i], 0);
-        cpi->newmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->newmv_prob[i], 1);
+        cpi->newmv_mode_cost[i][0] = av1_cost_bit(cm->fc->newmv_prob[i], 0);
+        cpi->newmv_mode_cost[i][1] = av1_cost_bit(cm->fc->newmv_prob[i], 1);
       }
 
       for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i) {
-        cpi->zeromv_mode_cost[i][0] = vp10_cost_bit(cm->fc->zeromv_prob[i], 0);
-        cpi->zeromv_mode_cost[i][1] = vp10_cost_bit(cm->fc->zeromv_prob[i], 1);
+        cpi->zeromv_mode_cost[i][0] = av1_cost_bit(cm->fc->zeromv_prob[i], 0);
+        cpi->zeromv_mode_cost[i][1] = av1_cost_bit(cm->fc->zeromv_prob[i], 1);
       }
 
       for (i = 0; i < REFMV_MODE_CONTEXTS; ++i) {
-        cpi->refmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->refmv_prob[i], 0);
-        cpi->refmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->refmv_prob[i], 1);
+        cpi->refmv_mode_cost[i][0] = av1_cost_bit(cm->fc->refmv_prob[i], 0);
+        cpi->refmv_mode_cost[i][1] = av1_cost_bit(cm->fc->refmv_prob[i], 1);
       }
 
       for (i = 0; i < DRL_MODE_CONTEXTS; ++i) {
-        cpi->drl_mode_cost0[i][0] = vp10_cost_bit(cm->fc->drl_prob[i], 0);
-        cpi->drl_mode_cost0[i][1] = vp10_cost_bit(cm->fc->drl_prob[i], 1);
+        cpi->drl_mode_cost0[i][0] = av1_cost_bit(cm->fc->drl_prob[i], 0);
+        cpi->drl_mode_cost0[i][1] = av1_cost_bit(cm->fc->drl_prob[i], 1);
       }
 #if CONFIG_EXT_INTER
-      cpi->new2mv_mode_cost[0] = vp10_cost_bit(cm->fc->new2mv_prob, 0);
-      cpi->new2mv_mode_cost[1] = vp10_cost_bit(cm->fc->new2mv_prob, 1);
+      cpi->new2mv_mode_cost[0] = av1_cost_bit(cm->fc->new2mv_prob, 0);
+      cpi->new2mv_mode_cost[1] = av1_cost_bit(cm->fc->new2mv_prob, 1);
 #endif  // CONFIG_EXT_INTER
 #else
       for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
-        vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
-                         cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+        av1_cost_tokens((int *)cpi->inter_mode_cost[i],
+                        cm->fc->inter_mode_probs[i], av1_inter_mode_tree);
 #endif  // CONFIG_REF_MV
 #if CONFIG_EXT_INTER
       for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
-        vp10_cost_tokens((int *)cpi->inter_compound_mode_cost[i],
-                         cm->fc->inter_compound_mode_probs[i],
-                         vp10_inter_compound_mode_tree);
+        av1_cost_tokens((int *)cpi->inter_compound_mode_cost[i],
+                        cm->fc->inter_compound_mode_probs[i],
+                        av1_inter_compound_mode_tree);
       for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
-        vp10_cost_tokens((int *)cpi->interintra_mode_cost[i],
-                         cm->fc->interintra_mode_prob[i],
-                         vp10_interintra_mode_tree);
+        av1_cost_tokens((int *)cpi->interintra_mode_cost[i],
+                        cm->fc->interintra_mode_prob[i],
+                        av1_interintra_mode_tree);
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
       for (i = BLOCK_8X8; i < BLOCK_SIZES; i++) {
-        vp10_cost_tokens((int *)cpi->motvar_cost[i], cm->fc->motvar_prob[i],
-                         vp10_motvar_tree);
+        av1_cost_tokens((int *)cpi->motvar_cost[i], cm->fc->motvar_prob[i],
+                        av1_motvar_tree);
       }
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
     }
@@ -536,9 +531,9 @@
   *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
 }
 
-void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
-                                   unsigned int qstep, int *rate,
-                                   int64_t *dist) {
+void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
+                                  unsigned int qstep, int *rate,
+                                  int64_t *dist) {
   // This function models the rate and distortion for a Laplacian
   // source with given variance when quantized with a uniform quantizer
   // with given stepsize. The closed form expressions are in:
@@ -553,9 +548,9 @@
     static const uint32_t MAX_XSQ_Q10 = 245727;
     const uint64_t xsq_q10_64 =
         (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var;
-    const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10);
+    const int xsq_q10 = (int)AOMMIN(xsq_q10_64, MAX_XSQ_Q10);
     model_rd_norm(xsq_q10, &r_q10, &d_q10);
-    *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - VP10_PROB_COST_SHIFT);
+    *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - AV1_PROB_COST_SHIFT);
     *dist = (var * (int64_t)d_q10 + 512) >> 10;
   }
 }
@@ -633,16 +628,16 @@
   }
 }
 
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
-                               const struct macroblockd_plane *pd,
-                               ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
-                               ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+                              const struct macroblockd_plane *pd,
+                              ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+                              ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
   get_entropy_contexts_plane(plane_bsize, tx_size, pd, t_above, t_left);
 }
 
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
-                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
+void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                 int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
   int i;
   int zero_seen = 0;
   int best_index = 0;
@@ -672,7 +667,7 @@
     if (i == 1 && near_same_nearest) continue;
     fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
     fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
-    max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
+    max_mv = AOMMAX(max_mv, AOMMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
 
     if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
     zero_seen |= (fp_row == 0 && fp_col == 0);
@@ -694,11 +689,11 @@
   x->pred_mv_sad[ref_frame] = best_sad;
 }
 
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
-                           struct buf_2d dst[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *scale,
-                           const struct scale_factors *scale_uv) {
+void av1_setup_pred_block(const MACROBLOCKD *xd,
+                          struct buf_2d dst[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *scale,
+                          const struct scale_factors *scale_uv) {
   int i;
 
   dst[0].buf = src->y_buffer;
@@ -716,23 +711,23 @@
   }
 }
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
-                             int stride) {
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                            int stride) {
   const int bw = b_width_log2_lookup[plane_bsize];
   const int y = 4 * (raster_block >> bw);
   const int x = 4 * (raster_block & ((1 << bw) - 1));
   return y * stride + x;
 }
 
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
-                                        int raster_block, int16_t *base) {
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+                                       int16_t *base) {
   const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
+  return base + av1_raster_block_offset(plane_bsize, raster_block, stride);
 }
 
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
-                                              int ref_frame) {
-  const VP10_COMMON *const cm = &cpi->common;
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const AV1_COMP *cpi,
+                                             int ref_frame) {
+  const AV1_COMMON *const cm = &cpi->common;
   const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
   const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
@@ -741,8 +736,7 @@
 }
 
 #if CONFIG_DUAL_FILTER
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
-                             const MACROBLOCKD *const xd) {
+int av1_get_switchable_rate(const AV1_COMP *cpi, const MACROBLOCKD *const xd) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   int inter_filter_cost = 0;
   int dir;
@@ -751,7 +745,7 @@
     if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
         (mbmi->ref_frame[1] > INTRA_FRAME &&
          has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
-      const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+      const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
       inter_filter_cost +=
           cpi->switchable_interp_costs[ctx][mbmi->interp_filter[dir]];
     }
@@ -759,19 +753,18 @@
   return SWITCHABLE_INTERP_RATE_FACTOR * inter_filter_cost;
 }
 #else
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
-                             const MACROBLOCKD *const xd) {
+int av1_get_switchable_rate(const AV1_COMP *cpi, const MACROBLOCKD *const xd) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  const int ctx = vp10_get_pred_context_switchable_interp(xd);
+  const int ctx = av1_get_pred_context_switchable_interp(xd);
 #if CONFIG_EXT_INTERP
-  if (!vp10_is_interp_needed(xd)) return 0;
+  if (!av1_is_interp_needed(xd)) return 0;
 #endif  // CONFIG_EXT_INTERP
   return SWITCHABLE_INTERP_RATE_FACTOR *
          cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
 }
 #endif
 
-void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds(AV1_COMP *cpi) {
   int i;
   RD_OPT *const rd = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
@@ -1046,7 +1039,7 @@
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds_sub8x8(AV1_COMP *cpi) {
   static const int thresh_mult[2][MAX_REFS] = {
 #if CONFIG_EXT_REFS
     { 2500, 2500, 2500, 2500, 2500, 2500, 4500, 4500, 4500, 4500, 4500, 4500,
@@ -1063,41 +1056,41 @@
   memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
 }
 
-void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
-                                int (*factor_buf)[MAX_MODES], int rd_thresh,
-                                int bsize, int best_mode_index) {
+void av1_update_rd_thresh_fact(const AV1_COMMON *const cm,
+                               int (*factor_buf)[MAX_MODES], int rd_thresh,
+                               int bsize, int best_mode_index) {
   if (rd_thresh > 0) {
     const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
     int mode;
     for (mode = 0; mode < top_mode; ++mode) {
-      const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4);
-      const BLOCK_SIZE max_size = VPXMIN(bsize + 2, cm->sb_size);
+      const BLOCK_SIZE min_size = AOMMAX(bsize - 1, BLOCK_4X4);
+      const BLOCK_SIZE max_size = AOMMIN(bsize + 2, cm->sb_size);
       BLOCK_SIZE bs;
       for (bs = min_size; bs <= max_size; ++bs) {
         int *const fact = &factor_buf[bs][mode];
         if (mode == best_mode_index) {
           *fact -= (*fact >> 4);
         } else {
-          *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT);
+          *fact = AOMMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT);
         }
       }
     }
   }
 }
 
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
-                                vpx_bit_depth_t bit_depth) {
-  const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
-#if CONFIG_VP9_HIGHBITDEPTH
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
+                               aom_bit_depth_t bit_depth) {
+  const int q = av1_dc_quant(qindex, qdelta, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return 20 * q;
-    case VPX_BITS_10: return 5 * q;
-    case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
+    case AOM_BITS_8: return 20 * q;
+    case AOM_BITS_10: return 5 * q;
+    case AOM_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
   return 20 * q;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 9680215..c902429 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_RD_H_
-#define VP10_ENCODER_RD_H_
+#ifndef AV1_ENCODER_RD_H_
+#define AV1_ENCODER_RD_H_
 
 #include <limits.h>
 
@@ -30,10 +30,10 @@
 #define RD_EPB_SHIFT 6
 
 #define RDCOST(RM, DM, R, D) \
-  (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP10_PROB_COST_SHIFT) + (D << DM))
+  (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), AV1_PROB_COST_SHIFT) + (D << DM))
 
-#define RDCOST_DBL(RM, DM, R, D)                                    \
-  (((((double)(R)) * (RM)) / (double)(1 << VP10_PROB_COST_SHIFT)) + \
+#define RDCOST_DBL(RM, DM, R, D)                                   \
+  (((((double)(R)) * (RM)) / (double)(1 << AV1_PROB_COST_SHIFT)) + \
    ((double)(D) * (1 << (DM))))
 
 #define QIDX_SKIP_THRESH 115
@@ -71,7 +71,7 @@
 #define RD_THRESH_INC 1
 
 // This enumerator type needs to be kept aligned with the mode order in
-// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
+// const MODE_DEFINITION av1_mode_order[MAX_MODES] used in the rd code.
 typedef enum {
   THR_NEARESTMV,
 #if CONFIG_EXT_REFS
@@ -378,87 +378,86 @@
 } RD_COST;
 
 // Reset the rate distortion cost values to maximum (invalid) value.
-void vp10_rd_cost_reset(RD_COST *rd_cost);
+void av1_rd_cost_reset(RD_COST *rd_cost);
 // Initialize the rate distortion cost values to zero.
-void vp10_rd_cost_init(RD_COST *rd_cost);
+void av1_rd_cost_init(RD_COST *rd_cost);
 
 struct TileInfo;
 struct TileDataEnc;
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 
-int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
+int av1_compute_rd_mult(const struct AV1_COMP *cpi, int qindex);
 
-void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
+void av1_initialize_rd_consts(struct AV1_COMP *cpi);
 
-void vp10_initialize_me_consts(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                               int qindex);
+void av1_initialize_me_consts(const struct AV1_COMP *cpi, MACROBLOCK *x,
+                              int qindex);
 
-void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n,
-                                   unsigned int qstep, int *rate,
-                                   int64_t *dist);
+void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n,
+                                  unsigned int qstep, int *rate, int64_t *dist);
 
-int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
-                             const MACROBLOCKD *const xd);
+int av1_get_switchable_rate(const struct AV1_COMP *cpi,
+                            const MACROBLOCKD *const xd);
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
-                             int stride);
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                            int stride);
 
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
-                                        int raster_block, int16_t *base);
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+                                       int16_t *base);
 
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
-                                              int ref_frame);
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const struct AV1_COMP *cpi,
+                                             int ref_frame);
 
-void vp10_init_me_luts(void);
+void av1_init_me_luts(void);
 
 #if CONFIG_REF_MV
-void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
+void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
 #endif
 
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
-                               const struct macroblockd_plane *pd,
-                               ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
-                               ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]);
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+                              const struct macroblockd_plane *pd,
+                              ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+                              ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]);
 
-void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds(struct AV1_COMP *cpi);
 
-void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds_sub8x8(struct AV1_COMP *cpi);
 
-void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
-                                int (*fact)[MAX_MODES], int rd_thresh,
-                                int bsize, int best_mode_index);
+void av1_update_rd_thresh_fact(const AV1_COMMON *const cm,
+                               int (*fact)[MAX_MODES], int rd_thresh, int bsize,
+                               int best_mode_index);
 
-void vp10_fill_token_costs(vp10_coeff_cost *c,
+void av1_fill_token_costs(av1_coeff_cost *c,
 #if CONFIG_ANS
-                           coeff_cdf_model (*cdf)[PLANE_TYPES],
+                          coeff_cdf_model (*cdf)[PLANE_TYPES],
 #endif  // CONFIG_ANS
-                           vp10_coeff_probs_model (*p)[PLANE_TYPES]);
+                          av1_coeff_probs_model (*p)[PLANE_TYPES]);
 
 static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
                                       int thresh_fact) {
   return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
 }
 
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
-                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
+void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                 int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
 
 static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
   x->errorperbit = rdmult >> RD_EPB_SHIFT;
   x->errorperbit += (x->errorperbit == 0);
 }
 
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
-                           struct buf_2d dst[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *scale,
-                           const struct scale_factors *scale_uv);
+void av1_setup_pred_block(const MACROBLOCKD *xd,
+                          struct buf_2d dst[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *scale,
+                          const struct scale_factors *scale_uv);
 
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
-                                vpx_bit_depth_t bit_depth);
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
+                               aom_bit_depth_t bit_depth);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RD_H_
+#endif  // AV1_ENCODER_RD_H_
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 62334a3..bd93746 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -11,12 +11,12 @@
 #include <assert.h>
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/blend.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/system_state.h"
 
@@ -120,7 +120,7 @@
 typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
 
 struct rdcost_block_args {
-  const VP10_COMP *cpi;
+  const AV1_COMP *cpi;
   MACROBLOCK *x;
   ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE];
   ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE];
@@ -136,7 +136,7 @@
 };
 
 #define LAST_NEW_MV_INDEX 6
-static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
+static const MODE_DEFINITION av1_mode_order[MAX_MODES] = {
   { NEARESTMV, { LAST_FRAME, NONE } },
 #if CONFIG_EXT_REFS
   { NEARESTMV, { LAST2_FRAME, NONE } },
@@ -391,7 +391,7 @@
 #endif  // CONFIG_EXT_INTER
 };
 
-static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
+static const REF_DEFINITION av1_ref_order[MAX_REFS] = {
   { { LAST_FRAME, NONE } },
 #if CONFIG_EXT_REFS
   { { LAST2_FRAME, NONE } },          { { LAST3_FRAME, NONE } },
@@ -417,9 +417,9 @@
   int l = get_unsigned_bits(n), m = (1 << l) - n;
   if (l == 0) return 0;
   if (v < m)
-    return (l - 1) * vp10_cost_bit(128, 0);
+    return (l - 1) * av1_cost_bit(128, 0);
   else
-    return l * vp10_cost_bit(128, 0);
+    return l * av1_cost_bit(128, 0);
 }
 
 // constants for prune 1 and prune 2 decision boundaries
@@ -444,7 +444,7 @@
 #endif  // CONFIG_EXT_TX
 };
 
-static void get_energy_distribution_fine(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static void get_energy_distribution_fine(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                                          uint8_t *src, int src_stride,
                                          uint8_t *dst, int dst_stride,
                                          double *hordist, double *verdist) {
@@ -459,7 +459,7 @@
     int i, j, index;
     int w_shift = bw == 8 ? 1 : 2;
     int h_shift = bh == 8 ? 1 : 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cpi->common.use_highbitdepth) {
       uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
       uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
@@ -471,7 +471,7 @@
               (src16[j + i * src_stride] - dst16[j + i * dst_stride]);
         }
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       for (i = 0; i < bh; ++i)
         for (j = 0; j < bw; ++j) {
@@ -479,9 +479,9 @@
           esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
                         (src[j + i * src_stride] - dst[j + i * dst_stride]);
         }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   } else {
     var[0] = cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[0]);
     var[1] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
@@ -569,9 +569,9 @@
   (void)var[15];
 }
 
-static int adst_vs_flipadst(const VP10_COMP *cpi, BLOCK_SIZE bsize,
-                            uint8_t *src, int src_stride, uint8_t *dst,
-                            int dst_stride, double *hdist, double *vdist) {
+static int adst_vs_flipadst(const AV1_COMP *cpi, BLOCK_SIZE bsize, uint8_t *src,
+                            int src_stride, uint8_t *dst, int dst_stride,
+                            double *hdist, double *vdist) {
   int prune_bitmask = 0;
   double svm_proj_h = 0, svm_proj_v = 0;
   get_energy_distribution_fine(cpi, bsize, src, src_stride, dst, dst_stride,
@@ -657,7 +657,7 @@
 }
 
 // Performance drop: 0.5%, Speed improvement: 24%
-static int prune_two_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static int prune_two_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd, int adst_flipadst,
                              int dct_idtx) {
   struct macroblock_plane *const p = &x->plane[0];
@@ -668,7 +668,7 @@
   double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
   double hcorr, vcorr;
   int prune = 0;
-  vp10_subtract_plane(x, bsize, 0);
+  av1_subtract_plane(x, bsize, 0);
 
   if (adst_flipadst)
     prune |= adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride,
@@ -680,17 +680,17 @@
 #endif  // CONFIG_EXT_TX
 
 // Performance drop: 0.3%, Speed improvement: 5%
-static int prune_one_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+static int prune_one_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd) {
   struct macroblock_plane *const p = &x->plane[0];
   struct macroblockd_plane *const pd = &xd->plane[0];
   double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
-  vp10_subtract_plane(x, bsize, 0);
+  av1_subtract_plane(x, bsize, 0);
   return adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride, pd->dst.buf,
                           pd->dst.stride, hdist, vdist);
 }
 
-static int prune_tx_types(const VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+static int prune_tx_types(const AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                           MACROBLOCKD *xd, int tx_set) {
 #if CONFIG_EXT_TX
   const int *tx_set_1D = ext_tx_used_inter_1D[tx_set];
@@ -736,15 +736,15 @@
 #endif
 }
 
-static void model_rd_from_sse(const VP10_COMP *const cpi,
+static void model_rd_from_sse(const AV1_COMP *const cpi,
                               const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
                               int plane, int64_t sse, int *rate,
                               int64_t *dist) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   const int dequant_shift =
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
                                                     3;
 
   // Fast approximate the modelling function.
@@ -754,19 +754,19 @@
 
     if (quantizer < 120)
       *rate = (int)((square_error * (280 - quantizer)) >>
-                    (16 - VP10_PROB_COST_SHIFT));
+                    (16 - AV1_PROB_COST_SHIFT));
     else
       *rate = 0;
     *dist = (square_error * quantizer) >> 8;
   } else {
-    vp10_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
-                                  pd->dequant[1] >> dequant_shift, rate, dist);
+    av1_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
+                                 pd->dequant[1] >> dequant_shift, rate, dist);
   }
 
   *dist <<= 4;
 }
 
-static void model_rd_for_sb(const VP10_COMP *const cpi, BLOCK_SIZE bsize,
+static void model_rd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
                             MACROBLOCK *x, MACROBLOCKD *xd, int plane_from,
                             int plane_to, int *out_rate_sum,
                             int64_t *out_dist_sum, int *skip_txfm_sb,
@@ -813,8 +813,8 @@
   *out_dist_sum = dist_sum;
 }
 
-int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
-                           intptr_t block_size, int64_t *ssz) {
+int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+                          intptr_t block_size, int64_t *ssz) {
   int i;
   int64_t error = 0, sqcoeff = 0;
 
@@ -828,8 +828,8 @@
   return error;
 }
 
-int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
-                              int block_size) {
+int64_t av1_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+                             int block_size) {
   int i;
   int64_t error = 0;
 
@@ -841,10 +841,10 @@
   return error;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
-                                  const tran_low_t *dqcoeff,
-                                  intptr_t block_size, int64_t *ssz, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
+                                 const tran_low_t *dqcoeff, intptr_t block_size,
+                                 int64_t *ssz, int bd) {
   int i;
   int64_t error = 0, sqcoeff = 0;
   int shift = 2 * (bd - 8);
@@ -862,7 +862,7 @@
   *ssz = sqcoeff;
   return error;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* The trailing '0' is a terminator which is used inside cost_coeffs() to
  * decide whether to include cost of a trailing EOB node or not (i.e. we
@@ -895,10 +895,10 @@
   int pt = combine_entropy_contexts(*A, *L);
 #endif
   int c, cost;
-#if CONFIG_VP9_HIGHBITDEPTH
-  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+  const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
 #else
-  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+  const int *cat6_high_cost = av1_get_high_cost_table(8);
 #endif
 
 #if !CONFIG_VAR_TX && !CONFIG_SUPERTX
@@ -918,10 +918,10 @@
       // dc token
       int v = qcoeff[0];
       int16_t prev_t;
-      cost = vp10_get_token_cost(v, &prev_t, cat6_high_cost);
+      cost = av1_get_token_cost(v, &prev_t, cat6_high_cost);
       cost += (*token_costs)[0][pt][prev_t];
 
-      token_cache[0] = vp10_pt_energy_class[prev_t];
+      token_cache[0] = av1_pt_energy_class[prev_t];
       ++token_costs;
 
       // ac tokens
@@ -930,7 +930,7 @@
         int16_t t;
 
         v = qcoeff[rc];
-        cost += vp10_get_token_cost(v, &t, cat6_high_cost);
+        cost += av1_get_token_cost(v, &t, cat6_high_cost);
         cost += (*token_costs)[!prev_t][!prev_t][t];
         prev_t = t;
         if (!--band_left) {
@@ -949,10 +949,10 @@
       int v = qcoeff[0];
       int16_t tok;
       unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
-      cost = vp10_get_token_cost(v, &tok, cat6_high_cost);
+      cost = av1_get_token_cost(v, &tok, cat6_high_cost);
       cost += (*token_costs)[0][pt][tok];
 
-      token_cache[0] = vp10_pt_energy_class[tok];
+      token_cache[0] = av1_pt_energy_class[tok];
       ++token_costs;
 
       tok_cost_ptr = &((*token_costs)[!tok]);
@@ -962,10 +962,10 @@
         const int rc = scan[c];
 
         v = qcoeff[rc];
-        cost += vp10_get_token_cost(v, &tok, cat6_high_cost);
+        cost += av1_get_token_cost(v, &tok, cat6_high_cost);
         pt = get_coef_context(nb, token_cache, c);
         cost += (*tok_cost_ptr)[pt][tok];
-        token_cache[rc] = vp10_pt_energy_class[tok];
+        token_cache[rc] = av1_pt_energy_class[tok];
         if (!--band_left) {
           band_left = *band_count++;
           ++token_costs;
@@ -989,8 +989,8 @@
   return cost;
 }
 
-static void dist_block(const VP10_COMP *cpi, MACROBLOCK *x, int plane,
-                       int block, int blk_row, int blk_col, TX_SIZE tx_size,
+static void dist_block(const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block,
+                       int blk_row, int blk_col, TX_SIZE tx_size,
                        int64_t *out_dist, int64_t *out_sse) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
@@ -1004,16 +1004,16 @@
     int shift = (MAX_TX_SCALE - get_tx_scale(xd, tx_type, tx_size)) * 2;
     tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
     tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
-    *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
-                                        &this_sse, bd) >>
+    *out_dist = av1_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+                                       &this_sse, bd) >>
                 shift;
 #else
     *out_dist =
-        vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
+        av1_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
         shift;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     *out_sse = this_sse >> shift;
   } else {
     const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
@@ -1037,12 +1037,12 @@
 
     if (eob) {
       const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint16_t, recon16[MAX_TX_SQUARE]);
       uint8_t *recon = (uint8_t *)recon16;
 #else
       DECLARE_ALIGNED(16, uint8_t, recon[MAX_TX_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       const PLANE_TYPE plane_type = plane == 0 ? PLANE_TYPE_Y : PLANE_TYPE_UV;
 
@@ -1053,17 +1053,17 @@
       inv_txfm_param.eob = eob;
       inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         recon = CONVERT_TO_BYTEPTR(recon);
         inv_txfm_param.bd = xd->bd;
-        vpx_highbd_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0,
+        aom_highbd_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0,
                                  NULL, 0, bsw, bsh, xd->bd);
         highbd_inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
       } else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       {
-        vpx_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0, NULL, 0,
+        aom_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0, NULL, 0,
                           bsw, bsh);
         inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
       }
@@ -1100,33 +1100,33 @@
   switch (tx_size) {
 #if CONFIG_EXT_TX
     case TX_4X8:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
-            vpx_sum_squares_2d_i16(diff + 4 * diff_stride, diff_stride, 4);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 4) +
+            aom_sum_squares_2d_i16(diff + 4 * diff_stride, diff_stride, 4);
       break;
     case TX_8X4:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
-            vpx_sum_squares_2d_i16(diff + 4, diff_stride, 4);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 4) +
+            aom_sum_squares_2d_i16(diff + 4, diff_stride, 4);
       break;
     case TX_8X16:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
-            vpx_sum_squares_2d_i16(diff + 8 * diff_stride, diff_stride, 8);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 8) +
+            aom_sum_squares_2d_i16(diff + 8 * diff_stride, diff_stride, 8);
       break;
     case TX_16X8:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
-            vpx_sum_squares_2d_i16(diff + 8, diff_stride, 8);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 8) +
+            aom_sum_squares_2d_i16(diff + 8, diff_stride, 8);
       break;
     case TX_16X32:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
-            vpx_sum_squares_2d_i16(diff + 16 * diff_stride, diff_stride, 16);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 16) +
+            aom_sum_squares_2d_i16(diff + 16 * diff_stride, diff_stride, 16);
       break;
     case TX_32X16:
-      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
-            vpx_sum_squares_2d_i16(diff + 16, diff_stride, 16);
+      sse = aom_sum_squares_2d_i16(diff, diff_stride, 16) +
+            aom_sum_squares_2d_i16(diff + 16, diff_stride, 16);
       break;
 #endif  // CONFIG_EXT_TX
     default:
       assert(tx_size < TX_SIZES);
-      sse = vpx_sum_squares_2d_i16(
+      sse = aom_sum_squares_2d_i16(
           diff, diff_stride, num_4x4_blocks_wide_txsize_lookup[tx_size] << 2);
       break;
   }
@@ -1152,8 +1152,8 @@
     struct encode_b_args intra_arg = {
       x, NULL, &mbmi->skip, args->t_above, args->t_left, 1
     };
-    vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, &intra_arg);
+    av1_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                           &intra_arg);
 
     if (args->cpi->sf.use_transform_domain_distortion) {
       dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
@@ -1162,7 +1162,7 @@
       // Note that the encode block_intra call above already calls
       // inv_txfm_add, so we can't just call dist_block here.
       const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
-      const vpx_variance_fn_t variance = args->cpi->fn_ptr[tx_bsize].vf;
+      const aom_variance_fn_t variance = args->cpi->fn_ptr[tx_bsize].vf;
 
       const struct macroblock_plane *const p = &x->plane[plane];
       const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -1178,10 +1178,10 @@
       unsigned int tmp;
       sse = sum_squares_2d(diff, diff_stride, tx_size);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
         sse = ROUND_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       sse = (int64_t)sse * 16;
 
       variance(src, src_stride, dst, dst_stride, &tmp);
@@ -1190,14 +1190,14 @@
   } else {
 // full forward transform and quantization
 #if CONFIG_NEW_QUANT
-    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                            tx_size, coeff_ctx);
+    av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                           tx_size, coeff_ctx);
 #else
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                     VP10_XFORM_QUANT_FP);
+    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                    AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
     if (x->plane[plane].eobs[block])
-      vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+      av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
     dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
                &sse);
   }
@@ -1213,7 +1213,7 @@
   rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
 
   // TODO(jingning): temporarily enabled only for luma component
-  rd = VPXMIN(rd1, rd2);
+  rd = AOMMIN(rd1, rd2);
 
   args->this_rate += rate;
   args->this_dist += dist;
@@ -1228,7 +1228,7 @@
   args->skippable &= !x->plane[plane].eobs[block];
 }
 
-static void txfm_rd_in_plane(MACROBLOCK *x, const VP10_COMP *cpi, int *rate,
+static void txfm_rd_in_plane(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
                              int64_t *distortion, int *skippable, int64_t *sse,
                              int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
                              TX_SIZE tx_size, int use_fast_coef_casting) {
@@ -1236,7 +1236,7 @@
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   TX_TYPE tx_type;
   struct rdcost_block_args args;
-  vp10_zero(args);
+  av1_zero(args);
   args.x = x;
   args.cpi = cpi;
   args.best_rd = ref_best_rd;
@@ -1245,13 +1245,13 @@
 
   if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
 
-  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+  av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
 
   tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
   args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
 
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
-                                          &args);
+  av1_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+                                         &args);
   if (args.exit_early) {
     *rate = INT_MAX;
     *distortion = INT64_MAX;
@@ -1266,18 +1266,17 @@
 }
 
 #if CONFIG_SUPERTX
-void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
-                                   int *rate, int64_t *distortion,
-                                   int *skippable, int64_t *sse,
-                                   int64_t ref_best_rd, int plane,
-                                   BLOCK_SIZE bsize, TX_SIZE tx_size,
-                                   int use_fast_coef_casting) {
+void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
+                                  int64_t *distortion, int *skippable,
+                                  int64_t *sse, int64_t ref_best_rd, int plane,
+                                  BLOCK_SIZE bsize, TX_SIZE tx_size,
+                                  int use_fast_coef_casting) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   struct rdcost_block_args args;
   TX_TYPE tx_type;
 
-  vp10_zero(args);
+  av1_zero(args);
   args.cpi = cpi;
   args.x = x;
   args.best_rd = ref_best_rd;
@@ -1289,7 +1288,7 @@
 
   if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
 
-  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+  av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
 
   tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
   args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
@@ -1311,14 +1310,14 @@
 }
 #endif  // CONFIG_SUPERTX
 
-static int64_t txfm_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
+static int64_t txfm_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
                         int *s, int64_t *sse, int64_t ref_best_rd,
                         BLOCK_SIZE bs, TX_TYPE tx_type, int tx_size) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   int64_t rd = INT64_MAX;
-  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
+  aom_prob skip_prob = av1_get_skip_prob(cm, xd);
   int s0, s1;
   const int is_inter = is_inter_block(mbmi);
   const int tx_size_ctx = get_tx_size_context(xd);
@@ -1334,8 +1333,8 @@
   assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
 
-  s0 = vp10_cost_bit(skip_prob, 0);
-  s1 = vp10_cost_bit(skip_prob, 1);
+  s0 = av1_cost_bit(skip_prob, 0);
+  s1 = av1_cost_bit(skip_prob, 1);
 
   mbmi->tx_type = tx_type;
   mbmi->tx_size = tx_size;
@@ -1382,17 +1381,17 @@
   if (tx_select) *r += r_tx_size;
 
   if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*s))
-    rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+    rd = AOMMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
 
   return rd;
 }
 
-static int64_t choose_tx_size_fix_type(VP10_COMP *cpi, BLOCK_SIZE bs,
+static int64_t choose_tx_size_fix_type(AV1_COMP *cpi, BLOCK_SIZE bs,
                                        MACROBLOCK *x, int *rate,
                                        int64_t *distortion, int *skip,
                                        int64_t *psse, int64_t ref_best_rd,
                                        TX_TYPE tx_type, int prune) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   int r, s;
@@ -1510,7 +1509,7 @@
 }
 
 #if CONFIG_EXT_INTER
-static int64_t estimate_yrd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
+static int64_t estimate_yrd_for_sb(AV1_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
                                    int *r, int64_t *d, int *s, int64_t *sse,
                                    int64_t ref_best_rd) {
   return txfm_yrd(cpi, x, r, d, s, sse, ref_best_rd, bs, DCT_DCT,
@@ -1518,18 +1517,18 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_largest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                    int64_t *distortion, int *skip, int64_t *sse,
                                    int64_t ref_best_rd, BLOCK_SIZE bs) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   TX_TYPE tx_type, best_tx_type = DCT_DCT;
   int r, s;
   int64_t d, psse, this_rd, best_rd = INT64_MAX;
-  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
-  int s0 = vp10_cost_bit(skip_prob, 0);
-  int s1 = vp10_cost_bit(skip_prob, 1);
+  aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+  int s0 = av1_cost_bit(skip_prob, 0);
+  int s1 = av1_cost_bit(skip_prob, 1);
   const int is_inter = is_inter_block(mbmi);
   int prune = 0;
 #if CONFIG_EXT_TX
@@ -1593,7 +1592,7 @@
       else
         this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
       if (is_inter_block(mbmi) && !xd->lossless[mbmi->segment_id] && !s)
-        this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+        this_rd = AOMMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
 
       if (this_rd < best_rd) {
         best_rd = this_rd;
@@ -1630,7 +1629,7 @@
       else
         this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
       if (is_inter && !xd->lossless[mbmi->segment_id] && !s)
-        this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+        this_rd = AOMMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
 
       if (this_rd < best_rd) {
         best_rd = this_rd;
@@ -1645,7 +1644,7 @@
                    mbmi->tx_size, cpi->sf.use_fast_coef_costing);
 }
 
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_smallest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                     int64_t *distortion, int *skip,
                                     int64_t *sse, int64_t ref_best_rd,
                                     BLOCK_SIZE bs) {
@@ -1659,10 +1658,10 @@
                    mbmi->tx_size, cpi->sf.use_fast_coef_costing);
 }
 
-static void choose_tx_size_type_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
-                                        int *rate, int64_t *distortion,
-                                        int *skip, int64_t *psse,
-                                        int64_t ref_best_rd, BLOCK_SIZE bs) {
+static void choose_tx_size_type_from_rd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
+                                        int64_t *distortion, int *skip,
+                                        int64_t *psse, int64_t ref_best_rd,
+                                        BLOCK_SIZE bs) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   int r, s;
@@ -1709,7 +1708,7 @@
 #endif
 }
 
-static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void super_block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skip, int64_t *psse,
                             BLOCK_SIZE bs, int64_t ref_best_rd) {
   MACROBLOCKD *xd = &x->e_mbd;
@@ -1748,7 +1747,7 @@
 }
 
 static int rd_pick_palette_intra_sby(
-    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
+    AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
     int dc_mode_cost, PALETTE_MODE_INFO *palette_mode_info,
     uint8_t *best_palette_color_map, TX_SIZE *best_tx, TX_TYPE *best_tx_type,
     PREDICTION_MODE *mode_selected, int64_t *best_rd) {
@@ -1764,13 +1763,13 @@
 
   assert(cpi->common.allow_screen_content_tools);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cpi->common.use_highbitdepth)
-    colors = vp10_count_colors_highbd(src, src_stride, rows, cols,
-                                      cpi->common.bit_depth);
+    colors = av1_count_colors_highbd(src, src_stride, rows, cols,
+                                     cpi->common.bit_depth);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    colors = vp10_count_colors(src, src_stride, rows, cols);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+    colors = av1_count_colors(src, src_stride, rows, cols);
   palette_mode_info->palette_size[0] = 0;
 #if CONFIG_EXT_INTRA
   mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] = 0;
@@ -1787,15 +1786,15 @@
     float lb, ub, val;
     MB_MODE_INFO *const mbmi = &mic->mbmi;
     PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
     if (cpi->common.use_highbitdepth)
       lb = ub = src16[0];
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       lb = ub = src[0];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cpi->common.use_highbitdepth) {
       for (r = 0; r < rows; ++r) {
         for (c = 0; c < cols; ++c) {
@@ -1808,7 +1807,7 @@
         }
       }
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       for (r = 0; r < rows; ++r) {
         for (c = 0; c < cols; ++c) {
           val = src[r * src_stride + c];
@@ -1819,9 +1818,9 @@
             ub = val;
         }
       }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     mbmi->mode = DC_PRED;
 #if CONFIG_EXT_INTRA
@@ -1834,21 +1833,21 @@
          --n) {
       for (i = 0; i < n; ++i)
         centroids[i] = lb + (2 * i + 1) * (ub - lb) / n / 2;
-      vp10_k_means(data, centroids, color_map, rows * cols, n, 1, max_itr);
-      k = vp10_remove_duplicates(centroids, n);
+      av1_k_means(data, centroids, color_map, rows * cols, n, 1, max_itr);
+      k = av1_remove_duplicates(centroids, n);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cpi->common.use_highbitdepth)
         for (i = 0; i < k; ++i)
           pmi->palette_colors[i] =
               clip_pixel_highbd((int)centroids[i], cpi->common.bit_depth);
       else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         for (i = 0; i < k; ++i)
           pmi->palette_colors[i] = clip_pixel((int)centroids[i]);
       pmi->palette_size[0] = k;
 
-      vp10_calc_indices(data, centroids, color_map, rows * cols, k, 1);
+      av1_calc_indices(data, centroids, color_map, rows * cols, k, 1);
 
       super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
                       bsize, *best_rd);
@@ -1856,16 +1855,16 @@
 
       this_rate =
           this_rate_tokenonly + dc_mode_cost +
-          cpi->common.bit_depth * k * vp10_cost_bit(128, 0) +
+          cpi->common.bit_depth * k * av1_cost_bit(128, 0) +
           cpi->palette_y_size_cost[bsize - BLOCK_8X8][k - 2] +
           write_uniform_cost(k, color_map[0]) +
-          vp10_cost_bit(
-              vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
+          av1_cost_bit(
+              av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
               1);
       for (i = 0; i < rows; ++i) {
         for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
-          color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, k,
-                                                     color_order);
+          color_ctx = av1_get_palette_color_context(color_map, cols, i, j, k,
+                                                    color_order);
           for (r = 0; r < k; ++r)
             if (color_map[i * cols + j] == color_order[r]) {
               color_idx = r;
@@ -1892,7 +1891,7 @@
   return rate_overhead;
 }
 
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+static int64_t rd_pick_intra4x4block(AV1_COMP *cpi, MACROBLOCK *x, int row,
                                      int col, PREDICTION_MODE *best_mode,
                                      const int *bmode_costs, ENTROPY_CONTEXT *a,
                                      ENTROPY_CONTEXT *l, int *bestrate,
@@ -1913,7 +1912,7 @@
   const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
   int idx, idy;
   uint8_t best_dst[8 * 8];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint16_t best_dst16[8 * 8];
 #endif
 
@@ -1922,7 +1921,7 @@
   xd->mi[0]->mbmi.tx_size = TX_4X4;
   xd->mi[0]->mbmi.palette_mode_info.palette_size[0] = 0;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
       int64_t this_rd;
@@ -1947,11 +1946,11 @@
           const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
           uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
           int16_t *const src_diff =
-              vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+              av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
           xd->mi[0]->bmi[block].as_mode = mode;
-          vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
-                                   dst_stride, col + idx, row + idy, 0);
-          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+          av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                  dst_stride, col + idx, row + idy, 0);
+          aom_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
                                     dst_stride, xd->bd);
           if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
             TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
@@ -1961,11 +1960,11 @@
                 combine_entropy_contexts(*(tempa + idx), *(templ + idy));
 #endif  // CONFIG_VAR_TX | CONFIG_NEW_QUANT
 #if CONFIG_NEW_QUANT
-            vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
-                                    BLOCK_8X8, TX_4X4, coeff_ctx);
+            av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                   TX_4X4, coeff_ctx);
 #else
-            vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
-                             TX_4X4, VP10_XFORM_QUANT_FP);
+            av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                            TX_4X4, AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
 #if CONFIG_VAR_TX
             ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
@@ -1979,9 +1978,9 @@
 #endif  // CONFIG_VAR_TX
             if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
               goto next_highbd;
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
-                                         dst_stride, p->eobs[block], xd->bd,
-                                         DCT_DCT, 1);
+            av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                        dst_stride, p->eobs[block], xd->bd,
+                                        DCT_DCT, 1);
           } else {
             int64_t dist;
             unsigned int tmp;
@@ -1990,13 +1989,13 @@
             const int coeff_ctx =
                 combine_entropy_contexts(*(tempa + idx), *(templ + idy));
 #if CONFIG_NEW_QUANT
-            vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
-                                    BLOCK_8X8, TX_4X4, coeff_ctx);
+            av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                   TX_4X4, coeff_ctx);
 #else
-            vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
-                             TX_4X4, VP10_XFORM_QUANT_FP);
+            av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                            TX_4X4, AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
-            vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+            av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
 #if CONFIG_VAR_TX
             ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
                                  so->neighbors, cpi->sf.use_fast_coef_costing);
@@ -2007,9 +2006,9 @@
                                  so->scan, so->neighbors,
                                  cpi->sf.use_fast_coef_costing);
 #endif  // CONFIG_VAR_TX
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
-                                         dst_stride, p->eobs[block], xd->bd,
-                                         tx_type, 0);
+            av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                        dst_stride, p->eobs[block], xd->bd,
+                                        tx_type, 0);
             cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
             dist = (int64_t)tmp << 4;
             distortion += dist;
@@ -2048,7 +2047,7 @@
 
     return best_rd;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
     int64_t this_rd;
@@ -2073,11 +2072,11 @@
         const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
         uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
         int16_t *const src_diff =
-            vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+            av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
         xd->mi[0]->bmi[block].as_mode = mode;
-        vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
-                                 dst_stride, col + idx, row + idy, 0);
-        vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
+        av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                dst_stride, col + idx, row + idy, 0);
+        aom_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
 
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
           TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
@@ -2087,11 +2086,11 @@
               combine_entropy_contexts(*(tempa + idx), *(templ + idy));
 #endif  // CONFIG_VAR_TX | CONFIG_NEW_QUANT
 #if CONFIG_NEW_QUANT
-          vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
-                                  TX_4X4, coeff_ctx);
+          av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                 TX_4X4, coeff_ctx);
 #else
-          vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
-                           VP10_XFORM_QUANT_B);
+          av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+                          AV1_XFORM_QUANT_B);
 #endif  // CONFIG_NEW_QUANT
 #if CONFIG_VAR_TX
           ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
@@ -2105,8 +2104,8 @@
 #endif
           if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
             goto next;
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
-                                dst_stride, p->eobs[block], DCT_DCT, 1);
+          av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                               dst_stride, p->eobs[block], DCT_DCT, 1);
         } else {
           int64_t dist;
           unsigned int tmp;
@@ -2115,13 +2114,13 @@
           const int coeff_ctx =
               combine_entropy_contexts(*(tempa + idx), *(templ + idy));
 #if CONFIG_NEW_QUANT
-          vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
-                                  TX_4X4, coeff_ctx);
+          av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                 TX_4X4, coeff_ctx);
 #else
-          vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
-                           VP10_XFORM_QUANT_FP);
+          av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+                          AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
-          vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+          av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
 #if CONFIG_VAR_TX
           ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
                                so->neighbors, cpi->sf.use_fast_coef_costing);
@@ -2132,8 +2131,8 @@
                                so->scan, so->neighbors,
                                cpi->sf.use_fast_coef_costing);
 #endif
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
-                                dst_stride, p->eobs[block], tx_type, 0);
+          av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                               dst_stride, p->eobs[block], tx_type, 0);
           cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
           dist = (int64_t)tmp << 4;
           distortion += dist;
@@ -2173,7 +2172,7 @@
   return best_rd;
 }
 
-static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
+static int64_t rd_pick_intra_sub_8x8_y_mode(AV1_COMP *cpi, MACROBLOCK *mb,
                                             int *rate, int *rate_y,
                                             int64_t *distortion,
                                             int64_t best_rd) {
@@ -2210,8 +2209,8 @@
       int64_t d = INT64_MAX, this_rd = INT64_MAX;
       i = idy * 2 + idx;
       if (cpi->common.frame_type == KEY_FRAME) {
-        const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
-        const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
+        const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, i);
+        const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, i);
 
         bmode_costs = cpi->y_mode_costs[A][L];
       }
@@ -2267,7 +2266,7 @@
 
 #if CONFIG_EXT_INTRA
 // Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_ext_intra_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                  int *rate_tokenonly, int64_t *distortion,
                                  int *skippable, BLOCK_SIZE bsize,
                                  int mode_cost, int64_t *best_rd,
@@ -2283,7 +2282,7 @@
   EXT_INTRA_MODE_INFO ext_intra_mode_info;
   TX_TYPE best_tx_type;
 
-  vp10_zero(ext_intra_mode_info);
+  av1_zero(ext_intra_mode_info);
   mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 1;
   mbmi->mode = DC_PRED;
   mbmi->palette_mode_info.palette_size[0] = 0;
@@ -2296,7 +2295,7 @@
     if (this_rate_tokenonly == INT_MAX) continue;
 
     this_rate = this_rate_tokenonly +
-                vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 1) +
+                av1_cost_bit(cpi->common.fc->ext_intra_probs[0], 1) +
                 write_uniform_cost(FILTER_INTRA_MODES, mode) + mode_cost;
     this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
 
@@ -2328,7 +2327,7 @@
 }
 
 static void pick_intra_angle_routine_sby(
-    VP10_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
+    AV1_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
     int64_t *distortion, int *skippable, int *best_angle_delta,
     TX_SIZE *best_tx_size, TX_TYPE *best_tx_type, INTRA_FILTER *best_filter,
     BLOCK_SIZE bsize, int rate_overhead, int64_t *best_rd) {
@@ -2355,7 +2354,7 @@
   }
 }
 
-static int64_t rd_pick_intra_angle_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_angle_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                        int *rate_tokenonly, int64_t *distortion,
                                        int *skippable, BLOCK_SIZE bsize,
                                        int rate_overhead, int64_t best_rd) {
@@ -2364,7 +2363,7 @@
   MB_MODE_INFO *mbmi = &mic->mbmi;
   int this_rate, this_rate_tokenonly, s;
   int angle_delta, best_angle_delta = 0, p_angle;
-  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
   INTRA_FILTER filter, best_filter = INTRA_FILTER_LINEAR;
   const double rd_adjust = 1.2;
   int64_t this_distortion, this_rd;
@@ -2385,7 +2384,7 @@
           mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
       for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
         int64_t tmp_best_rd;
-        if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+        if ((FILTER_FAST_SEARCH || !av1_is_intra_filter_switchable(p_angle)) &&
             filter != INTRA_FILTER_LINEAR)
           continue;
         mic->mbmi.intra_filter = filter;
@@ -2430,7 +2429,7 @@
         for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
           mic->mbmi.intra_filter = filter;
           if ((FILTER_FAST_SEARCH ||
-               !vp10_is_intra_filter_switchable(p_angle)) &&
+               !av1_is_intra_filter_switchable(p_angle)) &&
               filter != INTRA_FILTER_LINEAR)
             continue;
           pick_intra_angle_routine_sby(
@@ -2450,7 +2449,7 @@
           mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
       for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
         mic->mbmi.intra_filter = filter;
-        if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+        if ((FILTER_FAST_SEARCH || !av1_is_intra_filter_switchable(p_angle)) &&
             filter != INTRA_FILTER_LINEAR)
           continue;
         pick_intra_angle_routine_sby(
@@ -2466,7 +2465,7 @@
   if (FILTER_FAST_SEARCH && *rate_tokenonly < INT_MAX) {
     mbmi->angle_delta[0] = best_angle_delta;
     p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
-    if (vp10_is_intra_filter_switchable(p_angle)) {
+    if (av1_is_intra_filter_switchable(p_angle)) {
       for (filter = INTRA_FILTER_LINEAR + 1; filter < INTRA_FILTERS; ++filter) {
         mic->mbmi.intra_filter = filter;
         pick_intra_angle_routine_sby(
@@ -2534,7 +2533,7 @@
         remd = dx % dy;
         quot = dx / dy;
         remd = remd * 16 / dy;
-        index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+        index = gradient_to_angle_bin[sn][AOMMIN(quot, 6)][AOMMIN(remd, 15)];
       }
       hist[index] += temp;
     }
@@ -2561,7 +2560,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
                                     int rows, int cols,
                                     uint8_t *directional_mode_skip_mask) {
@@ -2586,7 +2585,7 @@
         remd = dx % dy;
         quot = dx / dy;
         remd = remd * 16 / dy;
-        index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+        index = gradient_to_angle_bin[sn][AOMMIN(quot, 6)][AOMMIN(remd, 15)];
       }
       hist[index] += temp;
     }
@@ -2612,11 +2611,11 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_INTRA
 
 // This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sby_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                       int *rate_tokenonly, int64_t *distortion,
                                       int *skippable, BLOCK_SIZE bsize,
                                       int64_t best_rd) {
@@ -2628,7 +2627,7 @@
   int64_t this_distortion, this_rd;
   TX_SIZE best_tx = TX_4X4;
 #if CONFIG_EXT_INTRA
-  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
   EXT_INTRA_MODE_INFO ext_intra_mode_info;
   int is_directional_mode, rate_overhead, best_angle_delta = 0;
   INTRA_FILTER best_filter = INTRA_FILTER_LINEAR;
@@ -2651,8 +2650,8 @@
   int palette_ctx = 0;
   const MODE_INFO *above_mi = xd->above_mi;
   const MODE_INFO *left_mi = xd->left_mi;
-  const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
-  const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
+  const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, 0);
+  const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, 0);
   const PREDICTION_MODE FINAL_MODE_SEARCH = TM_PRED + 1;
   const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
   bmode_costs = cpi->y_mode_costs[A][L];
@@ -2663,7 +2662,7 @@
   mic->mbmi.angle_delta[0] = 0;
   memset(directional_mode_skip_mask, 0,
          sizeof(directional_mode_skip_mask[0]) * INTRA_MODES);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     highbd_angle_estimation(src, src_stride, rows, cols,
                             directional_mode_skip_mask);
@@ -2728,11 +2727,11 @@
                             TX_8X8][get_tx_size_context(xd)][mic->mbmi.tx_size];
     }
     if (cpi->common.allow_screen_content_tools && mic->mbmi.mode == DC_PRED)
-      this_rate += vp10_cost_bit(
-          vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+      this_rate += av1_cost_bit(
+          av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
 #if CONFIG_EXT_INTRA
     if (mic->mbmi.mode == DC_PRED && ALLOW_FILTER_INTRA_MODES)
-      this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 0);
+      this_rate += av1_cost_bit(cpi->common.fc->ext_intra_probs[0], 0);
     if (is_directional_mode) {
       int p_angle;
       this_rate +=
@@ -2740,7 +2739,7 @@
                              MAX_ANGLE_DELTAS + mic->mbmi.angle_delta[0]);
       p_angle = mode_to_angle_map[mic->mbmi.mode] +
                 mic->mbmi.angle_delta[0] * ANGLE_STEP;
-      if (vp10_is_intra_filter_switchable(p_angle))
+      if (av1_is_intra_filter_switchable(p_angle))
         this_rate +=
             cpi->intra_filter_cost[intra_filter_ctx][mic->mbmi.intra_filter];
     }
@@ -2815,10 +2814,10 @@
 }
 
 #if CONFIG_VAR_TX
-void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
-                        int blk_row, int blk_col, int plane, int block,
-                        int plane_bsize, int coeff_ctx, int *rate,
-                        int64_t *dist, int64_t *bsse, int *skip) {
+void av1_tx_block_rd_b(const AV1_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+                       int blk_row, int blk_col, int plane, int block,
+                       int plane_bsize, int coeff_ctx, int *rate, int64_t *dist,
+                       int64_t *bsse, int *skip) {
   MACROBLOCKD *xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -2834,12 +2833,12 @@
   int src_stride = p->src.stride;
   uint8_t *src = &p->src.buf[4 * blk_row * src_stride + 4 * blk_col];
   uint8_t *dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, rec_buffer16[MAX_TX_SQUARE]);
   uint8_t *rec_buffer;
 #else
   DECLARE_ALIGNED(16, uint8_t, rec_buffer[MAX_TX_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
   const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
@@ -2856,51 +2855,51 @@
     max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
 
 #if CONFIG_NEW_QUANT
-  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
-                          tx_size, coeff_ctx);
+  av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                         tx_size, coeff_ctx);
 #else
-  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
-                   VP10_XFORM_QUANT_FP);
+  av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                  AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
 
-  vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+  av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
 
 // TODO(any): Use dist_block to compute distortion
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     rec_buffer = CONVERT_TO_BYTEPTR(rec_buffer16);
-    vpx_highbd_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL,
+    aom_highbd_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL,
                              0, NULL, 0, bh, bh, xd->bd);
   } else {
     rec_buffer = (uint8_t *)rec_buffer16;
-    vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0,
+    aom_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0,
                       NULL, 0, bh, bh);
   }
 #else
-  vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0, NULL,
+  aom_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0, NULL,
                     0, bh, bh);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (blk_row + (bh >> 2) > max_blocks_high ||
       blk_col + (bh >> 2) > max_blocks_wide) {
     int idx, idy;
-    int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
-    int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+    int blocks_height = AOMMIN(bh >> 2, max_blocks_high - blk_row);
+    int blocks_width = AOMMIN(bh >> 2, max_blocks_wide - blk_col);
     tmp = 0;
     for (idy = 0; idy < blocks_height; idy += 2) {
       for (idx = 0; idx < blocks_width; idx += 2) {
         const int16_t *d = diff + 4 * idy * diff_stride + 4 * idx;
-        tmp += vpx_sum_squares_2d_i16(d, diff_stride, 8);
+        tmp += aom_sum_squares_2d_i16(d, diff_stride, 8);
       }
     }
   } else {
-    tmp = vpx_sum_squares_2d_i16(diff, diff_stride, bh);
+    tmp = aom_sum_squares_2d_i16(diff, diff_stride, bh);
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     tmp = ROUND_POWER_OF_TWO(tmp, (xd->bd - 8) * 2);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   *bsse += tmp * 16;
 
   if (p->eobs[block] > 0) {
@@ -2909,23 +2908,23 @@
     inv_txfm_param.tx_size = tx_size;
     inv_txfm_param.eob = p->eobs[block];
     inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       inv_txfm_param.bd = xd->bd;
       highbd_inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
     } else {
       inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
     }
-#else   // CONFIG_VP9_HIGHBITDEPTH
+#else   // CONFIG_AOM_HIGHBITDEPTH
     inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     if ((bh >> 2) + blk_col > max_blocks_wide ||
         (bh >> 2) + blk_row > max_blocks_high) {
       int idx, idy;
       unsigned int this_dist;
-      int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
-      int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+      int blocks_height = AOMMIN(bh >> 2, max_blocks_high - blk_row);
+      int blocks_width = AOMMIN(bh >> 2, max_blocks_wide - blk_col);
       tmp = 0;
       for (idy = 0; idy < blocks_height; idy += 2) {
         for (idx = 0; idx < blocks_width; idx += 2) {
@@ -2948,7 +2947,7 @@
   *skip &= (p->eobs[block] == 0);
 }
 
-static void select_tx_block(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
                             int blk_col, int plane, int block, TX_SIZE tx_size,
                             BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
                             ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above,
@@ -2977,7 +2976,7 @@
 
   int64_t sum_dist = 0, sum_bsse = 0;
   int64_t sum_rd = INT64_MAX;
-  int sum_rate = vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 1);
+  int sum_rate = av1_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 1);
   int all_skip = 1;
   int tmp_eob = 0;
   int zero_blk_rate;
@@ -3029,8 +3028,8 @@
 
   if (cpi->common.tx_mode == TX_MODE_SELECT || tx_size == TX_4X4) {
     inter_tx_size[0][0] = tx_size;
-    vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
-                       plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+    av1_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+                      plane_bsize, coeff_ctx, rate, dist, bsse, skip);
 
     if ((RDCOST(x->rdmult, x->rddiv, *rate, *dist) >=
              RDCOST(x->rdmult, x->rddiv, zero_blk_rate, *bsse) ||
@@ -3047,7 +3046,7 @@
     }
 
     if (tx_size > TX_4X4)
-      *rate += vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 0);
+      *rate += av1_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 0);
     this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *dist);
     tmp_eob = p->eobs[block];
   }
@@ -3109,7 +3108,7 @@
   }
 }
 
-static void inter_block_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void inter_block_yrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             BLOCK_SIZE bsize, int64_t ref_best_rd) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -3141,7 +3140,7 @@
     int pnrate = 0, pnskip = 1;
     int64_t pndist = 0, pnsse = 0;
 
-    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctxa, ctxl);
+    av1_get_entropy_contexts(bsize, TX_4X4, pd, ctxa, ctxl);
     memcpy(tx_above, xd->above_txfm_context,
            sizeof(TXFM_CONTEXT) * (mi_width >> 1));
     memcpy(tx_left, xd->left_txfm_context,
@@ -3157,14 +3156,14 @@
         *distortion += pndist;
         *sse += pnsse;
         *skippable &= pnskip;
-        this_rd += VPXMIN(RDCOST(x->rdmult, x->rddiv, pnrate, pndist),
+        this_rd += AOMMIN(RDCOST(x->rdmult, x->rddiv, pnrate, pndist),
                           RDCOST(x->rdmult, x->rddiv, 0, pnsse));
         block += step;
       }
     }
   }
 
-  this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+  this_rd = AOMMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
                    RDCOST(x->rdmult, x->rddiv, 0, *sse));
   if (this_rd > ref_best_rd) is_cost_valid = 0;
 
@@ -3177,11 +3176,11 @@
   }
 }
 
-static int64_t select_tx_size_fix_type(const VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t select_tx_size_fix_type(const AV1_COMP *cpi, MACROBLOCK *x,
                                        int *rate, int64_t *dist, int *skippable,
                                        int64_t *sse, BLOCK_SIZE bsize,
                                        int64_t ref_best_rd, TX_TYPE tx_type) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
@@ -3189,9 +3188,9 @@
 #if CONFIG_EXT_TX
   int ext_tx_set = get_ext_tx_set(max_tx_size, bsize, is_inter);
 #endif  // CONFIG_EXT_TX
-  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
-  int s0 = vp10_cost_bit(skip_prob, 0);
-  int s1 = vp10_cost_bit(skip_prob, 1);
+  aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+  int s0 = av1_cost_bit(skip_prob, 0);
+  int s1 = av1_cost_bit(skip_prob, 1);
   int64_t rd;
 
   mbmi->tx_type = tx_type;
@@ -3229,12 +3228,12 @@
     rd = RDCOST(x->rdmult, x->rddiv, *rate + s0, *dist);
 
   if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*skippable))
-    rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+    rd = AOMMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
 
   return rd;
 }
 
-static void select_tx_type_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void select_tx_type_yrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                int64_t *distortion, int *skippable,
                                int64_t *sse, BLOCK_SIZE bsize,
                                int64_t ref_best_rd) {
@@ -3320,7 +3319,7 @@
   memcpy(x->blk_skip[0], best_blk_skip, sizeof(best_blk_skip[0]) * n4);
 }
 
-static void tx_block_rd(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+static void tx_block_rd(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
                         int blk_col, int plane, int block, TX_SIZE tx_size,
                         BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *above_ctx,
                         ENTROPY_CONTEXT *left_ctx, int *rate, int64_t *dist,
@@ -3372,8 +3371,8 @@
       default: assert(0 && "Invalid transform size."); break;
     }
     coeff_ctx = combine_entropy_contexts(ta[0], tl[0]);
-    vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
-                       plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+    av1_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+                      plane_bsize, coeff_ctx, rate, dist, bsse, skip);
     for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i)
       ta[i] = !(p->eobs[block] == 0);
     for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i)
@@ -3398,7 +3397,7 @@
 
 // Return value 0: early termination triggered, no valid rd cost available;
 //              1: rd cost values are valid.
-static int inter_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int inter_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             BLOCK_SIZE bsize, int64_t ref_best_rd) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -3412,7 +3411,7 @@
   if (is_inter_block(mbmi) && is_cost_valid) {
     int plane;
     for (plane = 1; plane < MAX_MB_PLANE; ++plane)
-      vp10_subtract_plane(x, bsize, plane);
+      av1_subtract_plane(x, bsize, plane);
   }
 
   *rate = 0;
@@ -3435,7 +3434,7 @@
     ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
     ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
 
-    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ta, tl);
+    av1_get_entropy_contexts(bsize, TX_4X4, pd, ta, tl);
 
     for (idy = 0; idy < mi_height; idy += bh) {
       for (idx = 0; idx < mi_width; idx += bh) {
@@ -3456,7 +3455,7 @@
     *sse += pnsse;
     *skippable &= pnskip;
 
-    this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+    this_rd = AOMMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
                      RDCOST(x->rdmult, x->rddiv, 0, *sse));
 
     if (this_rd > ref_best_rd) {
@@ -3479,7 +3478,7 @@
 
 // Return value 0: early termination triggered, no valid rd cost available;
 //              1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int super_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             BLOCK_SIZE bsize, int64_t ref_best_rd) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -3495,7 +3494,7 @@
   if (is_inter_block(mbmi) && is_cost_valid) {
     int plane;
     for (plane = 1; plane < MAX_MB_PLANE; ++plane)
-      vp10_subtract_plane(x, bsize, plane);
+      av1_subtract_plane(x, bsize, plane);
   }
 
   *rate = 0;
@@ -3533,7 +3532,7 @@
 }
 
 static void rd_pick_palette_intra_sbuv(
-    VP10_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
+    AV1_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
     PALETTE_MODE_INFO *palette_mode_info, uint8_t *best_palette_color_map,
     PREDICTION_MODE *mode_selected, int64_t *best_rd, int *rate,
     int *rate_tokenonly, int64_t *distortion, int *skippable) {
@@ -3557,19 +3556,19 @@
   mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
 #endif  // CONFIG_EXT_INTRA
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cpi->common.use_highbitdepth) {
-    colors_u = vp10_count_colors_highbd(src_u, src_stride, rows, cols,
-                                        cpi->common.bit_depth);
-    colors_v = vp10_count_colors_highbd(src_v, src_stride, rows, cols,
-                                        cpi->common.bit_depth);
+    colors_u = av1_count_colors_highbd(src_u, src_stride, rows, cols,
+                                       cpi->common.bit_depth);
+    colors_v = av1_count_colors_highbd(src_v, src_stride, rows, cols,
+                                       cpi->common.bit_depth);
   } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    colors_u = vp10_count_colors(src_u, src_stride, rows, cols);
-    colors_v = vp10_count_colors(src_v, src_stride, rows, cols);
-#if CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+    colors_u = av1_count_colors(src_u, src_stride, rows, cols);
+    colors_v = av1_count_colors(src_v, src_stride, rows, cols);
+#if CONFIG_AOM_HIGHBITDEPTH
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   colors = colors_u > colors_v ? colors_u : colors_v;
   if (colors > 1 && colors <= 64) {
@@ -3585,7 +3584,7 @@
     uint8_t *const color_map = xd->plane[1].color_index_map;
     PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *src_u16 = CONVERT_TO_SHORTPTR(src_u);
     uint16_t *src_v16 = CONVERT_TO_SHORTPTR(src_v);
     if (cpi->common.use_highbitdepth) {
@@ -3594,14 +3593,14 @@
       lb_v = src_v16[0];
       ub_v = src_v16[0];
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       lb_u = src_u[0];
       ub_u = src_u[0];
       lb_v = src_v[0];
       ub_v = src_v[0];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     mbmi->uv_mode = DC_PRED;
 #if CONFIG_EXT_INTRA
@@ -3609,21 +3608,21 @@
 #endif  // CONFIG_EXT_INTRA
     for (r = 0; r < rows; ++r) {
       for (c = 0; c < cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cpi->common.use_highbitdepth) {
           val_u = src_u16[r * src_stride + c];
           val_v = src_v16[r * src_stride + c];
           data[(r * cols + c) * 2] = val_u;
           data[(r * cols + c) * 2 + 1] = val_v;
         } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
           val_u = src_u[r * src_stride + c];
           val_v = src_v[r * src_stride + c];
           data[(r * cols + c) * 2] = val_u;
           data[(r * cols + c) * 2 + 1] = val_v;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         if (val_u < lb_u)
           lb_u = val_u;
         else if (val_u > ub_u)
@@ -3641,16 +3640,16 @@
         centroids[i * 2] = lb_u + (2 * i + 1) * (ub_u - lb_u) / n / 2;
         centroids[i * 2 + 1] = lb_v + (2 * i + 1) * (ub_v - lb_v) / n / 2;
       }
-      vp10_k_means(data, centroids, color_map, rows * cols, n, 2, max_itr);
+      av1_k_means(data, centroids, color_map, rows * cols, n, 2, max_itr);
       pmi->palette_size[1] = n;
       for (i = 1; i < 3; ++i) {
         for (j = 0; j < n; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (cpi->common.use_highbitdepth)
             pmi->palette_colors[i * PALETTE_MAX_SIZE + j] = clip_pixel_highbd(
                 (int)centroids[j * 2 + i - 1], cpi->common.bit_depth);
           else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
             pmi->palette_colors[i * PALETTE_MAX_SIZE + j] =
                 clip_pixel((int)centroids[j * 2 + i - 1]);
         }
@@ -3661,16 +3660,16 @@
       if (this_rate_tokenonly == INT_MAX) continue;
       this_rate =
           this_rate_tokenonly + dc_mode_cost +
-          2 * cpi->common.bit_depth * n * vp10_cost_bit(128, 0) +
+          2 * cpi->common.bit_depth * n * av1_cost_bit(128, 0) +
           cpi->palette_uv_size_cost[bsize - BLOCK_8X8][n - 2] +
           write_uniform_cost(n, color_map[0]) +
-          vp10_cost_bit(
-              vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 1);
+          av1_cost_bit(
+              av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 1);
 
       for (i = 0; i < rows; ++i) {
         for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
-          color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, n,
-                                                     color_order);
+          color_ctx = av1_get_palette_color_context(color_map, cols, i, j, n,
+                                                    color_order);
           for (r = 0; r < n; ++r)
             if (color_map[i * cols + j] == color_order[r]) {
               color_idx = r;
@@ -3699,7 +3698,7 @@
 
 #if CONFIG_EXT_INTRA
 // Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_ext_intra_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                   int *rate_tokenonly, int64_t *distortion,
                                   int *skippable, BLOCK_SIZE bsize,
                                   int64_t *best_rd) {
@@ -3711,7 +3710,7 @@
   EXT_INTRA_MODE mode;
   EXT_INTRA_MODE_INFO ext_intra_mode_info;
 
-  vp10_zero(ext_intra_mode_info);
+  av1_zero(ext_intra_mode_info);
   mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 1;
   mbmi->uv_mode = DC_PRED;
   mbmi->palette_mode_info.palette_size[1] = 0;
@@ -3723,7 +3722,7 @@
       continue;
 
     this_rate = this_rate_tokenonly +
-                vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 1) +
+                av1_cost_bit(cpi->common.fc->ext_intra_probs[1], 1) +
                 cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode] +
                 write_uniform_cost(FILTER_INTRA_MODES, mode);
     this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
@@ -3750,7 +3749,7 @@
   }
 }
 
-static void pick_intra_angle_routine_sbuv(VP10_COMP *cpi, MACROBLOCK *x,
+static void pick_intra_angle_routine_sbuv(AV1_COMP *cpi, MACROBLOCK *x,
                                           int *rate, int *rate_tokenonly,
                                           int64_t *distortion, int *skippable,
                                           int *best_angle_delta,
@@ -3776,7 +3775,7 @@
   }
 }
 
-static int rd_pick_intra_angle_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int rd_pick_intra_angle_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                     int *rate_tokenonly, int64_t *distortion,
                                     int *skippable, BLOCK_SIZE bsize,
                                     int rate_overhead, int64_t best_rd) {
@@ -3846,7 +3845,7 @@
 }
 #endif  // CONFIG_EXT_INTRA
 
-static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sbuv_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                        int *rate_tokenonly, int64_t *distortion,
                                        int *skippable, BLOCK_SIZE bsize,
                                        TX_SIZE max_tx_size) {
@@ -3898,7 +3897,7 @@
                                       MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
     if (mbmi->sb_type >= BLOCK_8X8 && mode == DC_PRED &&
         ALLOW_FILTER_INTRA_MODES)
-      this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 0);
+      this_rate += av1_cost_bit(cpi->common.fc->ext_intra_probs[1], 0);
 #else
     if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
                           &this_sse, bsize, best_rd))
@@ -3907,8 +3906,8 @@
 #endif  // CONFIG_EXT_INTRA
     if (cpi->common.allow_screen_content_tools && mbmi->sb_type >= BLOCK_8X8 &&
         mode == DC_PRED)
-      this_rate += vp10_cost_bit(
-          vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 0);
+      this_rate += av1_cost_bit(
+          av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 0);
 
     this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
 
@@ -3964,7 +3963,7 @@
   return best_rd;
 }
 
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_sbuv_dcpred(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                               int *rate_tokenonly, int64_t *distortion,
                               int *skippable, BLOCK_SIZE bsize) {
   int64_t unused;
@@ -3977,7 +3976,7 @@
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
 
-static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
+static void choose_intra_uv_mode(AV1_COMP *cpi, MACROBLOCK *const x,
                                  PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
                                  TX_SIZE max_tx_size, int *rate_uv,
                                  int *rate_uv_tokenonly, int64_t *dist_uv,
@@ -3998,7 +3997,7 @@
   *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
 }
 
-static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+static int cost_mv_ref(const AV1_COMP *cpi, PREDICTION_MODE mode,
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
                        int is_compound,
 #endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4070,7 +4069,7 @@
 }
 
 static int set_and_cost_bmi_mvs(
-    VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
+    AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
     int_mv this_mv[2], int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME],
     int_mv seg_mvs[TOTAL_REFS_PER_FRAME],
 #if CONFIG_EXT_INTER
@@ -4095,29 +4094,28 @@
       this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
 #if CONFIG_EXT_INTER
       if (!cpi->common.allow_high_precision_mv ||
-          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+          !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
         lower_mv_precision(&this_mv[0].as_mv, 0);
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_REF_MV
       for (idx = 0; idx < 1 + is_compound; ++idx) {
         this_mv[idx] = seg_mvs[mbmi->ref_frame[idx]];
-        vp10_set_mvcost(x, mbmi->ref_frame[idx]);
+        av1_set_mvcost(x, mbmi->ref_frame[idx]);
         thismvcost +=
-            vp10_mv_bit_cost(&this_mv[idx].as_mv, &best_ref_mv[idx]->as_mv,
-                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT_SUB);
+            av1_mv_bit_cost(&this_mv[idx].as_mv, &best_ref_mv[idx]->as_mv,
+                            x->nmvjointcost, x->mvcost, MV_COST_WEIGHT_SUB);
       }
       (void)mvjcost;
       (void)mvcost;
 #else
-      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
-                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
 #if !CONFIG_EXT_INTER
       if (is_compound) {
         this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
-        thismvcost +=
-            vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
-                             mvcost, MV_COST_WEIGHT_SUB);
+        thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       }
 #endif  // !CONFIG_EXT_INTER
 #endif
@@ -4143,24 +4141,24 @@
         this_mv[1].as_int = compound_seg_newmvs[1].as_int;
       }
       if (!cpi->common.allow_high_precision_mv ||
-          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+          !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
         lower_mv_precision(&this_mv[0].as_mv, 0);
       if (!cpi->common.allow_high_precision_mv ||
-          !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+          !av1_use_mv_hp(&best_ref_mv[1]->as_mv))
         lower_mv_precision(&this_mv[1].as_mv, 0);
-      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
-                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
-      thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
-                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       break;
     case NEW_NEARMV:
     case NEW_NEARESTMV:
       this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
       if (!cpi->common.allow_high_precision_mv ||
-          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+          !av1_use_mv_hp(&best_ref_mv[0]->as_mv))
         lower_mv_precision(&this_mv[0].as_mv, 0);
-      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
-                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
       break;
     case NEAR_NEWMV:
@@ -4168,10 +4166,10 @@
       this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
       this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
       if (!cpi->common.allow_high_precision_mv ||
-          !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+          !av1_use_mv_hp(&best_ref_mv[1]->as_mv))
         lower_mv_precision(&this_mv[1].as_mv, 0);
-      thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
-                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       break;
     case NEAREST_NEARMV:
     case NEAR_NEARESTMV:
@@ -4213,8 +4211,8 @@
     mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
   else
 #endif  // CONFIG_EXT_INTER
-    mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                          mbmi->ref_frame, mbmi->sb_type, i);
+    mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                         mbmi->ref_frame, mbmi->sb_type, i);
 #endif
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
   return cost_mv_ref(cpi, mode, is_compound, mode_ctx) + thismvcost;
@@ -4223,7 +4221,7 @@
 #endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
 }
 
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t encode_inter_mb_segment(AV1_COMP *cpi, MACROBLOCK *x,
                                        int64_t best_yrd, int i, int *labelyrate,
                                        int64_t *distortion, int64_t *sse,
                                        ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
@@ -4238,9 +4236,9 @@
   const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
   int idx, idy;
   const uint8_t *const src =
-      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+      &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
   uint8_t *const dst =
-      &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+      &pd->dst.buf[av1_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
   int64_t thisdistortion = 0, thissse = 0;
   int thisrate = 0;
   TX_SIZE tx_size = mi->mbmi.tx_size;
@@ -4259,24 +4257,23 @@
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
   assert(tx_type == DCT_DCT);
 
-  vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
+  av1_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_subtract_block(
-        height, width,
-        vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
-        p->src.stride, dst, pd->dst.stride, xd->bd);
+    aom_highbd_subtract_block(
+        height, width, av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
   } else {
-    vpx_subtract_block(height, width, vp10_raster_block_offset_int16(
-                                          BLOCK_8X8, i, p->src_diff),
+    aom_subtract_block(height, width,
+                       av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                        8, src, p->src.stride, dst, pd->dst.stride);
   }
 #else
-  vpx_subtract_block(height, width,
-                     vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+  aom_subtract_block(height, width,
+                     av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                      8, src, p->src.stride, dst, pd->dst.stride);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   k = i;
   for (idy = 0; idy < height / 4; idy += num_4x4_h) {
@@ -4291,14 +4288,14 @@
         block = (i ? 2 : 0);
       coeff_ctx = combine_entropy_contexts(*(ta + (k & 1)), *(tl + (k >> 1)));
 #if CONFIG_NEW_QUANT
-      vp10_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
-                              BLOCK_8X8, tx_size, coeff_ctx);
+      av1_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
+                             BLOCK_8X8, tx_size, coeff_ctx);
 #else
-      vp10_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
-                       tx_size, VP10_XFORM_QUANT_FP);
+      av1_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
+                      tx_size, AV1_XFORM_QUANT_FP);
 #endif  // CONFIG_NEW_QUANT
       if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
-        vp10_optimize_b(x, 0, block, tx_size, coeff_ctx);
+        av1_optimize_b(x, 0, block, tx_size, coeff_ctx);
       dist_block(cpi, x, 0, block, idy + (i >> 1), idx + (i & 0x1), tx_size,
                  &dist, &ssz);
       thisdistortion += dist;
@@ -4323,7 +4320,7 @@
 #endif  // CONFIG_VAR_TX
       rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion);
       rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse);
-      rd = VPXMIN(rd1, rd2);
+      rd = AOMMIN(rd1, rd2);
       if (rd >= best_yrd) return INT64_MAX;
     }
   }
@@ -4382,15 +4379,14 @@
   struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
 
   p->src.buf =
-      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+      &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
   assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
   pd->pre[0].buf =
-      &pd->pre[0]
-           .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
+      &pd->pre[0].buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
   if (has_second_ref(mbmi))
     pd->pre[1].buf =
         &pd->pre[1]
-             .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
+             .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
 }
 
 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -4404,7 +4400,7 @@
 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
 // TODO(aconverse): Find out if this is still productive then clean up or remove
 static int check_best_zero_mv(
-    const VP10_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
+    const AV1_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
     const int16_t compound_mode_context[TOTAL_REFS_PER_FRAME],
 #endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4421,7 +4417,7 @@
        frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
 #if CONFIG_REF_MV
     int16_t rfc =
-        vp10_mode_context_analyzer(mode_context, ref_frames, bsize, block);
+        av1_mode_context_analyzer(mode_context, ref_frames, bsize, block);
 #else
     int16_t rfc = mode_context[ref_frames[0]];
 #endif
@@ -4506,14 +4502,14 @@
   return 1;
 }
 
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                                 int_mv *frame_mv, int mi_row, int mi_col,
 #if CONFIG_EXT_INTER
                                 int_mv *ref_mv_sub8x8[2],
 #endif
                                 int_mv single_newmv[TOTAL_REFS_PER_FRAME],
                                 int *rate_mv, const int block) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
   const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
   MACROBLOCKD *xd = &x->e_mbd;
@@ -4536,17 +4532,17 @@
   struct buf_2d backup_yv12[2][MAX_MB_PLANE];
   int last_besterr[2] = { INT_MAX, INT_MAX };
   const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
-    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
-    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
+    av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
+    av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
   };
 
 // Prediction buffer from second frame.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
   uint8_t *second_pred;
 #else
   DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   for (ref = 0; ref < 2; ++ref) {
 #if CONFIG_EXT_INTER
@@ -4563,8 +4559,8 @@
       // motion search code to be used without additional modifications.
       for (i = 0; i < MAX_MB_PLANE; i++)
         backup_yv12[ref][i] = xd->plane[i].pre[ref];
-      vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
-                            NULL);
+      av1_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
+                           NULL);
     }
 
     frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
@@ -4572,13 +4568,13 @@
 
 // Since we have scaled the reference frames to match the size of the current
 // frame we must use a unit scaling factor during mode selection.
-#if CONFIG_VP9_HIGHBITDEPTH
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
-                                     cm->height, cm->use_highbitdepth);
+#if CONFIG_AOM_HIGHBITDEPTH
+  av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                    cm->height, cm->use_highbitdepth);
 #else
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
-                                     cm->height);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+  av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                    cm->height);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Allow joint search multiple times iteratively for each reference frame
   // and break out of the search loop if it couldn't find a better mv.
@@ -4610,30 +4606,30 @@
 #endif
 
 // Get the prediction block from the 'other' reference frame.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
-      vp10_highbd_build_inter_predictor(
+      av1_highbd_build_inter_predictor(
           ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
           &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, interp_filter,
           MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
     } else {
       second_pred = (uint8_t *)second_pred_alloc_16;
-      vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
-                                 second_pred, pw, &frame_mv[refs[!id]].as_mv,
-                                 &sf, pw, ph, 0, interp_filter, MV_PRECISION_Q3,
-                                 mi_col * MI_SIZE, mi_row * MI_SIZE);
+      av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                                second_pred, pw, &frame_mv[refs[!id]].as_mv,
+                                &sf, pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+                                mi_col * MI_SIZE, mi_row * MI_SIZE);
     }
 #else
-    vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
-                               second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
-                               pw, ph, 0, interp_filter, MV_PRECISION_Q3,
-                               mi_col * MI_SIZE, mi_row * MI_SIZE);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                              second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+                              pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+                              mi_col * MI_SIZE, mi_row * MI_SIZE);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     // Do compound motion search on the current reference frame.
     if (id) xd->plane[0].pre[0] = ref_yv12[id];
-    vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
+    av1_set_mv_search_range(x, &ref_mv[id].as_mv);
 
     // Use the mv result from the single mode as mv predictor.
     *best_mv = frame_mv[refs[id]].as_mv;
@@ -4642,16 +4638,16 @@
     best_mv->row >>= 3;
 
 #if CONFIG_REF_MV
-    vp10_set_mvcost(x, refs[id]);
+    av1_set_mvcost(x, refs[id]);
 #endif
 
     // Small-range full-pixel motion search.
     bestsme =
-        vp10_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
-                                  &ref_mv[id].as_mv, second_pred);
+        av1_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
+                                 &ref_mv[id].as_mv, second_pred);
     if (bestsme < INT_MAX)
-      bestsme = vp10_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
-                                       second_pred, &cpi->fn_ptr[bsize], 1);
+      bestsme = av1_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
+                                      second_pred, &cpi->fn_ptr[bsize], 1);
 
     x->mv_col_min = tmp_col_min;
     x->mv_col_max = tmp_col_max;
@@ -4678,8 +4674,8 @@
         // If bsize < BLOCK_8X8, adjust pred pointer for this block
         if (bsize < BLOCK_8X8)
           pd->pre[0].buf =
-              &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, block,
-                                                        pd->pre[0].stride))
+              &pd->pre[0].buf[(av1_raster_block_offset(BLOCK_8X8, block,
+                                                       pd->pre[0].stride))
                               << 3];
 
         bestsme = cpi->find_fractional_mv_step(
@@ -4721,25 +4717,25 @@
         xd->plane[i].pre[ref] = backup_yv12[ref][i];
     }
 #if CONFIG_REF_MV
-    vp10_set_mvcost(x, refs[ref]);
+    av1_set_mvcost(x, refs[ref]);
 #endif
 #if CONFIG_EXT_INTER
     if (bsize >= BLOCK_8X8)
 #endif  // CONFIG_EXT_INTER
-      *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
-                                   &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
-                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+      *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+                                  &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
+                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
 #if CONFIG_EXT_INTER
     else
-      *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
-                                   &ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
-                                   x->mvcost, MV_COST_WEIGHT);
+      *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+                                  &ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
+                                  x->mvcost, MV_COST_WEIGHT);
 #endif  // CONFIG_EXT_INTER
   }
 }
 
 static int64_t rd_pick_best_sub8x8_mode(
-    VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+    AV1_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
     int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
     int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
     int mvthresh,
@@ -4761,7 +4757,7 @@
   int k, br = 0, idx, idy;
   int64_t bd = 0, block_sse = 0;
   PREDICTION_MODE this_mode;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   struct macroblock_plane *const p = &x->plane[0];
   struct macroblockd_plane *const pd = &xd->plane[0];
   const int label_count = 4;
@@ -4783,7 +4779,7 @@
   mbmi->tx_size = TX_4X4;
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
 
-  vp10_zero(*bsi);
+  av1_zero(*bsi);
 
   bsi->segment_rd = best_rd;
   bsi->ref_mv[0] = best_ref_mv;
@@ -4838,19 +4834,19 @@
         const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
 #if CONFIG_EXT_INTER
         int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
-        vp10_update_mv_context(xd, mi, frame, mv_ref_list, i, mi_row, mi_col,
-                               NULL);
+        av1_update_mv_context(xd, mi, frame, mv_ref_list, i, mi_row, mi_col,
+                              NULL);
 #endif  // CONFIG_EXT_INTER
         frame_mv[ZEROMV][frame].as_int = 0;
-        vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
+        av1_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
 #if CONFIG_REF_MV
-                                       ref_mv_stack[ref], &ref_mv_count[ref],
+                                      ref_mv_stack[ref], &ref_mv_count[ref],
 #endif
 #if CONFIG_EXT_INTER
-                                       mv_ref_list,
+                                      mv_ref_list,
 #endif  // CONFIG_EXT_INTER
-                                       &frame_mv[NEARESTMV][frame],
-                                       &frame_mv[NEARMV][frame]);
+                                      &frame_mv[NEARESTMV][frame],
+                                      &frame_mv[NEARMV][frame]);
 
 #if CONFIG_REF_MV
         tmp_ref_mv[ref] = frame_mv[NEARESTMV][mbmi->ref_frame[ref]];
@@ -4862,9 +4858,8 @@
 #if CONFIG_EXT_INTER
         mv_ref_list[0].as_int = frame_mv[NEARESTMV][frame].as_int;
         mv_ref_list[1].as_int = frame_mv[NEARMV][frame].as_int;
-        vp10_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
-                               &ref_mvs_sub8x8[0][ref],
-                               &ref_mvs_sub8x8[1][ref]);
+        av1_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
+                              &ref_mvs_sub8x8[0][ref], &ref_mvs_sub8x8[1][ref]);
 
         if (has_second_rf) {
           frame_mv[ZERO_ZEROMV][frame].as_int = 0;
@@ -4996,7 +4991,7 @@
 #if CONFIG_EXT_INTER
             have_newmv_in_inter_mode(this_mode) &&
             (seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV ||
-             vp10_use_mv_hp(&bsi->ref_mv[0]->as_mv) == 0)
+             av1_use_mv_hp(&bsi->ref_mv[0]->as_mv) == 0)
 #else
             this_mode == NEWMV &&
             (seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV ||
@@ -5035,14 +5030,14 @@
             max_mv = x->max_mv_context[mbmi->ref_frame[0]];
           else
             max_mv =
-                VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
+                AOMMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
 
           if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
             // Take wtd average of the step_params based on the last frame's
             // max mv magnitude and the best ref mvs of the current block for
             // the given reference.
             step_param =
-                (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
+                (av1_init_search_range(max_mv) + cpi->mv_step_param) / 2;
           } else {
             step_param = cpi->mv_step_param;
           }
@@ -5058,20 +5053,20 @@
           if (cpi->sf.adaptive_motion_search) {
             mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
             mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
-            step_param = VPXMAX(step_param, 8);
+            step_param = AOMMAX(step_param, 8);
           }
 
           // adjust src pointer for this block
           mi_buf_shift(x, i);
 
-          vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
+          av1_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
 
           x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
 
 #if CONFIG_REF_MV
-          vp10_set_mvcost(x, mbmi->ref_frame[0]);
+          av1_set_mvcost(x, mbmi->ref_frame[0]);
 #endif
-          bestsme = vp10_full_pixel_search(
+          bestsme = av1_full_pixel_search(
               cpi, x, bsize, &mvp_full, step_param, sadpb,
               cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
               &bsi->ref_mv[0]->as_mv, INT_MAX, 1);
@@ -5105,8 +5100,8 @@
 
               // adjust pred pointer for this block
               pd->pre[0].buf =
-                  &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, i,
-                                                            pd->pre[0].stride))
+                  &pd->pre[0].buf[(av1_raster_block_offset(BLOCK_8X8, i,
+                                                           pd->pre[0].stride))
                                   << 3];
 
               best_mv_var = cpi->find_fractional_mv_step(
@@ -5122,10 +5117,10 @@
                 int this_var;
                 MV best_mv = x->best_mv.as_mv;
                 const MV ref_mv = bsi->ref_mv[0]->as_mv;
-                const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
-                const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
-                const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
-                const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+                const int minc = AOMMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+                const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+                const int minr = AOMMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+                const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
 
                 x->best_mv = x->second_best_mv;
                 if (x->best_mv.as_mv.row * 8 <= maxr &&
@@ -5504,18 +5499,18 @@
   *returntotrate = bsi->r;
   *returndistortion = bsi->d;
   *returnyrate = bsi->segment_yrate;
-  *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
+  *skippable = av1_is_skippable_in_plane(x, BLOCK_8X8, 0);
   *psse = bsi->sse;
   mbmi->mode = bsi->modes[3];
 
   return bsi->segment_rd;
 }
 
-static void estimate_ref_frame_costs(const VP10_COMMON *cm,
+static void estimate_ref_frame_costs(const AV1_COMMON *cm,
                                      const MACROBLOCKD *xd, int segment_id,
                                      unsigned int *ref_costs_single,
                                      unsigned int *ref_costs_comp,
-                                     vpx_prob *comp_mode_p) {
+                                     aom_prob *comp_mode_p) {
   int seg_ref_active =
       segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
   if (seg_ref_active) {
@@ -5524,28 +5519,28 @@
     memset(ref_costs_comp, 0, TOTAL_REFS_PER_FRAME * sizeof(*ref_costs_comp));
     *comp_mode_p = 128;
   } else {
-    vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
-    vpx_prob comp_inter_p = 128;
+    aom_prob intra_inter_p = av1_get_intra_inter_prob(cm, xd);
+    aom_prob comp_inter_p = 128;
 
     if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-      comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
+      comp_inter_p = av1_get_reference_mode_prob(cm, xd);
       *comp_mode_p = comp_inter_p;
     } else {
       *comp_mode_p = 128;
     }
 
-    ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
+    ref_costs_single[INTRA_FRAME] = av1_cost_bit(intra_inter_p, 0);
 
     if (cm->reference_mode != COMPOUND_REFERENCE) {
-      vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
-      vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
+      aom_prob ref_single_p1 = av1_get_pred_prob_single_ref_p1(cm, xd);
+      aom_prob ref_single_p2 = av1_get_pred_prob_single_ref_p2(cm, xd);
 #if CONFIG_EXT_REFS
-      vpx_prob ref_single_p3 = vp10_get_pred_prob_single_ref_p3(cm, xd);
-      vpx_prob ref_single_p4 = vp10_get_pred_prob_single_ref_p4(cm, xd);
-      vpx_prob ref_single_p5 = vp10_get_pred_prob_single_ref_p5(cm, xd);
+      aom_prob ref_single_p3 = av1_get_pred_prob_single_ref_p3(cm, xd);
+      aom_prob ref_single_p4 = av1_get_pred_prob_single_ref_p4(cm, xd);
+      aom_prob ref_single_p5 = av1_get_pred_prob_single_ref_p5(cm, xd);
 #endif  // CONFIG_EXT_REFS
 
-      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+      unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
 
       ref_costs_single[LAST_FRAME] =
 #if CONFIG_EXT_REFS
@@ -5556,33 +5551,33 @@
                       ref_costs_single[ALTREF_FRAME] = base_cost;
 
 #if CONFIG_EXT_REFS
-      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+      ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[BWDREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
 
-      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p3, 0);
-      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p3, 0);
-      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p3, 1);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p3, 1);
+      ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p3, 0);
+      ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p3, 0);
+      ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p3, 1);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p3, 1);
 
-      ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p2, 0);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+      ref_costs_single[BWDREF_FRAME] += av1_cost_bit(ref_single_p2, 0);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
 
-      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p4, 0);
-      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p4, 1);
+      ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p4, 0);
+      ref_costs_single[LAST2_FRAME] += av1_cost_bit(ref_single_p4, 1);
 
-      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p5, 0);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p5, 1);
+      ref_costs_single[LAST3_FRAME] += av1_cost_bit(ref_single_p5, 0);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p5, 1);
 #else
-      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+      ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 1);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
 
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p2, 0);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
 #endif  // CONFIG_EXT_REFS
     } else {
       ref_costs_single[LAST_FRAME] = 512;
@@ -5596,14 +5591,14 @@
     }
 
     if (cm->reference_mode != SINGLE_REFERENCE) {
-      vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
+      aom_prob ref_comp_p = av1_get_pred_prob_comp_ref_p(cm, xd);
 #if CONFIG_EXT_REFS
-      vpx_prob ref_comp_p1 = vp10_get_pred_prob_comp_ref_p1(cm, xd);
-      vpx_prob ref_comp_p2 = vp10_get_pred_prob_comp_ref_p2(cm, xd);
-      vpx_prob bwdref_comp_p = vp10_get_pred_prob_comp_bwdref_p(cm, xd);
+      aom_prob ref_comp_p1 = av1_get_pred_prob_comp_ref_p1(cm, xd);
+      aom_prob ref_comp_p2 = av1_get_pred_prob_comp_ref_p2(cm, xd);
+      aom_prob bwdref_comp_p = av1_get_pred_prob_comp_bwdref_p(cm, xd);
 #endif  // CONFIG_EXT_REFS
 
-      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+      unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
 
       ref_costs_comp[LAST_FRAME] =
 #if CONFIG_EXT_REFS
@@ -5616,24 +5611,24 @@
 #endif  // CONFIG_EXT_REFS
 
 #if CONFIG_EXT_REFS
-      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
-      ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p, 0);
-      ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p, 1);
-      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+      ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[LAST2_FRAME] += av1_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[LAST3_FRAME] += av1_cost_bit(ref_comp_p, 1);
+      ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p, 1);
 
-      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p1, 1);
-      ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p1, 0);
+      ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p1, 1);
+      ref_costs_comp[LAST2_FRAME] += av1_cost_bit(ref_comp_p1, 0);
 
-      ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p2, 0);
-      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p2, 1);
+      ref_costs_comp[LAST3_FRAME] += av1_cost_bit(ref_comp_p2, 0);
+      ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p2, 1);
 
       // NOTE(zoeliu): BWDREF and ALTREF each add an extra cost by coding 1
       //               more bit.
-      ref_costs_comp[BWDREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 0);
-      ref_costs_comp[ALTREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 1);
+      ref_costs_comp[BWDREF_FRAME] += av1_cost_bit(bwdref_comp_p, 0);
+      ref_costs_comp[ALTREF_FRAME] += av1_cost_bit(bwdref_comp_p, 1);
 #else
-      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
-      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+      ref_costs_comp[LAST_FRAME] += av1_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[GOLDEN_FRAME] += av1_cost_bit(ref_comp_p, 1);
 #endif  // CONFIG_EXT_REFS
     } else {
       ref_costs_comp[LAST_FRAME] = 512;
@@ -5667,12 +5662,12 @@
 }
 
 static void setup_buffer_inter(
-    VP10_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
+    AV1_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
     BLOCK_SIZE block_size, int mi_row, int mi_col,
     int_mv frame_nearest_mv[TOTAL_REFS_PER_FRAME],
     int_mv frame_near_mv[TOTAL_REFS_PER_FRAME],
     struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE]) {
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO *const mi = xd->mi[0];
@@ -5684,10 +5679,10 @@
 
   // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
   // use the UV scaling factors.
-  vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+  av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
 
   // Gets an initial list of candidate vectors from neighbours and orders them
-  vp10_find_mv_refs(
+  av1_find_mv_refs(
       cm, xd, mi, ref_frame,
 #if CONFIG_REF_MV
       &mbmi_ext->ref_mv_count[ref_frame], mbmi_ext->ref_mv_stack[ref_frame],
@@ -5698,26 +5693,26 @@
       candidates, mi_row, mi_col, NULL, NULL, mbmi_ext->mode_context);
 
   // Candidate refinement carried out at encoder and decoder
-  vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
-                         &frame_nearest_mv[ref_frame],
-                         &frame_near_mv[ref_frame]);
+  av1_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
+                        &frame_nearest_mv[ref_frame],
+                        &frame_near_mv[ref_frame]);
 
   // Further refinement that is encode side only to test the top few candidates
   // in full and choose the best as the centre point for subsequent searches.
   // The current implementation doesn't support scaling.
-  if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
-    vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
-                 block_size);
+  if (!av1_is_scaled(sf) && block_size >= BLOCK_8X8)
+    av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+                block_size);
 }
 
-static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
-                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
+static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                                 int mi_row, int mi_col,
 #if CONFIG_EXT_INTER
                                  int ref_idx, int mv_idx,
 #endif  // CONFIG_EXT_INTER
                                  int *rate_mv) {
   MACROBLOCKD *xd = &x->e_mbd;
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
   int bestsme = INT_MAX;
@@ -5740,7 +5735,7 @@
   int cost_list[5];
 
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, ref);
+      av1_get_scaled_ref_frame(cpi, ref);
 
   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -5748,7 +5743,7 @@
   pred_mv[2] = x->pred_mv[ref];
 
 #if CONFIG_REF_MV
-  vp10_set_mvcost(x, ref);
+  av1_set_mvcost(x, ref);
 #endif
 
   if (scaled_ref_frame) {
@@ -5759,17 +5754,17 @@
     for (i = 0; i < MAX_MB_PLANE; i++)
       backup_yv12[i] = xd->plane[i].pre[ref_idx];
 
-    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
   // Work out the size of the first step in the mv step search.
-  // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
+  // 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
   if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
     // Take wtd average of the step_params based on the last frame's
     // max mv magnitude and that based on the best ref mvs of the current
     // block for the given reference.
     step_param =
-        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
         2;
   } else {
     step_param = cpi->mv_step_param;
@@ -5778,8 +5773,8 @@
   if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
     int boffset =
         2 * (b_width_log2_lookup[cm->sb_size] -
-             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
-    step_param = VPXMAX(step_param, boffset);
+             AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = AOMMAX(step_param, boffset);
   }
 
   if (cpi->sf.adaptive_motion_search) {
@@ -5809,7 +5804,7 @@
     }
   }
 
-  vp10_set_mv_search_range(x, &ref_mv);
+  av1_set_mv_search_range(x, &ref_mv);
 
   mvp_full = pred_mv[x->mv_best_ref_index[ref]];
 
@@ -5818,9 +5813,9 @@
 
   x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
 
-  bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
-                                   cond_cost_list(cpi, cost_list), &ref_mv,
-                                   INT_MAX, 1);
+  bestsme = av1_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+                                  cond_cost_list(cpi, cost_list), &ref_mv,
+                                  INT_MAX, 1);
 
   x->mv_col_min = tmp_col_min;
   x->mv_col_max = tmp_col_max;
@@ -5854,10 +5849,10 @@
           x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph, 1);
 
       if (try_second) {
-        const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
-        const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
-        const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
-        const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+        const int minc = AOMMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+        const int maxc = AOMMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+        const int minr = AOMMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+        const int maxr = AOMMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
         int this_var;
         MV best_mv = x->best_mv.as_mv;
 
@@ -5887,8 +5882,8 @@
           x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0, 0);
     }
   }
-  *rate_mv = vp10_mv_bit_cost(&x->best_mv.as_mv, &ref_mv, x->nmvjointcost,
-                              x->mvcost, MV_COST_WEIGHT);
+  *rate_mv = av1_mv_bit_cost(&x->best_mv.as_mv, &ref_mv, x->nmvjointcost,
+                             x->mvcost, MV_COST_WEIGHT);
 
   if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = x->best_mv.as_mv;
 
@@ -5910,7 +5905,7 @@
 }
 
 #if CONFIG_OBMC
-static void single_motion_search_obmc(VP10_COMP *cpi, MACROBLOCK *x,
+static void single_motion_search_obmc(AV1_COMP *cpi, MACROBLOCK *x,
                                       BLOCK_SIZE bsize, int mi_row, int mi_col,
                                       const int32_t *wsrc, const int32_t *mask,
 #if CONFIG_EXT_INTER
@@ -5919,7 +5914,7 @@
                                       int_mv *tmp_mv, int_mv pred_mv,
                                       int *rate_mv) {
   MACROBLOCKD *xd = &x->e_mbd;
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
   int bestsme = INT_MAX;
@@ -5941,10 +5936,10 @@
   int tmp_row_max = x->mv_row_max;
 
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, ref);
+      av1_get_scaled_ref_frame(cpi, ref);
 
 #if CONFIG_REF_MV
-  vp10_set_mvcost(x, ref);
+  av1_set_mvcost(x, ref);
 #endif
 
   if (scaled_ref_frame) {
@@ -5955,17 +5950,17 @@
     for (i = 0; i < MAX_MB_PLANE; i++)
       backup_yv12[i] = xd->plane[i].pre[ref_idx];
 
-    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
   // Work out the size of the first step in the mv step search.
-  // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
+  // 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
   if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
     // Take wtd average of the step_params based on the last frame's
     // max mv magnitude and that based on the best ref mvs of the current
     // block for the given reference.
     step_param =
-        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
         2;
   } else {
     step_param = cpi->mv_step_param;
@@ -5974,8 +5969,8 @@
   if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
     int boffset =
         2 * (b_width_log2_lookup[cm->sb_size] -
-             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
-    step_param = VPXMAX(step_param, boffset);
+             AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = AOMMAX(step_param, boffset);
   }
 
   if (cpi->sf.adaptive_motion_search) {
@@ -6005,13 +6000,13 @@
     }
   }
 
-  vp10_set_mv_search_range(x, &ref_mv);
+  av1_set_mv_search_range(x, &ref_mv);
 
   mvp_full = pred_mv.as_mv;
   mvp_full.col >>= 3;
   mvp_full.row >>= 3;
 
-  bestsme = vp10_obmc_full_pixel_diamond(
+  bestsme = av1_obmc_full_pixel_diamond(
       cpi, x, wsrc, mask, &mvp_full, step_param, sadpb,
       MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
       &tmp_mv->as_mv, ref_idx);
@@ -6023,15 +6018,15 @@
 
   if (bestsme < INT_MAX) {
     int dis;
-    vp10_find_best_obmc_sub_pixel_tree_up(
+    av1_find_best_obmc_sub_pixel_tree_up(
         cpi, x, wsrc, mask, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
         cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
         cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
         x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
         cpi->sf.use_upsampled_references);
   }
-  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
-                              x->mvcost, MV_COST_WEIGHT);
+  *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+                             x->mvcost, MV_COST_WEIGHT);
 
   if (scaled_ref_frame) {
     int i;
@@ -6042,13 +6037,13 @@
 #endif  // CONFIG_OBMC
 
 #if CONFIG_EXT_INTER
-static void do_masked_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void do_masked_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
                                     const uint8_t *mask, int mask_stride,
                                     BLOCK_SIZE bsize, int mi_row, int mi_col,
                                     int_mv *tmp_mv, int *rate_mv, int ref_idx,
                                     int mv_idx) {
   MACROBLOCKD *xd = &x->e_mbd;
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
   int bestsme = INT_MAX;
@@ -6064,7 +6059,7 @@
   int tmp_row_max = x->mv_row_max;
 
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, ref);
+      av1_get_scaled_ref_frame(cpi, ref);
 
   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -6072,7 +6067,7 @@
   pred_mv[2] = x->pred_mv[ref];
 
 #if CONFIG_REF_MV
-  vp10_set_mvcost(x, ref);
+  av1_set_mvcost(x, ref);
 #endif
 
   if (scaled_ref_frame) {
@@ -6083,10 +6078,10 @@
     for (i = 0; i < MAX_MB_PLANE; i++)
       backup_yv12[i] = xd->plane[i].pre[ref_idx];
 
-    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
-  vp10_set_mv_search_range(x, &ref_mv);
+  av1_set_mv_search_range(x, &ref_mv);
 
   // Work out the size of the first step in the mv step search.
   // 0 here is maximum length first step. 1 is MAX >> 1 etc.
@@ -6095,7 +6090,7 @@
     // max mv magnitude and that based on the best ref mvs of the current
     // block for the given reference.
     step_param =
-        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
         2;
   } else {
     step_param = cpi->mv_step_param;
@@ -6105,8 +6100,8 @@
   if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size && cm->show_frame) {
     int boffset =
         2 * (b_width_log2_lookup[cm->sb_size] -
-             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
-    step_param = VPXMAX(step_param, boffset);
+             AOMMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = AOMMAX(step_param, boffset);
   }
 
   if (cpi->sf.adaptive_motion_search) {
@@ -6141,7 +6136,7 @@
   mvp_full.col >>= 3;
   mvp_full.row >>= 3;
 
-  bestsme = vp10_masked_full_pixel_diamond(
+  bestsme = av1_masked_full_pixel_diamond(
       cpi, x, mask, mask_stride, &mvp_full, step_param, sadpb,
       MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
       &tmp_mv->as_mv, ref_idx);
@@ -6153,15 +6148,15 @@
 
   if (bestsme < INT_MAX) {
     int dis; /* TODO: use dis in distortion calculation later. */
-    vp10_find_best_masked_sub_pixel_tree_up(
+    av1_find_best_masked_sub_pixel_tree_up(
         cpi, x, mask, mask_stride, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
         cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
         cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
         x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
         cpi->sf.use_upsampled_references);
   }
-  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
-                              x->mvcost, MV_COST_WEIGHT);
+  *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+                             x->mvcost, MV_COST_WEIGHT);
 
   if (cpi->sf.adaptive_motion_search && cm->show_frame)
     x->pred_mv[ref] = tmp_mv->as_mv;
@@ -6173,7 +6168,7 @@
   }
 }
 
-static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
+static void do_masked_motion_search_indexed(AV1_COMP *cpi, MACROBLOCK *x,
                                             int wedge_index, int wedge_sign,
                                             BLOCK_SIZE bsize, int mi_row,
                                             int mi_col, int_mv *tmp_mv,
@@ -6185,7 +6180,7 @@
   BLOCK_SIZE sb_type = mbmi->sb_type;
   const uint8_t *mask;
   const int mask_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
-  mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+  mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
 
   if (which == 0 || which == 2)
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
@@ -6193,7 +6188,7 @@
 
   if (which == 1 || which == 2) {
     // get the negative mask
-    mask = vp10_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
+    mask = av1_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
                             &tmp_mv[1], &rate_mv[1], 1, mv_idx[1]);
   }
@@ -6207,7 +6202,7 @@
 // However, once established that vector may be usable through the nearest and
 // near mv modes to reduce distortion in subsequent blocks and also improve
 // visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
+static int discount_newmv_test(const AV1_COMP *cpi, int this_mode,
                                int_mv this_mv,
                                int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME],
                                int ref_frame) {
@@ -6219,9 +6214,9 @@
            (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
 }
 
-#define LEFT_TOP_MARGIN ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
+#define LEFT_TOP_MARGIN ((AOM_ENC_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
 #define RIGHT_BOTTOM_MARGIN \
-  ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
+  ((AOM_ENC_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
 
 // TODO(jingning): this mv clamping function should be block size dependent.
 static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
@@ -6232,7 +6227,7 @@
 }
 
 #if CONFIG_EXT_INTER
-static int estimate_wedge_sign(const VP10_COMP *cpi, const MACROBLOCK *x,
+static int estimate_wedge_sign(const AV1_COMP *cpi, const MACROBLOCK *x,
                                const BLOCK_SIZE bsize, const uint8_t *pred0,
                                int stride0, const uint8_t *pred1, int stride1) {
   const struct macroblock_plane *const p = &x->plane[0];
@@ -6244,12 +6239,12 @@
   uint32_t esq[2][4], var;
   int64_t tl, br;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     pred0 = CONVERT_TO_BYTEPTR(pred0);
     pred1 = CONVERT_TO_BYTEPTR(pred1);
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   var = cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]);
   var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2,
@@ -6279,11 +6274,11 @@
 
 #if !CONFIG_DUAL_FILTER
 static INTERP_FILTER predict_interp_filter(
-    const VP10_COMP *cpi, const MACROBLOCK *x, const BLOCK_SIZE bsize,
+    const AV1_COMP *cpi, const MACROBLOCK *x, const BLOCK_SIZE bsize,
     const int mi_row, const int mi_col,
     INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME]) {
   INTERP_FILTER best_filter = SWITCHABLE;
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   const MACROBLOCKD *xd = &x->e_mbd;
   int bsl = mi_width_log2_lookup[bsize];
   int pred_filter_search =
@@ -6383,7 +6378,7 @@
       best_filter = EIGHTTAP_REGULAR;
     }
 #if CONFIG_EXT_INTERP
-    else if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+    else if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
       best_filter = EIGHTTAP_REGULAR;
     }
 #endif
@@ -6394,7 +6389,7 @@
 
 #if CONFIG_EXT_INTER
 // Choose the best wedge index and sign
-static int64_t pick_wedge(const VP10_COMP *const cpi, const MACROBLOCK *const x,
+static int64_t pick_wedge(const AV1_COMP *const cpi, const MACROBLOCK *const x,
                           const BLOCK_SIZE bsize, const uint8_t *const p0,
                           const uint8_t *const p1, int *const best_wedge_sign,
                           int *const best_wedge_index) {
@@ -6411,12 +6406,12 @@
   int wedge_types = (1 << get_wedge_bits_lookup(bsize));
   const uint8_t *mask;
   uint64_t sse;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
   const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
 #else
   const int bd_round = 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   DECLARE_ALIGNED(32, int16_t, r0[MAX_SB_SQUARE]);
   DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
@@ -6425,34 +6420,34 @@
 
   int64_t sign_limit;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (hbd) {
-    vpx_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
+    aom_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
                               CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
-    vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+    aom_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
                               CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
-    vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+    aom_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
                               CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
   } else  // NOLINT
-#endif    // CONFIG_VP9_HIGHBITDEPTH
+#endif    // CONFIG_AOM_HIGHBITDEPTH
   {
-    vpx_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
-    vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
-    vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+    aom_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
+    aom_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+    aom_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
   }
 
-  sign_limit = ((int64_t)vpx_sum_squares_i16(r0, N) -
-                (int64_t)vpx_sum_squares_i16(r1, N)) *
+  sign_limit = ((int64_t)aom_sum_squares_i16(r0, N) -
+                (int64_t)aom_sum_squares_i16(r1, N)) *
                (1 << WEDGE_WEIGHT_BITS) / 2;
 
-  vp10_wedge_compute_delta_squares(ds, r0, r1, N);
+  av1_wedge_compute_delta_squares(ds, r0, r1, N);
 
   for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
-    mask = vp10_get_contiguous_soft_mask(wedge_index, 0, bsize);
-    wedge_sign = vp10_wedge_sign_from_residuals(ds, mask, N, sign_limit);
+    mask = av1_get_contiguous_soft_mask(wedge_index, 0, bsize);
+    wedge_sign = av1_wedge_sign_from_residuals(ds, mask, N, sign_limit);
 
-    mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
-    sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+    mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+    sse = av1_wedge_sse_from_residuals(r1, d10, mask, N);
     sse = ROUND_POWER_OF_TWO(sse, bd_round);
 
     model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -6470,7 +6465,7 @@
 
 // Choose the best wedge index the specified sign
 static int64_t pick_wedge_fixed_sign(
-    const VP10_COMP *const cpi, const MACROBLOCK *const x,
+    const AV1_COMP *const cpi, const MACROBLOCK *const x,
     const BLOCK_SIZE bsize, const uint8_t *const p0, const uint8_t *const p1,
     const int wedge_sign, int *const best_wedge_index) {
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -6485,32 +6480,32 @@
   int wedge_types = (1 << get_wedge_bits_lookup(bsize));
   const uint8_t *mask;
   uint64_t sse;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
   const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
 #else
   const int bd_round = 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
   DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (hbd) {
-    vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+    aom_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
                               CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
-    vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+    aom_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
                               CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
   } else  // NOLINT
-#endif    // CONFIG_VP9_HIGHBITDEPTH
+#endif    // CONFIG_AOM_HIGHBITDEPTH
   {
-    vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
-    vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+    aom_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+    aom_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
   }
 
   for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
-    mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
-    sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+    mask = av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+    sse = av1_wedge_sse_from_residuals(r1, d10, mask, N);
     sse = ROUND_POWER_OF_TWO(sse, bd_round);
 
     model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -6525,7 +6520,7 @@
   return best_rd;
 }
 
-static int64_t pick_interinter_wedge(const VP10_COMP *const cpi,
+static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
                                      const MACROBLOCK *const x,
                                      const BLOCK_SIZE bsize,
                                      const uint8_t *const p0,
@@ -6552,7 +6547,7 @@
   return rd;
 }
 
-static int64_t pick_interintra_wedge(const VP10_COMP *const cpi,
+static int64_t pick_interintra_wedge(const AV1_COMP *const cpi,
                                      const MACROBLOCK *const x,
                                      const BLOCK_SIZE bsize,
                                      const uint8_t *const p0,
@@ -6574,7 +6569,7 @@
 #endif  // CONFIG_EXT_INTER
 
 static int64_t handle_inter_mode(
-    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+    AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
     int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
     int *disable_skip, int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME], int mi_row,
     int mi_col,
@@ -6593,7 +6588,7 @@
     INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME],
     int (*single_skippable)[TOTAL_REFS_PER_FRAME], int64_t *psse,
     const int64_t ref_best_rd) {
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
@@ -6613,14 +6608,14 @@
       cpi->interintra_mode_cost[size_group_lookup[bsize]];
   const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
 #if CONFIG_REF_MV
-  uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+  uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
 #endif
 #endif  // CONFIG_EXT_INTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_buf_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
 #else
   DECLARE_ALIGNED(16, uint8_t, tmp_buf_[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   uint8_t *tmp_buf;
 
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -6685,15 +6680,15 @@
     mode_ctx = mbmi_ext->compound_mode_context[refs[0]];
   else
 #endif  // CONFIG_EXT_INTER
-    mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
-                                          mbmi->ref_frame, bsize, -1);
+    mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+                                         mbmi->ref_frame, bsize, -1);
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf_);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     tmp_buf = tmp_buf_;
 
   if (is_comp_pred) {
@@ -6718,28 +6713,28 @@
                               single_newmv, &rate_mv, 0);
         } else {
 #if CONFIG_REF_MV
-          vp10_set_mvcost(x, mbmi->ref_frame[0]);
+          av1_set_mvcost(x, mbmi->ref_frame[0]);
 #endif  // CONFIG_REF_MV
-          rate_mv = vp10_mv_bit_cost(
-              &frame_mv[refs[0]].as_mv, &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
-              x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+          rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                    &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
 #if CONFIG_REF_MV
-          vp10_set_mvcost(x, mbmi->ref_frame[1]);
+          av1_set_mvcost(x, mbmi->ref_frame[1]);
 #endif  // CONFIG_REF_MV
-          rate_mv += vp10_mv_bit_cost(
+          rate_mv += av1_mv_bit_cost(
               &frame_mv[refs[1]].as_mv, &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
               x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
         }
       } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
         frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
-        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
-                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
-                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+        rate_mv = av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                  &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       } else {
         frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
-        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
-                                   &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
-                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+        rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                  &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       }
 #else
       // Initialize mv using single prediction mode result.
@@ -6751,17 +6746,17 @@
                             single_newmv, &rate_mv, 0);
       } else {
 #if CONFIG_REF_MV
-        vp10_set_mvcost(x, mbmi->ref_frame[0]);
+        av1_set_mvcost(x, mbmi->ref_frame[0]);
 #endif  // CONFIG_REF_MV
-        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
-                                   &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
-                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+        rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                  &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
 #if CONFIG_REF_MV
-        vp10_set_mvcost(x, mbmi->ref_frame[1]);
+        av1_set_mvcost(x, mbmi->ref_frame[1]);
 #endif  // CONFIG_REF_MV
-        rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
-                                    &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
-                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+        rate_mv += av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       }
 #endif  // CONFIG_EXT_INTER
     } else {
@@ -6790,7 +6785,7 @@
       // motion field, where the distortion gain for a single block may not
       // be enough to overcome the cost of a new mv.
       if (discount_newmv_test(cpi, this_mode, x->best_mv, mode_mv, refs[0])) {
-        rate_mv = VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+        rate_mv = AOMMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
       }
     }
     *rate2 += rate_mv;
@@ -6815,7 +6810,7 @@
   if (this_mode == NEAREST_NEARESTMV) {
 #else
   if (this_mode == NEARESTMV && is_comp_pred) {
-    uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+    uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
 #endif  // CONFIG_EXT_INTER
     if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
       cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv;
@@ -6873,7 +6868,7 @@
   }
 #else
   if (this_mode == NEARMV && is_comp_pred) {
-    uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+    uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
     if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
       int ref_mv_idx = mbmi->ref_mv_idx + 1;
       cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
@@ -6909,10 +6904,10 @@
   if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
                           refs[0])) {
 #if CONFIG_REF_MV && CONFIG_EXT_INTER
-    *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
+    *rate2 += AOMMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
                      cost_mv_ref(cpi, NEARESTMV, is_comp_pred, mode_ctx));
 #else
-    *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
+    *rate2 += AOMMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
                      cost_mv_ref(cpi, NEARESTMV, mode_ctx));
 #endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
   } else {
@@ -6969,7 +6964,7 @@
 #else
       mbmi->interp_filter = i;
 #endif
-      rs = vp10_get_switchable_rate(cpi, xd);
+      rs = av1_get_switchable_rate(cpi, xd);
       rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
 
       if (i > 0 && intpel_mv && IsInterpolatingFilter(i)) {
@@ -7005,7 +7000,7 @@
             xd->plane[j].dst.stride = MAX_SB_SIZE;
           }
         }
-        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+        av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
         model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &rate_sum,
                         &dist_sum, &tmp_skip_sb, &tmp_skip_sse);
 
@@ -7076,7 +7071,7 @@
   mbmi->interp_filter =
       cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
 #endif
-  rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
+  rs = cm->interp_filter == SWITCHABLE ? av1_get_switchable_rate(cpi, xd) : 0;
 
 #if CONFIG_EXT_INTER
 #if CONFIG_OBMC
@@ -7094,10 +7089,10 @@
     int tmp_skip_txfm_sb;
     int64_t tmp_skip_sse_sb;
 
-    rs = vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+    rs = av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
     mbmi->use_wedge_interinter = 0;
-    vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
-    vp10_subtract_plane(x, bsize, 0);
+    av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+    av1_subtract_plane(x, bsize, 0);
     rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                              &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
     if (rd != INT64_MAX)
@@ -7114,12 +7109,12 @@
       int strides[1] = { bw };
 
       mbmi->use_wedge_interinter = 1;
-      rs = vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
-           vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+      rs = av1_cost_literal(get_interinter_wedge_bits(bsize)) +
+           av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
 
-      vp10_build_inter_predictors_for_planes_single_buf(
+      av1_build_inter_predictors_for_planes_single_buf(
           xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
-      vp10_build_inter_predictors_for_planes_single_buf(
+      av1_build_inter_predictors_for_planes_single_buf(
           xd, bsize, 0, 0, mi_row, mi_col, 1, preds1, strides);
 
       // Choose the best wedge
@@ -7152,7 +7147,7 @@
           tmp_rate_mv = rate_mvs[1];
           mbmi->mv[1].as_int = tmp_mv[1].as_int;
         }
-        vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+        av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
         model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                         &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
         rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
@@ -7162,10 +7157,10 @@
           mbmi->mv[0].as_int = cur_mv[0].as_int;
           mbmi->mv[1].as_int = cur_mv[1].as_int;
           tmp_rate_mv = rate_mv;
-          vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
-                                                    strides, preds1, strides);
+          av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+                                                   strides, preds1, strides);
         }
-        vp10_subtract_plane(x, bsize, 0);
+        av1_subtract_plane(x, bsize, 0);
         rd =
             estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                                 &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7188,9 +7183,9 @@
           xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
         }
       } else {
-        vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
-                                                  strides, preds1, strides);
-        vp10_subtract_plane(x, bsize, 0);
+        av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+                                                 strides, preds1, strides);
+        av1_subtract_plane(x, bsize, 0);
         rd =
             estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                                 &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7205,19 +7200,19 @@
       }
     }
     if (ref_best_rd < INT64_MAX &&
-        VPXMIN(best_rd_wedge, best_rd_nowedge) / 3 > ref_best_rd)
+        AOMMIN(best_rd_wedge, best_rd_nowedge) / 3 > ref_best_rd)
       return INT64_MAX;
 
     pred_exists = 0;
-    tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
+    tmp_rd = AOMMIN(best_rd_wedge, best_rd_nowedge);
 
     if (mbmi->use_wedge_interinter)
       *compmode_wedge_cost =
-          vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
-          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+          av1_cost_literal(get_interinter_wedge_bits(bsize)) +
+          av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
     else
       *compmode_wedge_cost =
-          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+          av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
   }
 
   if (is_comp_interintra_pred) {
@@ -7236,11 +7231,11 @@
     DECLARE_ALIGNED(16, uint8_t, intrapred_[2 * MAX_SB_SQUARE]);
     uint8_t *intrapred;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       intrapred = CONVERT_TO_BYTEPTR(intrapred_);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       intrapred = intrapred_;
 
     mbmi->ref_frame[1] = NONE;
@@ -7248,7 +7243,7 @@
       xd->plane[j].dst.buf = tmp_buf + j * MAX_SB_SQUARE;
       xd->plane[j].dst.stride = bw;
     }
-    vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+    av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
     restore_dst_buf(xd, orig_dst, orig_dst_stride);
     mbmi->ref_frame[1] = INTRA_FRAME;
     mbmi->use_wedge_interintra = 0;
@@ -7256,8 +7251,8 @@
     for (j = 0; j < INTERINTRA_MODES; ++j) {
       mbmi->interintra_mode = (INTERINTRA_MODE)j;
       rmode = interintra_mode_cost[mbmi->interintra_mode];
-      vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
-      vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+      av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+      av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
       model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                       &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
       rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
@@ -7268,9 +7263,9 @@
     }
     mbmi->interintra_mode = best_interintra_mode;
     rmode = interintra_mode_cost[mbmi->interintra_mode];
-    vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
-    vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
-    vp10_subtract_plane(x, bsize, 0);
+    av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+    av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+    av1_subtract_plane(x, bsize, 0);
     rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                              &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
     if (rd != INT64_MAX)
@@ -7281,7 +7276,7 @@
       return INT64_MAX;
     }
     if (is_interintra_wedge_used(bsize)) {
-      rwedge = vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 0);
+      rwedge = av1_cost_bit(cm->fc->wedge_interintra_prob[bsize], 0);
       if (rd != INT64_MAX)
         rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_mv + rwedge + rate_sum,
                     dist_sum);
@@ -7291,8 +7286,8 @@
       if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
         mbmi->use_wedge_interintra = 1;
 
-        rwedge = vp10_cost_literal(get_interintra_wedge_bits(bsize)) +
-                 vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
+        rwedge = av1_cost_literal(get_interintra_wedge_bits(bsize)) +
+                 av1_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
 
         best_interintra_rd_wedge =
             pick_interintra_wedge(cpi, x, bsize, intrapred_, tmp_buf_);
@@ -7302,12 +7297,12 @@
         // Refine motion vector.
         if (have_newmv_in_inter_mode(this_mode)) {
           // get negative of mask
-          const uint8_t *mask = vp10_get_contiguous_soft_mask(
+          const uint8_t *mask = av1_get_contiguous_soft_mask(
               mbmi->interintra_wedge_index, 1, bsize);
           do_masked_motion_search(cpi, x, mask, bw, bsize, mi_row, mi_col,
                                   &tmp_mv, &tmp_rate_mv, 0, mv_idx);
           mbmi->mv[0].as_int = tmp_mv.as_int;
-          vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+          av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
           model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                           &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
           rd = RDCOST(x->rdmult, x->rddiv,
@@ -7321,10 +7316,10 @@
         } else {
           tmp_mv.as_int = cur_mv[0].as_int;
           tmp_rate_mv = rate_mv;
-          vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+          av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
         }
         // Evaluate closer to true rd
-        vp10_subtract_plane(x, bsize, 0);
+        av1_subtract_plane(x, bsize, 0);
         rd =
             estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                                 &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7352,23 +7347,23 @@
     pred_exists = 0;
     tmp_rd = best_interintra_rd;
     *compmode_interintra_cost =
-        vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
+        av1_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
     *compmode_interintra_cost += interintra_mode_cost[mbmi->interintra_mode];
     if (is_interintra_wedge_used(bsize)) {
-      *compmode_interintra_cost += vp10_cost_bit(
+      *compmode_interintra_cost += av1_cost_bit(
           cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
       if (mbmi->use_wedge_interintra) {
         *compmode_interintra_cost +=
-            vp10_cost_literal(get_interintra_wedge_bits(bsize));
+            av1_cost_literal(get_interintra_wedge_bits(bsize));
       }
     }
   } else if (is_interintra_allowed(mbmi)) {
     *compmode_interintra_cost =
-        vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
+        av1_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
   }
 
 #if CONFIG_EXT_INTERP
-  if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+  if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
 #if CONFIG_DUAL_FILTER
     for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = EIGHTTAP_REGULAR;
 #else
@@ -7395,7 +7390,7 @@
     // Handles the special case when a filter that is not in the
     // switchable list (ex. bilinear) is indicated at the frame level, or
     // skip condition holds.
-    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+    av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
     model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
                     &tmp_dist, &skip_txfm_sb, &skip_sse_sb);
     rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
@@ -7413,7 +7408,7 @@
       const int mode0 = compound_ref0_mode(this_mode);
       const int mode1 = compound_ref1_mode(this_mode);
       int64_t mrd =
-          VPXMIN(modelled_rd[mode0][refs[0]], modelled_rd[mode1][refs[1]]);
+          AOMMIN(modelled_rd[mode0][refs[0]], modelled_rd[mode1][refs[1]]);
       if (rd / 4 * 3 > mrd && ref_best_rd < INT64_MAX) {
         restore_dst_buf(xd, orig_dst, orig_dst_stride);
         return INT64_MAX;
@@ -7486,7 +7481,7 @@
                                   &tmp_mv, pred_mv, &tmp_rate_mv);
         mbmi->mv[0].as_int = tmp_mv.as_int;
         if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
-          tmp_rate_mv = VPXMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+          tmp_rate_mv = AOMMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
         }
 #if CONFIG_EXT_INTER
         tmp_rate2 = rate2_bmc_nocoeff - rate_mv_bmc + tmp_rate_mv;
@@ -7500,21 +7495,21 @@
         if (!has_subpel_mv_component(xd->mi[0], xd, 1))
           obmc_interp_filter[1][1] = mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
 #else
-        if (!vp10_is_interp_needed(xd))
+        if (!av1_is_interp_needed(xd))
           obmc_interp_filter[1] = mbmi->interp_filter = EIGHTTAP_REGULAR;
 #endif  // CONFIG_DUAL_FILTER
         // This is not quite correct with CONFIG_DUAL_FILTER when a filter
         // is needed in only one direction
-        if (!vp10_is_interp_needed(xd)) tmp_rate2 -= rs;
+        if (!av1_is_interp_needed(xd)) tmp_rate2 -= rs;
 #endif  // CONFIG_EXT_INTERP
-        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+        av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 #if CONFIG_EXT_INTER
       } else {
-        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+        av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 #endif  // CONFIG_EXT_INTER
       }
-      vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
-                                       dst_stride1, dst_buf2, dst_stride2);
+      av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                      dst_stride1, dst_buf2, dst_stride2);
       model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
                       &tmp_dist, &skip_txfm_sb, &skip_sse_sb);
     }
@@ -7537,7 +7532,7 @@
       int64_t rdcosty = INT64_MAX;
 
       // Y cost and distortion
-      vp10_subtract_plane(x, bsize, 0);
+      av1_subtract_plane(x, bsize, 0);
 #if CONFIG_VAR_TX
       if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
         select_tx_type_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
@@ -7576,7 +7571,7 @@
       *distortion += distortion_y;
 
       rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
-      rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
+      rdcosty = AOMMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
 
 #if CONFIG_VAR_TX
       if (!inter_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
@@ -7605,25 +7600,24 @@
         *rate2 -= *rate_uv + *rate_y;
         *rate_y = 0;
         *rate_uv = 0;
-        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
         mbmi->skip = 0;
         // here mbmi->skip temporarily plays a role as what this_skip2 does
       } else if (!xd->lossless[mbmi->segment_id] &&
                  (RDCOST(x->rdmult, x->rddiv,
                          *rate_y + *rate_uv +
-                             vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+                             av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
                          *distortion) >=
                   RDCOST(x->rdmult, x->rddiv,
-                         vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1),
-                         *psse))) {
+                         av1_cost_bit(av1_get_skip_prob(cm, xd), 1), *psse))) {
         *rate2 -= *rate_uv + *rate_y;
-        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
         *distortion = *psse;
         *rate_y = 0;
         *rate_uv = 0;
         mbmi->skip = 1;
       } else {
-        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         mbmi->skip = 0;
       }
       *disable_skip = 0;
@@ -7636,7 +7630,7 @@
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
       mbmi->skip = 0;
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
-      *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
 
       *distortion = skip_sse_sb;
       *psse = skip_sse_sb;
@@ -7670,18 +7664,18 @@
       best_skippable = *skippable;
       best_xskip = x->skip;
       best_disable_skip = *disable_skip;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        x->recon_variance = vp10_high_get_sby_perpixel_variance(
+        x->recon_variance = av1_high_get_sby_perpixel_variance(
             cpi, &xd->plane[0].dst, bsize, xd->bd);
       } else {
         x->recon_variance =
-            vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+            av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
       }
 #else
       x->recon_variance =
-          vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+          av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
   }
 
@@ -7709,28 +7703,28 @@
   if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
 
 #if !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    x->recon_variance = vp10_high_get_sby_perpixel_variance(
+    x->recon_variance = av1_high_get_sby_perpixel_variance(
         cpi, &xd->plane[0].dst, bsize, xd->bd);
   } else {
     x->recon_variance =
-        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+        av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
   }
 #else
   x->recon_variance =
-      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
 
   restore_dst_buf(xd, orig_dst, orig_dst_stride);
   return 0;  // The rate-distortion cost will be re-calculated by caller.
 }
 
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
-                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                int64_t best_rd) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rd_pick_intra_mode_sb(AV1_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                               int64_t best_rd) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblockd_plane *const pd = xd->plane;
   int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
@@ -7758,15 +7752,15 @@
   max_uv_tx_size = get_uv_tx_size_impl(
       xd->mi[0]->mbmi.tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
   rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly, &dist_uv,
-                          &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
+                          &uv_skip, AOMMAX(BLOCK_8X8, bsize), max_uv_tx_size);
 
   if (y_skip && uv_skip) {
     rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
-                    vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+                    av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
     rd_cost->dist = dist_y + dist_uv;
   } else {
     rd_cost->rate =
-        rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate_y + rate_uv + av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
     rd_cost->dist = dist_y + dist_uv;
   }
 
@@ -7806,18 +7800,18 @@
   // to a predictor with a low spatial complexity compared to the source.
   if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
       (source_variance > recon_variance)) {
-    var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
+    var_factor = AOMMIN(absvar_diff, AOMMIN(VLOW_ADJ_MAX, var_error));
     // A second possible case of interest is where the source variance
     // is very low and we wish to discourage false texture or motion trails.
   } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
              (recon_variance > source_variance)) {
-    var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
+    var_factor = AOMMIN(absvar_diff, AOMMIN(VHIGH_ADJ_MAX, var_error));
   }
   *this_rd += (*this_rd * var_factor) / 100;
 }
 
 // Do we have an internal image edge (e.g. formatting bars).
-int vp10_internal_image_edge(VP10_COMP *cpi) {
+int av1_internal_image_edge(AV1_COMP *cpi) {
   return (cpi->oxcf.pass == 2) &&
          ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
           (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
@@ -7826,7 +7820,7 @@
 // Checks to see if a super block is on a horizontal image edge.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
+int av1_active_h_edge(AV1_COMP *cpi, int mi_row, int mi_step) {
   int top_edge = 0;
   int bottom_edge = cpi->common.mi_rows;
   int is_active_h_edge = 0;
@@ -7840,7 +7834,7 @@
     top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
 
     bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
-    bottom_edge = VPXMAX(top_edge, bottom_edge);
+    bottom_edge = AOMMAX(top_edge, bottom_edge);
   }
 
   if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
@@ -7853,7 +7847,7 @@
 // Checks to see if a super block is on a vertical image edge.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
+int av1_active_v_edge(AV1_COMP *cpi, int mi_col, int mi_step) {
   int left_edge = 0;
   int right_edge = cpi->common.mi_cols;
   int is_active_v_edge = 0;
@@ -7867,7 +7861,7 @@
     left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
 
     right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
-    right_edge = VPXMAX(left_edge, right_edge);
+    right_edge = AOMMAX(left_edge, right_edge);
   }
 
   if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
@@ -7880,12 +7874,12 @@
 // Checks to see if a super block is at the edge of the active image.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
-  return vp10_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
-         vp10_active_v_edge(cpi, mi_col, cpi->common.mib_size);
+int av1_active_edge_sb(AV1_COMP *cpi, int mi_row, int mi_col) {
+  return av1_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
+         av1_active_v_edge(cpi, mi_col, cpi->common.mib_size);
 }
 
-static void restore_uv_color_map(VP10_COMP *cpi, MACROBLOCK *x) {
+static void restore_uv_color_map(AV1_COMP *cpi, MACROBLOCK *x) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
@@ -7901,25 +7895,25 @@
   float centroids[2 * PALETTE_MAX_SIZE];
   uint8_t *const color_map = xd->plane[1].color_index_map;
   int r, c;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const uint16_t *const src_u16 = CONVERT_TO_SHORTPTR(src_u);
   const uint16_t *const src_v16 = CONVERT_TO_SHORTPTR(src_v);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   (void)cpi;
 
   for (r = 0; r < rows; ++r) {
     for (c = 0; c < cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cpi->common.use_highbitdepth) {
         data[(r * cols + c) * 2] = src_u16[r * src_stride + c];
         data[(r * cols + c) * 2 + 1] = src_v16[r * src_stride + c];
       } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         data[(r * cols + c) * 2] = src_u[r * src_stride + c];
         data[(r * cols + c) * 2 + 1] = src_v[r * src_stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
   }
 
@@ -7929,13 +7923,13 @@
     }
   }
 
-  vp10_calc_indices(data, centroids, color_map, rows * cols,
-                    pmi->palette_size[1], 2);
+  av1_calc_indices(data, centroids, color_map, rows * cols,
+                   pmi->palette_size[1], 2);
 }
 
 #if CONFIG_EXT_INTRA
 static void pick_ext_intra_interframe(
-    VP10_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+    AV1_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
     int *rate_uv_intra, int *rate_uv_tokenonly, int64_t *dist_uv, int *skip_uv,
     PREDICTION_MODE *mode_uv, EXT_INTRA_MODE_INFO *ext_intra_mode_info_uv,
     PALETTE_MODE_INFO *pmi_uv, int8_t *uv_angle_delta, int palette_ctx,
@@ -7946,7 +7940,7 @@
     int *returnrate_nocoef,
 #endif  // CONFIG_SUPERTX
     int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, RD_COST *rd_cost) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
@@ -7958,8 +7952,8 @@
   TX_SIZE uv_tx;
 
   for (i = 0; i < MAX_MODES; ++i)
-    if (vp10_mode_order[i].mode == DC_PRED &&
-        vp10_mode_order[i].ref_frame[0] == INTRA_FRAME)
+    if (av1_mode_order[i].mode == DC_PRED &&
+        av1_mode_order[i].ref_frame[0] == INTRA_FRAME)
       break;
   dc_mode_index = i;
   assert(i < MAX_MODES);
@@ -8008,8 +8002,8 @@
   rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
           cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
   if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
-    rate2 += vp10_cost_bit(
-        vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+    rate2 += av1_cost_bit(
+        av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
 
   if (!xd->lossless[mbmi->segment_id]) {
     // super_block_yrd above includes the cost of the tx_size in the
@@ -8020,8 +8014,8 @@
                                 TX_8X8][get_tx_size_context(xd)][mbmi->tx_size];
   }
 
-  rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
-                         mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+  rate2 += av1_cost_bit(cm->fc->ext_intra_probs[0],
+                        mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
   rate2 += write_uniform_cost(FILTER_INTRA_MODES,
                               mbmi->ext_intra_mode_info.ext_intra_mode[0]);
   if (mbmi->uv_mode != DC_PRED && mbmi->uv_mode != TM_PRED) {
@@ -8029,26 +8023,26 @@
                                 MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
   }
   if (mbmi->mode == DC_PRED) {
-    rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
-                           mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+    rate2 += av1_cost_bit(cpi->common.fc->ext_intra_probs[1],
+                          mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
     if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
       rate2 += write_uniform_cost(FILTER_INTRA_MODES,
                                   mbmi->ext_intra_mode_info.ext_intra_mode[1]);
   }
   distortion2 = distortion_y + distortion_uv;
-  vp10_encode_intra_block_plane(x, bsize, 0, 0);
-#if CONFIG_VP9_HIGHBITDEPTH
+  av1_encode_intra_block_plane(x, bsize, 0, 0);
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    x->recon_variance = vp10_high_get_sby_perpixel_variance(
+    x->recon_variance = av1_high_get_sby_perpixel_variance(
         cpi, &xd->plane[0].dst, bsize, xd->bd);
   } else {
     x->recon_variance =
-        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+        av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
   }
 #else
   x->recon_variance =
-      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   rate2 += ref_costs_single[INTRA_FRAME];
 
@@ -8056,9 +8050,9 @@
     rate2 -= (rate_y + rate_uv);
     rate_y = 0;
     rate_uv = 0;
-    rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+    rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
   } else {
-    rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+    rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
   }
   this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
   rd_variance_adjustment(x, &this_rd, INTRA_FRAME, x->source_variance);
@@ -8068,7 +8062,7 @@
     *best_intra_mode = mbmi->mode;
   }
   for (i = 0; i < REFERENCE_MODES; ++i)
-    best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+    best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
 
   if (this_rd < *best_rd) {
     *best_mode_index = dc_mode_index;
@@ -8079,9 +8073,9 @@
       *returnrate_nocoef = rate2;
     else
       *returnrate_nocoef = rate2 - rate_y - rate_uv;
-    *returnrate_nocoef -= vp10_cost_bit(vp10_get_skip_prob(cm, xd), skippable);
-    *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
-                                        mbmi->ref_frame[0] != INTRA_FRAME);
+    *returnrate_nocoef -= av1_cost_bit(av1_get_skip_prob(cm, xd), skippable);
+    *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+                                       mbmi->ref_frame[0] != INTRA_FRAME);
 #endif  // CONFIG_SUPERTX
     rd_cost->dist = distortion2;
     rd_cost->rdcost = this_rd;
@@ -8094,21 +8088,23 @@
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_OBMC
-static void calc_target_weighted_pred(
-    const VP10_COMMON *cm, const MACROBLOCK *x, const MACROBLOCKD *xd,
-    int mi_row, int mi_col, const uint8_t *above, int above_stride,
-    const uint8_t *left, int left_stride, int32_t *mask_buf, int32_t *wsrc_buf);
+static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x,
+                                      const MACROBLOCKD *xd, int mi_row,
+                                      int mi_col, const uint8_t *above,
+                                      int above_stride, const uint8_t *left,
+                                      int left_stride, int32_t *mask_buf,
+                                      int32_t *wsrc_buf);
 #endif  // CONFIG_OBMC
 
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
-                                MACROBLOCK *x, int mi_row, int mi_col,
-                                RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
+                               MACROBLOCK *x, int mi_row, int mi_col,
+                               RD_COST *rd_cost,
 #if CONFIG_SUPERTX
-                                int *returnrate_nocoef,
+                               int *returnrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                               int64_t best_rd_so_far) {
+  AV1_COMMON *const cm = &cpi->common;
   RD_OPT *const rd_opt = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -8133,16 +8129,16 @@
   int single_skippable[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
   static const int flag_list[TOTAL_REFS_PER_FRAME] = {
     0,
-    VPX_LAST_FLAG,
+    AOM_LAST_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_LAST2_FLAG,
-    VPX_LAST3_FLAG,
+    AOM_LAST2_FLAG,
+    AOM_LAST3_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_GOLD_FLAG,
+    AOM_GOLD_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_BWD_FLAG,
+    AOM_BWD_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_ALT_FLAG
+    AOM_ALT_FLAG
   };
   int64_t best_rd = best_rd_so_far;
   int best_rate_y = INT_MAX, best_rate_uv = INT_MAX;
@@ -8153,7 +8149,7 @@
   int midx, best_mode_index = -1;
   unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
   unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
-  vpx_prob comp_mode_p;
+  aom_prob comp_mode_p;
   int64_t best_intra_rd = INT64_MAX;
   unsigned int best_pred_sse = UINT_MAX;
   PREDICTION_MODE best_intra_mode = DC_PRED;
@@ -8169,7 +8165,7 @@
   int rate_overhead, rate_dummy;
   uint8_t directional_mode_skip_mask[INTRA_MODES];
 #endif  // CONFIG_EXT_INTRA
-  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+  const int intra_cost_penalty = av1_get_intra_cost_penalty(
       cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
   const int *const intra_mode_cost = cpi->mbmode_cost[size_group_lookup[bsize]];
   int best_skip2 = 0;
@@ -8194,13 +8190,13 @@
   const MODE_INFO *above_mi = xd->above_mi;
   const MODE_INFO *left_mi = xd->left_mi;
 #if CONFIG_OBMC
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
   DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
 #else
   DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
   DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, int32_t, weighted_src_buf[MAX_SB_SQUARE]);
   DECLARE_ALIGNED(16, int32_t, mask2d_buf[MAX_SB_SQUARE]);
   uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
@@ -8211,7 +8207,7 @@
   int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
   int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     int len = sizeof(uint16_t);
     dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -8221,20 +8217,20 @@
     dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
     dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_SB_SQUARE * len);
   } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst_buf1[0] = tmp_buf1;
     dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
     dst_buf1[2] = tmp_buf1 + 2 * MAX_SB_SQUARE;
     dst_buf2[0] = tmp_buf2;
     dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
     dst_buf2[2] = tmp_buf2 + 2 * MAX_SB_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_OBMC
 
-  vp10_zero(best_mbmode);
-  vp10_zero(pmi_uv);
+  av1_zero(best_mbmode);
+  av1_zero(pmi_uv);
 
   if (cm->allow_screen_content_tools) {
     if (above_mi)
@@ -8291,22 +8287,22 @@
     MODE_INFO *const mi = xd->mi[0];
     int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
     x->mbmi_ext->mode_context[ref_frame] = 0;
-    vp10_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
-                      mbmi_ext->ref_mv_stack[ref_frame],
+    av1_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
+                     mbmi_ext->ref_mv_stack[ref_frame],
 #if CONFIG_EXT_INTER
-                      mbmi_ext->compound_mode_context,
+                     mbmi_ext->compound_mode_context,
 #endif  // CONFIG_EXT_INTER
-                      candidates, mi_row, mi_col, NULL, NULL,
-                      mbmi_ext->mode_context);
+                     candidates, mi_row, mi_col, NULL, NULL,
+                     mbmi_ext->mode_context);
   }
 #endif  // CONFIG_REF_MV
 
 #if CONFIG_OBMC
-  vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
-                                       dst_width1, dst_height1, dst_stride1);
-  vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
-                                      dst_width2, dst_height2, dst_stride2);
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+                                      dst_width1, dst_height1, dst_stride1);
+  av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+                                     dst_width2, dst_height2, dst_stride2);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
   calc_target_weighted_pred(cm, x, xd, mi_row, mi_col, dst_buf1[0],
                             dst_stride1[0], dst_buf2[0], dst_stride2[0],
                             mask2d_buf, weighted_src_buf);
@@ -8466,9 +8462,9 @@
 #endif
 
     mode_index = mode_map[midx];
-    this_mode = vp10_mode_order[mode_index].mode;
-    ref_frame = vp10_mode_order[mode_index].ref_frame[0];
-    second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
+    this_mode = av1_mode_order[mode_index].mode;
+    ref_frame = av1_mode_order[mode_index].ref_frame[0];
+    second_ref_frame = av1_mode_order[mode_index].ref_frame[1];
 
 #if CONFIG_EXT_INTER
     if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
@@ -8528,7 +8524,7 @@
     }
 
     if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
-        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+        (ref_frame_skip_mask[1] & (1 << AOMMAX(0, second_ref_frame))))
       continue;
 
     if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
@@ -8643,7 +8639,7 @@
           const uint8_t *src = x->plane[0].src.buf;
           const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
           const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
             highbd_angle_estimation(src, src_stride, rows, cols,
                                     directional_mode_skip_mask);
@@ -8712,9 +8708,8 @@
       rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
               cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
       if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
-        rate2 += vp10_cost_bit(
-            vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
-            0);
+        rate2 += av1_cost_bit(
+            av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
 
       if (!xd->lossless[mbmi->segment_id]) {
         // super_block_yrd above includes the cost of the tx_size in the
@@ -8727,18 +8722,18 @@
 #if CONFIG_EXT_INTRA
       if (is_directional_mode) {
         int p_angle;
-        const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+        const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
         rate2 += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
                                     MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
         p_angle =
             mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
-        if (vp10_is_intra_filter_switchable(p_angle))
+        if (av1_is_intra_filter_switchable(p_angle))
           rate2 += cpi->intra_filter_cost[intra_filter_ctx][mbmi->intra_filter];
       }
 
       if (mbmi->mode == DC_PRED && ALLOW_FILTER_INTRA_MODES) {
-        rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
-                               mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+        rate2 += av1_cost_bit(cm->fc->ext_intra_probs[0],
+                              mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
         if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
           rate2 += write_uniform_cost(
               FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[0]);
@@ -8751,8 +8746,8 @@
       }
 
       if (ALLOW_FILTER_INTRA_MODES && mbmi->mode == DC_PRED) {
-        rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
-                               mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+        rate2 += av1_cost_bit(cpi->common.fc->ext_intra_probs[1],
+                              mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
         if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
           rate2 += write_uniform_cost(
               FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[1]);
@@ -8761,19 +8756,19 @@
       if (this_mode != DC_PRED && this_mode != TM_PRED)
         rate2 += intra_cost_penalty;
       distortion2 = distortion_y + distortion_uv;
-      vp10_encode_intra_block_plane(x, bsize, 0, 1);
-#if CONFIG_VP9_HIGHBITDEPTH
+      av1_encode_intra_block_plane(x, bsize, 0, 1);
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        x->recon_variance = vp10_high_get_sby_perpixel_variance(
+        x->recon_variance = av1_high_get_sby_perpixel_variance(
             cpi, &xd->plane[0].dst, bsize, xd->bd);
       } else {
         x->recon_variance =
-            vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+            av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
       }
 #else
       x->recon_variance =
-          vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+          av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     } else {
 #if CONFIG_REF_MV
       int_mv backup_ref_mv[2];
@@ -8798,7 +8793,7 @@
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_REF_MV
       mbmi->ref_mv_idx = 0;
-      ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+      ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
 
       if (this_mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
         int ref;
@@ -8841,10 +8836,10 @@
         // TODO(jingning): This should be deprecated shortly.
         int idx_offset = (mbmi->mode == NEARMV) ? 1 : 0;
         int ref_set =
-            VPXMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
+            AOMMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
 
         uint8_t drl_ctx =
-            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx_offset);
+            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx_offset);
         // Dummy
         int_mv backup_fmv[2];
         backup_fmv[0] = frame_mv[NEWMV][ref_frame];
@@ -8857,12 +8852,12 @@
               RDCOST(x->rdmult, x->rddiv, 0, total_sse))
             tmp_ref_rd =
                 RDCOST(x->rdmult, x->rddiv,
-                       rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+                       rate2 + av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
                        distortion2);
           else
             tmp_ref_rd =
                 RDCOST(x->rdmult, x->rddiv,
-                       rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
+                       rate2 + av1_cost_bit(av1_get_skip_prob(cm, xd), 1) -
                            rate_y - rate_uv,
                        total_sse);
         }
@@ -8940,8 +8935,8 @@
 
           for (i = 0; i < mbmi->ref_mv_idx; ++i) {
             uint8_t drl1_ctx = 0;
-            drl1_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
-                                    i + idx_offset);
+            drl1_ctx = av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+                                   i + idx_offset);
             tmp_rate += cpi->drl_mode_cost0[drl1_ctx][1];
           }
 
@@ -8949,8 +8944,8 @@
                   mbmi->ref_mv_idx + idx_offset + 1 &&
               ref_idx < ref_set - 1) {
             uint8_t drl1_ctx =
-                vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
-                             mbmi->ref_mv_idx + idx_offset);
+                av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+                            mbmi->ref_mv_idx + idx_offset);
             tmp_rate += cpi->drl_mode_cost0[drl1_ctx][0];
           }
 
@@ -8960,16 +8955,16 @@
 #else
             if (RDCOST(x->rdmult, x->rddiv, tmp_rate_y + tmp_rate_uv,
                        tmp_dist) < RDCOST(x->rdmult, x->rddiv, 0, tmp_sse))
-              tmp_alt_rd = RDCOST(
-                  x->rdmult, x->rddiv,
-                  tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
-                  tmp_dist);
+              tmp_alt_rd =
+                  RDCOST(x->rdmult, x->rddiv,
+                         tmp_rate + av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
+                         tmp_dist);
             else
-              tmp_alt_rd = RDCOST(
-                  x->rdmult, x->rddiv,
-                  tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
-                      tmp_rate_y - tmp_rate_uv,
-                  tmp_sse);
+              tmp_alt_rd =
+                  RDCOST(x->rdmult, x->rddiv,
+                         tmp_rate + av1_cost_bit(av1_get_skip_prob(cm, xd), 1) -
+                             tmp_rate_y - tmp_rate_uv,
+                         tmp_sse);
 #endif  // CONFIG_OBMC
           }
 
@@ -9011,7 +9006,7 @@
 
       if (this_rd == INT64_MAX) continue;
 
-      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+      compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
 
       if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
     }
@@ -9047,15 +9042,15 @@
         rate_y = 0;
         rate_uv = 0;
         // Cost the skip mb case
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
       } else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
         if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
             RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
           // Add in the cost of the no skip flag.
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         } else {
           // FIXME(rbultje) make this work for splitmv also
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
           distortion2 = total_sse;
           assert(total_sse >= 0);
           rate2 -= (rate_y + rate_uv);
@@ -9065,7 +9060,7 @@
         }
       } else {
         // Add in the cost of the no skip flag.
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
       }
 
       // Calculate the final RD estimate for this mode.
@@ -9102,7 +9097,7 @@
 
     if (!disable_skip && ref_frame == INTRA_FRAME) {
       for (i = 0; i < REFERENCE_MODES; ++i)
-        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+        best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
     }
 
     // Did this mode help.. i.e. is it the new best mode
@@ -9124,11 +9119,10 @@
           *returnrate_nocoef = rate2;
         else
           *returnrate_nocoef = rate2 - rate_y - rate_uv;
-        *returnrate_nocoef -=
-            vp10_cost_bit(vp10_get_skip_prob(cm, xd),
-                          disable_skip || skippable || this_skip2);
-        *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
-                                            mbmi->ref_frame[0] != INTRA_FRAME);
+        *returnrate_nocoef -= av1_cost_bit(
+            av1_get_skip_prob(cm, xd), disable_skip || skippable || this_skip2);
+        *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+                                           mbmi->ref_frame[0] != INTRA_FRAME);
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
         if (is_inter_block(mbmi) && is_motvar_allowed(mbmi))
           *returnrate_nocoef -= cpi->motvar_cost[bsize][mbmi->motion_variation];
@@ -9140,8 +9134,8 @@
         best_mbmode = *mbmi;
         best_skip2 = this_skip2;
         best_mode_skippable = skippable;
-        best_rate_y = rate_y + vp10_cost_bit(vp10_get_skip_prob(cm, xd),
-                                             this_skip2 || skippable);
+        best_rate_y = rate_y + av1_cost_bit(av1_get_skip_prob(cm, xd),
+                                            this_skip2 || skippable);
         best_rate_uv = rate_uv;
 
 #if CONFIG_VAR_TX
@@ -9157,11 +9151,11 @@
           int qstep = xd->plane[0].dequant[1];
           // TODO(debargha): Enhance this by specializing for each mode_index
           int scale = 4;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             qstep >>= (xd->bd - 8);
           }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
           if (x->source_variance < UINT_MAX) {
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
@@ -9229,13 +9223,13 @@
     }
 
     if (is_inter_mode(mbmi->mode)) {
-      vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+      av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 #if CONFIG_OBMC
       if (mbmi->motion_variation == OBMC_CAUSAL)
-        vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
-                                         dst_stride1, dst_buf2, dst_stride2);
+        av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                        dst_stride1, dst_buf2, dst_stride2);
 #endif  // CONFIG_OBMC
-      vp10_subtract_plane(x, bsize, 0);
+      av1_subtract_plane(x, bsize, 0);
 #if CONFIG_VAR_TX
       if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
         select_tx_type_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
@@ -9269,13 +9263,13 @@
     if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, (dist_y + dist_uv)) >
         RDCOST(x->rdmult, x->rddiv, 0, (sse_y + sse_uv))) {
       skip_blk = 1;
-      rate_y = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      rate_y = av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
       rate_uv = 0;
       dist_y = sse_y;
       dist_uv = sse_uv;
     } else {
       skip_blk = 0;
-      rate_y += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+      rate_y += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
     }
 
     if (RDCOST(x->rdmult, x->rddiv, best_rate_y + best_rate_uv, rd_cost->dist) >
@@ -9377,12 +9371,12 @@
 #if CONFIG_SUPERTX
       best_rate_nocoef = rate2;
 #endif
-      rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
     } else {
 #if CONFIG_SUPERTX
       best_rate_nocoef = rate2 - (rate_y + rate_uv_tokenonly[uv_tx]);
 #endif
-      rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+      rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
     }
     this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
     if (this_rd < best_rd) {
@@ -9433,11 +9427,11 @@
                                          best_mbmode.ref_frame[1] };
     int comp_pred_mode = refs[1] > INTRA_FRAME;
 #if CONFIG_REF_MV
-    const uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
+    const uint8_t rf_type = av1_ref_frame_type(best_mbmode.ref_frame);
     if (!comp_pred_mode) {
       int i;
       int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
-                        ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+                        ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
                         : INT_MAX;
 
       for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
@@ -9467,7 +9461,7 @@
 #else
       int i;
       int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
-                        ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+                        ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
                         : INT_MAX;
 
       for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
@@ -9621,8 +9615,8 @@
 #endif
 
   if (!cpi->rc.is_src_frame_alt_ref)
-    vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
-                               sf->adaptive_rd_thresh, bsize, best_mode_index);
+    av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                              sf->adaptive_rd_thresh, bsize, best_mode_index);
 
   // macroblock modes
   *mbmi = best_mbmode;
@@ -9656,12 +9650,12 @@
   }
 }
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
-                                         MACROBLOCK *x, RD_COST *rd_cost,
-                                         BLOCK_SIZE bsize,
-                                         PICK_MODE_CONTEXT *ctx,
-                                         int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rd_pick_inter_mode_sb_seg_skip(AV1_COMP *cpi, TileDataEnc *tile_data,
+                                        MACROBLOCK *x, RD_COST *rd_cost,
+                                        BLOCK_SIZE bsize,
+                                        PICK_MODE_CONTEXT *ctx,
+                                        int64_t best_rd_so_far) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   unsigned char segment_id = mbmi->segment_id;
@@ -9670,7 +9664,7 @@
   int64_t best_pred_diff[REFERENCE_MODES];
   unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
   unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
-  vpx_prob comp_mode_p;
+  aom_prob comp_mode_p;
   INTERP_FILTER best_filter = SWITCHABLE;
   int64_t this_rd = INT64_MAX;
   int rate2 = 0;
@@ -9709,7 +9703,7 @@
     best_filter = EIGHTTAP_REGULAR;
     if (cm->interp_filter == SWITCHABLE &&
 #if CONFIG_EXT_INTERP
-        vp10_is_interp_needed(xd) &&
+        av1_is_interp_needed(xd) &&
 #endif  // CONFIG_EXT_INTERP
         x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
       int rs;
@@ -9721,7 +9715,7 @@
 #else
         mbmi->interp_filter = i;
 #endif
-        rs = vp10_get_switchable_rate(cpi, xd);
+        rs = av1_get_switchable_rate(cpi, xd);
         if (rs < best_rs) {
           best_rs = rs;
 #if CONFIG_DUAL_FILTER
@@ -9740,7 +9734,7 @@
 #else
     mbmi->interp_filter = best_filter;
 #endif
-    rate2 += vp10_get_switchable_rate(cpi, xd);
+    rate2 += av1_get_switchable_rate(cpi, xd);
   } else {
 #if CONFIG_DUAL_FILTER
     for (i = 0; i < 4; ++i) mbmi->interp_filter[0] = cm->interp_filter;
@@ -9750,7 +9744,7 @@
   }
 
   if (cm->reference_mode == REFERENCE_MODE_SELECT)
-    rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
+    rate2 += av1_cost_bit(comp_mode_p, comp_pred);
 
   // Estimate the reference frame signaling cost and add it
   // to the rolling cost variable.
@@ -9775,24 +9769,23 @@
          (cm->interp_filter == mbmi->interp_filter));
 #endif
 
-  vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
-                             cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
+  av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                            cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
 
-  vp10_zero(best_pred_diff);
+  av1_zero(best_pred_diff);
 
   store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, 0);
 }
 
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
-                                    TileDataEnc *tile_data,
-                                    struct macroblock *x, int mi_row,
-                                    int mi_col, struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi, TileDataEnc *tile_data,
+                                   struct macroblock *x, int mi_row, int mi_col,
+                                   struct RD_COST *rd_cost,
 #if CONFIG_SUPERTX
-                                    int *returnrate_nocoef,
+                                   int *returnrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                    BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                    int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+                                   BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                   int64_t best_rd_so_far) {
+  AV1_COMMON *const cm = &cpi->common;
   RD_OPT *const rd_opt = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -9805,16 +9798,16 @@
   struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE];
   static const int flag_list[TOTAL_REFS_PER_FRAME] = {
     0,
-    VPX_LAST_FLAG,
+    AOM_LAST_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_LAST2_FLAG,
-    VPX_LAST3_FLAG,
+    AOM_LAST2_FLAG,
+    AOM_LAST3_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_GOLD_FLAG,
+    AOM_GOLD_FLAG,
 #if CONFIG_EXT_REFS
-    VPX_BWD_FLAG,
+    AOM_BWD_FLAG,
 #endif  // CONFIG_EXT_REFS
-    VPX_ALT_FLAG
+    AOM_ALT_FLAG
   };
   int64_t best_rd = best_rd_so_far;
   int64_t best_yrd = best_rd_so_far;  // FIXME(rbultje) more precise
@@ -9824,7 +9817,7 @@
   int ref_index, best_ref_index = 0;
   unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
   unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
-  vpx_prob comp_mode_p;
+  aom_prob comp_mode_p;
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER tmp_best_filter[4] = { 0 };
 #else
@@ -9834,7 +9827,7 @@
   int64_t dist_uv;
   int skip_uv;
   PREDICTION_MODE mode_uv = DC_PRED;
-  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+  const int intra_cost_penalty = av1_get_intra_cost_penalty(
       cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
 #if CONFIG_EXT_INTER
   int_mv seg_mvs[4][2][TOTAL_REFS_PER_FRAME];
@@ -9845,14 +9838,14 @@
   int best_skip2 = 0;
   int ref_frame_skip_mask[2] = { 0 };
   int internal_active_edge =
-      vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+      av1_active_edge_sb(cpi, mi_row, mi_col) && av1_internal_image_edge(cpi);
 
 #if CONFIG_SUPERTX
   best_rd_so_far = INT64_MAX;
   best_rd = best_rd_so_far;
   best_yrd = best_rd_so_far;
 #endif  // CONFIG_SUPERTX
-  vp10_zero(best_mbmode);
+  av1_zero(best_mbmode);
 
 #if CONFIG_EXT_INTRA
   mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
@@ -9924,8 +9917,8 @@
     int64_t total_sse = INT_MAX;
     int early_term = 0;
 
-    ref_frame = vp10_ref_order[ref_index].ref_frame[0];
-    second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
+    ref_frame = av1_ref_order[ref_index].ref_frame[0];
+    second_ref_frame = av1_ref_order[ref_index].ref_frame[1];
 
     // Look at the reference frame of the best mode so far and set the
     // skip mask to look at a subset of the remaining modes.
@@ -9993,7 +9986,7 @@
     }
 
     if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
-        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+        (ref_frame_skip_mask[1] & (1 << AOMMAX(0, second_ref_frame))))
       continue;
 
     // Test best rd so far against threshold for trying this mode.
@@ -10019,11 +10012,11 @@
     // TODO(jingning, jkoleszar): scaling reference frame not supported for
     // sub8x8 blocks.
     if (ref_frame > INTRA_FRAME &&
-        vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+        av1_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
       continue;
 
     if (second_ref_frame > INTRA_FRAME &&
-        vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
+        av1_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
       continue;
 
     if (comp_pred)
@@ -10216,18 +10209,18 @@
                 bsi, switchable_filter_index, mi_row, mi_col);
 #if CONFIG_EXT_INTERP
 #if CONFIG_DUAL_FILTER
-            if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+            if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
                 (mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
                  mbmi->interp_filter[1] != EIGHTTAP_REGULAR))  // invalid config
               continue;
 #else
-            if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+            if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
                 mbmi->interp_filter != EIGHTTAP_REGULAR)  // invalid config
               continue;
 #endif
 #endif  // CONFIG_EXT_INTERP
             if (tmp_rd == INT64_MAX) continue;
-            rs = vp10_get_switchable_rate(cpi, xd);
+            rs = av1_get_switchable_rate(cpi, xd);
             rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
             if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
 
@@ -10301,14 +10294,14 @@
             bsi, 0, mi_row, mi_col);
 #if CONFIG_EXT_INTERP
 #if CONFIG_DUAL_FILTER
-        if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+        if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
             (mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
              mbmi->interp_filter[1] != EIGHTTAP_REGULAR)) {
           mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
           mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
         }
 #else
-        if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+        if (!av1_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
             mbmi->interp_filter != EIGHTTAP_REGULAR)
           mbmi->interp_filter = EIGHTTAP_REGULAR;
 #endif  // CONFIG_DUAL_FILTER
@@ -10345,22 +10338,22 @@
       distortion2 += distortion;
 
       if (cm->interp_filter == SWITCHABLE)
-        rate2 += vp10_get_switchable_rate(cpi, xd);
+        rate2 += av1_get_switchable_rate(cpi, xd);
 
       if (!mode_excluded)
         mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                   : cm->reference_mode == COMPOUND_REFERENCE;
 
-      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+      compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
 
       tmp_best_rdu =
-          best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+          best_rd - AOMMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
                            RDCOST(x->rdmult, x->rddiv, 0, total_sse));
 
       if (tmp_best_rdu > 0) {
         // If even the 'Y' rd value of split is higher than best so far
         // then dont bother looking at UV
-        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
+        av1_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
 #if CONFIG_VAR_TX
         if (!inter_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                               &uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -10400,10 +10393,10 @@
         if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
             RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
           // Add in the cost of the no skip flag.
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         } else {
           // FIXME(rbultje) make this work for splitmv also
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
           distortion2 = total_sse;
           assert(total_sse >= 0);
           rate2 -= (rate_y + rate_uv);
@@ -10413,7 +10406,7 @@
         }
       } else {
         // Add in the cost of the no skip flag.
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
       }
 
       // Calculate the final RD estimate for this mode.
@@ -10422,7 +10415,7 @@
 
     if (!disable_skip && ref_frame == INTRA_FRAME) {
       for (i = 0; i < REFERENCE_MODES; ++i)
-        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+        best_pred_rd[i] = AOMMIN(best_pred_rd[i], this_rd);
     }
 
     // Did this mode help.. i.e. is it the new best mode
@@ -10441,9 +10434,9 @@
         *returnrate_nocoef = rate2 - rate_y - rate_uv;
         if (!disable_skip)
           *returnrate_nocoef -=
-              vp10_cost_bit(vp10_get_skip_prob(cm, xd), this_skip2);
-        *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
-                                            mbmi->ref_frame[0] != INTRA_FRAME);
+              av1_cost_bit(av1_get_skip_prob(cm, xd), this_skip2);
+        *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd),
+                                           mbmi->ref_frame[0] != INTRA_FRAME);
         assert(*returnrate_nocoef > 0);
 #endif  // CONFIG_SUPERTX
         rd_cost->dist = distortion2;
@@ -10468,11 +10461,11 @@
           int qstep = xd->plane[0].dequant[1];
           // TODO(debargha): Enhance this by specializing for each mode_index
           int scale = 4;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             qstep >>= (xd->bd - 8);
           }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
           if (x->source_variance < UINT_MAX) {
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
@@ -10552,8 +10545,8 @@
          !is_inter_block(&best_mbmode));
 #endif
 
-  vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
-                             sf->adaptive_rd_thresh, bsize, best_ref_index);
+  av1_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                            sf->adaptive_rd_thresh, bsize, best_ref_index);
 
   // macroblock modes
   *mbmi = best_mbmode;
@@ -10587,34 +10580,34 @@
 }
 
 #if CONFIG_OBMC
-// This function has a structure similar to vp10_build_obmc_inter_prediction
+// This function has a structure similar to av1_build_obmc_inter_prediction
 //
 // The OBMC predictor is computed as:
 //
 //  PObmc(x,y) =
-//    VPX_BLEND_A64(Mh(x),
-//                  VPX_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
+//    AOM_BLEND_A64(Mh(x),
+//                  AOM_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
 //                  PLeft(x, y))
 //
-// Scaling up by VPX_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
+// Scaling up by AOM_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
 // rounding, this can be written as:
 //
-//  VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
+//  AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
 //    Mh(x) * Mv(y) * P(x,y) +
 //      Mh(x) * Cv(y) * Pabove(x,y) +
-//      VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+//      AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
 //
 // Where :
 //
-//  Cv(y) = VPX_BLEND_A64_MAX_ALPHA - Mv(y)
-//  Ch(y) = VPX_BLEND_A64_MAX_ALPHA - Mh(y)
+//  Cv(y) = AOM_BLEND_A64_MAX_ALPHA - Mv(y)
+//  Ch(y) = AOM_BLEND_A64_MAX_ALPHA - Mh(y)
 //
 // This function computes 'wsrc' and 'mask' as:
 //
 //  wsrc(x, y) =
-//    VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * src(x, y) -
+//    AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * src(x, y) -
 //      Mh(x) * Cv(y) * Pabove(x,y) +
-//      VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+//      AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
 //
 //  mask(x, y) = Mh(x) * Mv(y)
 //
@@ -10623,10 +10616,9 @@
 // computing:
 //
 //  error(x, y) =
-//    wsrc(x, y) - mask(x, y) * P(x, y) / (VPX_BLEND_A64_MAX_ALPHA ** 2)
+//    wsrc(x, y) - mask(x, y) * P(x, y) / (AOM_BLEND_A64_MAX_ALPHA ** 2)
 //
-static void calc_target_weighted_pred(const VP10_COMMON *cm,
-                                      const MACROBLOCK *x,
+static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x,
                                       const MACROBLOCKD *xd, int mi_row,
                                       int mi_col, const uint8_t *above,
                                       int above_stride, const uint8_t *left,
@@ -10638,26 +10630,26 @@
   const int bh = 8 * xd->n8_h;
   const int wsrc_stride = bw;
   const int mask_stride = bw;
-  const int src_scale = VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA;
-#if CONFIG_VP9_HIGHBITDEPTH
+  const int src_scale = AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA;
+#if CONFIG_AOM_HIGHBITDEPTH
   const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
 #else
   const int is_hbd = 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // plane 0 should not be subsampled
   assert(xd->plane[0].subsampling_x == 0);
   assert(xd->plane[0].subsampling_y == 0);
 
-  vp10_zero_array(wsrc_buf, bw * bh);
-  for (i = 0; i < bw * bh; ++i) mask_buf[i] = VPX_BLEND_A64_MAX_ALPHA;
+  av1_zero_array(wsrc_buf, bw * bh);
+  for (i = 0; i < bw * bh; ++i) mask_buf[i] = AOM_BLEND_A64_MAX_ALPHA;
 
   // handle above row
   if (xd->up_available) {
     const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
-    const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+    const int miw = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
     const int mi_row_offset = -1;
-    const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+    const uint8_t *const mask1d = av1_get_obmc_mask(overlap);
 
     assert(miw > 0);
 
@@ -10667,7 +10659,7 @@
       const MB_MODE_INFO *const above_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+          AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
       const int neighbor_bw = mi_step * MI_SIZE;
 
       if (is_neighbor_overlappable(above_mbmi)) {
@@ -10680,7 +10672,7 @@
 
           for (row = 0; row < overlap; ++row) {
             const uint8_t m0 = mask1d[row];
-            const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+            const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
             for (col = 0; col < neighbor_bw; ++col) {
               wsrc[col] = m1 * tmp[col];
               mask[col] = m0;
@@ -10689,13 +10681,13 @@
             mask += mask_stride;
             tmp += tmp_stride;
           }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           const uint16_t *tmp = CONVERT_TO_SHORTPTR(above);
 
           for (row = 0; row < overlap; ++row) {
             const uint8_t m0 = mask1d[row];
-            const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+            const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
             for (col = 0; col < neighbor_bw; ++col) {
               wsrc[col] = m1 * tmp[col];
               mask[col] = m0;
@@ -10704,7 +10696,7 @@
             mask += mask_stride;
             tmp += tmp_stride;
           }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
@@ -10714,16 +10706,16 @@
   }
 
   for (i = 0; i < bw * bh; ++i) {
-    wsrc_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
-    mask_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
+    wsrc_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
+    mask_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
   }
 
   // handle left column
   if (xd->left_available) {
     const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
-    const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+    const int mih = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
     const int mi_col_offset = -1;
-    const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+    const uint8_t *const mask1d = av1_get_obmc_mask(overlap);
 
     assert(mih > 0);
 
@@ -10733,7 +10725,7 @@
       const MB_MODE_INFO *const left_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+          AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
       const int neighbor_bh = mi_step * MI_SIZE;
 
       if (is_neighbor_overlappable(left_mbmi)) {
@@ -10747,32 +10739,32 @@
           for (row = 0; row < neighbor_bh; ++row) {
             for (col = 0; col < overlap; ++col) {
               const uint8_t m0 = mask1d[col];
-              const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
-              wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
-                          (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
-              mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+              const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
+              wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
+                          (tmp[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
+              mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
             }
             wsrc += wsrc_stride;
             mask += mask_stride;
             tmp += tmp_stride;
           }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           const uint16_t *tmp = CONVERT_TO_SHORTPTR(left);
 
           for (row = 0; row < neighbor_bh; ++row) {
             for (col = 0; col < overlap; ++col) {
               const uint8_t m0 = mask1d[col];
-              const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
-              wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
-                          (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
-              mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+              const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
+              wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
+                          (tmp[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
+              mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
             }
             wsrc += wsrc_stride;
             mask += mask_stride;
             tmp += tmp_stride;
           }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
@@ -10791,7 +10783,7 @@
       wsrc_buf += wsrc_stride;
       src += x->plane[0].src.stride;
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   } else {
     const uint16_t *src = CONVERT_TO_SHORTPTR(x->plane[0].src.buf);
 
@@ -10802,7 +10794,7 @@
       wsrc_buf += wsrc_stride;
       src += x->plane[0].src.stride;
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 }
 #endif  // CONFIG_OBMC
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index 4ce2879..de02f1f 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_RDOPT_H_
-#define VP10_ENCODER_RDOPT_H_
+#ifndef AV1_ENCODER_RDOPT_H_
+#define AV1_ENCODER_RDOPT_H_
 
 #include "av1/common/blockd.h"
 
@@ -21,71 +21,70 @@
 #endif
 
 struct TileInfo;
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 struct RD_COST;
 
-void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
-                                struct RD_COST *rd_cost, BLOCK_SIZE bsize,
-                                PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
+                               struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+                               PICK_MODE_CONTEXT *ctx, int64_t best_rd);
 
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                            const struct buf_2d *ref,
-                                            BLOCK_SIZE bs);
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                                 const struct buf_2d *ref,
-                                                 BLOCK_SIZE bs, int bd);
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+                                           const struct buf_2d *ref,
+                                           BLOCK_SIZE bs);
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+                                                const struct buf_2d *ref,
+                                                BLOCK_SIZE bs, int bd);
 #endif
 
-void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
-                                struct TileDataEnc *tile_data,
-                                struct macroblock *x, int mi_row, int mi_col,
-                                struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
+                               struct TileDataEnc *tile_data,
+                               struct macroblock *x, int mi_row, int mi_col,
+                               struct RD_COST *rd_cost,
 #if CONFIG_SUPERTX
-                                int *returnrate_nocoef,
+                               int *returnrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                int64_t best_rd_so_far);
+                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                               int64_t best_rd_so_far);
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(
-    struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+void av1_rd_pick_inter_mode_sb_seg_skip(
+    struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
     struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
     int64_t best_rd_so_far);
 
-int vp10_internal_image_edge(struct VP10_COMP *cpi);
-int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
-int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
-int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
+int av1_internal_image_edge(struct AV1_COMP *cpi);
+int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
+int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
+int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
 
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
-                                    struct TileDataEnc *tile_data,
-                                    struct macroblock *x, int mi_row,
-                                    int mi_col, struct RD_COST *rd_cost,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
+                                   struct TileDataEnc *tile_data,
+                                   struct macroblock *x, int mi_row, int mi_col,
+                                   struct RD_COST *rd_cost,
 #if CONFIG_SUPERTX
-                                    int *returnrate_nocoef,
+                                   int *returnrate_nocoef,
 #endif  // CONFIG_SUPERTX
-                                    BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                    int64_t best_rd_so_far);
+                                   BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                   int64_t best_rd_so_far);
 
 #if CONFIG_SUPERTX
 #if CONFIG_VAR_TX
-void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
-                        int blk_row, int blk_col, int plane, int block,
-                        int plane_bsize, int coeff_ctx, int *rate,
-                        int64_t *dist, int64_t *bsse, int *skip);
+void av1_tx_block_rd_b(const AV1_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+                       int blk_row, int blk_col, int plane, int block,
+                       int plane_bsize, int coeff_ctx, int *rate, int64_t *dist,
+                       int64_t *bsse, int *skip);
 #endif
 
-void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
-                                   int *rate, int64_t *distortion,
-                                   int *skippable, int64_t *sse,
-                                   int64_t ref_best_rd, int plane,
-                                   BLOCK_SIZE bsize, TX_SIZE tx_size,
-                                   int use_fast_coef_casting);
+void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate,
+                                  int64_t *distortion, int *skippable,
+                                  int64_t *sse, int64_t ref_best_rd, int plane,
+                                  BLOCK_SIZE bsize, TX_SIZE tx_size,
+                                  int use_fast_coef_casting);
 #endif  // CONFIG_SUPERTX
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RDOPT_H_
+#endif  // AV1_ENCODER_RDOPT_H_
diff --git a/av1/encoder/resize.c b/av1/encoder/resize.c
index e209b21..91fa6ed 100644
--- a/av1/encoder/resize.c
+++ b/av1/encoder/resize.c
@@ -15,9 +15,9 @@
 #include <stdlib.h>
 #include <string.h>
 
-#if CONFIG_VP9_HIGHBITDEPTH
-#include "aom_dsp/vpx_dsp_common.h"
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+#include "aom_dsp/aom_dsp_common.h"
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #include "aom_ports/mem.h"
 #include "av1/common/common.h"
 #include "av1/encoder/resize.h"
@@ -132,8 +132,8 @@
 };
 
 // Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
-static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
+static const int16_t av1_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t av1_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
 
 static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
   int outlength16 = outlength * 16;
@@ -239,8 +239,8 @@
 static void down2_symeven(const uint8_t *const input, int length,
                           uint8_t *output) {
   // Actual filter len = 2 * filter_len_half.
-  const int16_t *filter = vp10_down2_symeven_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  const int16_t *filter = av1_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
   int i, j;
   uint8_t *optr = output;
   int l1 = filter_len_half;
@@ -295,8 +295,8 @@
 static void down2_symodd(const uint8_t *const input, int length,
                          uint8_t *output) {
   // Actual filter len = 2 * filter_len_half - 1.
-  const int16_t *filter = vp10_down2_symodd_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  const int16_t *filter = av1_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
   int i, j;
   uint8_t *optr = output;
   int l1 = filter_len_half - 1;
@@ -419,9 +419,9 @@
   }
 }
 
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
-                       int in_stride, uint8_t *output, int height2, int width2,
-                       int out_stride) {
+void av1_resize_plane(const uint8_t *const input, int height, int width,
+                      int in_stride, uint8_t *output, int height2, int width2,
+                      int out_stride) {
   int i;
   uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
   uint8_t *tmpbuf =
@@ -450,7 +450,7 @@
   free(arrbuf2);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_interpolate(const uint16_t *const input, int inlength,
                                uint16_t *output, int outlength, int bd) {
   const int64_t delta =
@@ -541,8 +541,8 @@
 static void highbd_down2_symeven(const uint16_t *const input, int length,
                                  uint16_t *output, int bd) {
   // Actual filter len = 2 * filter_len_half.
-  static const int16_t *filter = vp10_down2_symeven_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  static const int16_t *filter = av1_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
   int i, j;
   uint16_t *optr = output;
   int l1 = filter_len_half;
@@ -597,8 +597,8 @@
 static void highbd_down2_symodd(const uint16_t *const input, int length,
                                 uint16_t *output, int bd) {
   // Actual filter len = 2 * filter_len_half - 1.
-  static const int16_t *filter = vp10_down2_symodd_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  static const int16_t *filter = av1_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
   int i, j;
   uint16_t *optr = output;
   int l1 = filter_len_half - 1;
@@ -708,9 +708,9 @@
   }
 }
 
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
-                              int in_stride, uint8_t *output, int height2,
-                              int width2, int out_stride, int bd) {
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                             int in_stride, uint8_t *output, int height2,
+                             int width2, int out_stride, int bd) {
   int i;
   uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
   uint16_t *tmpbuf =
@@ -736,84 +736,84 @@
   free(arrbuf);
   free(arrbuf2);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
-                    owidth / 2, ouv_stride);
-  vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
-                    owidth / 2, ouv_stride);
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth) {
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                   owidth / 2, ouv_stride);
+  av1_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                   owidth / 2, ouv_stride);
 }
 
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
-                    ouv_stride);
-  vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
-                    ouv_stride);
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth) {
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+                   ouv_stride);
+  av1_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+                   ouv_stride);
 }
 
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
-                    ouv_stride);
-  vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
-                    ouv_stride);
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth) {
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                   ouv_stride);
+  av1_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                   ouv_stride);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
-                           oy_stride, bd);
-  vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
-                           owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
-                           owidth / 2, ouv_stride, bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd) {
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                          oy_stride, bd);
+  av1_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                          owidth / 2, ouv_stride, bd);
+  av1_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                          owidth / 2, ouv_stride, bd);
 }
 
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
-                           oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
-                           owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
-                           owidth / 2, ouv_stride, bd);
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd) {
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                          oy_stride, bd);
+  av1_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+                          owidth / 2, ouv_stride, bd);
+  av1_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+                          owidth / 2, ouv_stride, bd);
 }
 
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
-                           oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
-                           ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
-                           ouv_stride, bd);
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd) {
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                          oy_stride, bd);
+  av1_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                          ouv_stride, bd);
+  av1_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                          ouv_stride, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/encoder/resize.h b/av1/encoder/resize.h
index 8fe1d1b..94f9ea3 100644
--- a/av1/encoder/resize.h
+++ b/av1/encoder/resize.h
@@ -8,61 +8,61 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_RESIZE_H_
-#define VP10_ENCODER_RESIZE_H_
+#ifndef AV1_ENCODER_RESIZE_H_
+#define AV1_ENCODER_RESIZE_H_
 
 #include <stdio.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
-                       int in_stride, uint8_t *output, int height2, int width2,
-                       int out_stride);
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
-                          const uint8_t *const u, const uint8_t *const v,
-                          int uv_stride, int height, int width, uint8_t *oy,
-                          int oy_stride, uint8_t *ou, uint8_t *ov,
-                          int ouv_stride, int oheight, int owidth);
+void av1_resize_plane(const uint8_t *const input, int height, int width,
+                      int in_stride, uint8_t *output, int height2, int width2,
+                      int out_stride);
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth);
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth);
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
+                         const uint8_t *const u, const uint8_t *const v,
+                         int uv_stride, int height, int width, uint8_t *oy,
+                         int oy_stride, uint8_t *ou, uint8_t *ov,
+                         int ouv_stride, int oheight, int owidth);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
-                              int in_stride, uint8_t *output, int height2,
-                              int width2, int out_stride, int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
-                                 const uint8_t *const u, const uint8_t *const v,
-                                 int uv_stride, int height, int width,
-                                 uint8_t *oy, int oy_stride, uint8_t *ou,
-                                 uint8_t *ov, int ouv_stride, int oheight,
-                                 int owidth, int bd);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                             int in_stride, uint8_t *output, int height2,
+                             int width2, int out_stride, int bd);
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd);
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd);
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+                                const uint8_t *const u, const uint8_t *const v,
+                                int uv_stride, int height, int width,
+                                uint8_t *oy, int oy_stride, uint8_t *ou,
+                                uint8_t *ov, int ouv_stride, int oheight,
+                                int owidth, int bd);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RESIZE_H_
+#endif  // AV1_ENCODER_RESIZE_H_
diff --git a/av1/encoder/segmentation.c b/av1/encoder/segmentation.c
index 5ac1283..9585878 100644
--- a/av1/encoder/segmentation.c
+++ b/av1/encoder/segmentation.c
@@ -10,7 +10,7 @@
 
 #include <limits.h>
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/pred_common.h"
 #include "av1/common/tile_common.h"
@@ -19,38 +19,38 @@
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/subexp.h"
 
-void vp10_enable_segmentation(struct segmentation *seg) {
+void av1_enable_segmentation(struct segmentation *seg) {
   seg->enabled = 1;
   seg->update_map = 1;
   seg->update_data = 1;
 }
 
-void vp10_disable_segmentation(struct segmentation *seg) {
+void av1_disable_segmentation(struct segmentation *seg) {
   seg->enabled = 0;
   seg->update_map = 0;
   seg->update_data = 0;
 }
 
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
-                           unsigned char abs_delta) {
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
+                          unsigned char abs_delta) {
   seg->abs_delta = abs_delta;
 
   memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
 }
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
-                             SEG_LVL_FEATURES feature_id) {
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
+                            SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] &= ~(1 << feature_id);
 }
 
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
-                        SEG_LVL_FEATURES feature_id) {
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
+                       SEG_LVL_FEATURES feature_id) {
   seg->feature_data[segment_id][feature_id] = 0;
 }
 
 // Based on set of segment counts calculate a probability tree
 static void calc_segtree_probs(unsigned *segcounts,
-                               vpx_prob *segment_tree_probs,
-                               const vpx_prob *cur_tree_probs) {
+                               aom_prob *segment_tree_probs,
+                               const aom_prob *cur_tree_probs) {
   // Work out probabilities of each segment
   const unsigned cc[4] = { segcounts[0] + segcounts[1],
                            segcounts[2] + segcounts[3],
@@ -70,13 +70,13 @@
   for (i = 0; i < 7; i++) {
     const unsigned *ct =
         i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
-    vp10_prob_diff_update_savings_search(
+    av1_prob_diff_update_savings_search(
         ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
   }
 }
 
 // Based on set of segment counts and probabilities calculate a cost estimate
-static int cost_segmap(unsigned *segcounts, vpx_prob *probs) {
+static int cost_segmap(unsigned *segcounts, aom_prob *probs) {
   const int c01 = segcounts[0] + segcounts[1];
   const int c23 = segcounts[2] + segcounts[3];
   const int c45 = segcounts[4] + segcounts[5];
@@ -85,35 +85,35 @@
   const int c4567 = c45 + c67;
 
   // Cost the top node of the tree
-  int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
+  int cost = c0123 * av1_cost_zero(probs[0]) + c4567 * av1_cost_one(probs[0]);
 
   // Cost subsequent levels
   if (c0123 > 0) {
-    cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
+    cost += c01 * av1_cost_zero(probs[1]) + c23 * av1_cost_one(probs[1]);
 
     if (c01 > 0)
-      cost += segcounts[0] * vp10_cost_zero(probs[3]) +
-              segcounts[1] * vp10_cost_one(probs[3]);
+      cost += segcounts[0] * av1_cost_zero(probs[3]) +
+              segcounts[1] * av1_cost_one(probs[3]);
     if (c23 > 0)
-      cost += segcounts[2] * vp10_cost_zero(probs[4]) +
-              segcounts[3] * vp10_cost_one(probs[4]);
+      cost += segcounts[2] * av1_cost_zero(probs[4]) +
+              segcounts[3] * av1_cost_one(probs[4]);
   }
 
   if (c4567 > 0) {
-    cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
+    cost += c45 * av1_cost_zero(probs[2]) + c67 * av1_cost_one(probs[2]);
 
     if (c45 > 0)
-      cost += segcounts[4] * vp10_cost_zero(probs[5]) +
-              segcounts[5] * vp10_cost_one(probs[5]);
+      cost += segcounts[4] * av1_cost_zero(probs[5]) +
+              segcounts[5] * av1_cost_one(probs[5]);
     if (c67 > 0)
-      cost += segcounts[6] * vp10_cost_zero(probs[6]) +
-              segcounts[7] * vp10_cost_one(probs[6]);
+      cost += segcounts[6] * av1_cost_zero(probs[6]) +
+              segcounts[7] * av1_cost_one(probs[6]);
   }
 
   return cost;
 }
 
-static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs(const AV1_COMMON *cm, MACROBLOCKD *xd,
                        const TileInfo *tile, MODE_INFO **mi,
                        unsigned *no_pred_segcounts,
                        unsigned (*temporal_predictor_count)[2],
@@ -138,7 +138,7 @@
     const int pred_segment_id =
         get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
     const int pred_flag = pred_segment_id == segment_id;
-    const int pred_context = vp10_get_pred_context_seg_id(xd);
+    const int pred_context = av1_get_pred_context_seg_id(xd);
 
     // Store the prediction status for this mb and update counts
     // as appropriate
@@ -150,7 +150,7 @@
   }
 }
 
-static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                           const TileInfo *tile, MODE_INFO **mi,
                           unsigned *no_pred_segcounts,
                           unsigned (*temporal_predictor_count)[2],
@@ -285,7 +285,7 @@
 #endif  // CONFIG_EXT_PARTITION_TYPES
 }
 
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
   struct segmentation *seg = &cm->seg;
   struct segmentation_probs *segp = &cm->fc->seg;
 
@@ -298,23 +298,23 @@
   unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
   unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
 
-  vpx_prob no_pred_tree[SEG_TREE_PROBS];
-  vpx_prob t_pred_tree[SEG_TREE_PROBS];
-  vpx_prob t_nopred_prob[PREDICTION_PROBS];
+  aom_prob no_pred_tree[SEG_TREE_PROBS];
+  aom_prob t_pred_tree[SEG_TREE_PROBS];
+  aom_prob t_nopred_prob[PREDICTION_PROBS];
 
   (void)xd;
 
   // We are about to recompute all the segment counts, so zero the accumulators.
-  vp10_zero(cm->counts.seg);
+  av1_zero(cm->counts.seg);
 
   // First of all generate stats regarding how well the last segment map
   // predicts this one
   for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
     TileInfo tile_info;
-    vp10_tile_set_row(&tile_info, cm, tile_row);
+    av1_tile_set_row(&tile_info, cm, tile_row);
     for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
       MODE_INFO **mi_ptr;
-      vp10_tile_set_col(&tile_info, cm, tile_col);
+      av1_tile_set_col(&tile_info, cm, tile_col);
       mi_ptr = cm->mi_grid_visible + tile_info.mi_row_start * cm->mi_stride +
                tile_info.mi_col_start;
       for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
@@ -348,13 +348,13 @@
       const int count1 = temporal_predictor_count[i][1];
 
       t_nopred_prob[i] = get_binary_prob(count0, count1);
-      vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
-                                           segp->pred_probs[i],
-                                           &t_nopred_prob[i], DIFF_UPDATE_PROB);
+      av1_prob_diff_update_savings_search(temporal_predictor_count[i],
+                                          segp->pred_probs[i],
+                                          &t_nopred_prob[i], DIFF_UPDATE_PROB);
 
       // Add in the predictor signaling cost
-      t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
-                     count1 * vp10_cost_one(t_nopred_prob[i]);
+      t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
+                     count1 * av1_cost_one(t_nopred_prob[i]);
     }
   }
 
@@ -367,12 +367,12 @@
   }
 }
 
-void vp10_reset_segment_features(VP10_COMMON *cm) {
+void av1_reset_segment_features(AV1_COMMON *cm) {
   struct segmentation *seg = &cm->seg;
 
   // Set up default state for MB feature flags
   seg->enabled = 0;
   seg->update_map = 0;
   seg->update_data = 0;
-  vp10_clearall_segfeatures(seg);
+  av1_clearall_segfeatures(seg);
 }
diff --git a/av1/encoder/segmentation.h b/av1/encoder/segmentation.h
index 3c79bd1..e15c8b1 100644
--- a/av1/encoder/segmentation.h
+++ b/av1/encoder/segmentation.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_SEGMENTATION_H_
-#define VP10_ENCODER_SEGMENTATION_H_
+#ifndef AV1_ENCODER_SEGMENTATION_H_
+#define AV1_ENCODER_SEGMENTATION_H_
 
 #include "av1/common/blockd.h"
 #include "av1/encoder/encoder.h"
@@ -18,13 +18,13 @@
 extern "C" {
 #endif
 
-void vp10_enable_segmentation(struct segmentation *seg);
-void vp10_disable_segmentation(struct segmentation *seg);
+void av1_enable_segmentation(struct segmentation *seg);
+void av1_disable_segmentation(struct segmentation *seg);
 
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
-                             SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
-                        SEG_LVL_FEATURES feature_id);
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
+                            SEG_LVL_FEATURES feature_id);
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
+                       SEG_LVL_FEATURES feature_id);
 
 // The values given for each segment can be either deltas (from the default
 // value chosen for the frame) or absolute values.
@@ -36,15 +36,15 @@
 //
 // abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
 // the absolute values given).
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
-                           unsigned char abs_delta);
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
+                          unsigned char abs_delta);
 
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd);
 
-void vp10_reset_segment_features(VP10_COMMON *cm);
+void av1_reset_segment_features(AV1_COMMON *cm);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SEGMENTATION_H_
+#endif  // AV1_ENCODER_SEGMENTATION_H_
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index ab66250..d0b198b 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -14,7 +14,7 @@
 #include "av1/encoder/speed_features.h"
 #include "av1/encoder/rdopt.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 // Mesh search patters for various speed settings
 static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
@@ -37,7 +37,7 @@
 
 // Intra only frames, golden frames (except alt ref overlays) and
 // alt ref frames tend to be coded at a higher than ambient quality
-static int frame_is_boosted(const VP10_COMP *cpi) {
+static int frame_is_boosted(const AV1_COMP *cpi) {
   return frame_is_kf_gf_arf(cpi);
 }
 
@@ -47,7 +47,7 @@
 // partly on the screen area that over which they propogate. Propogation is
 // limited by transform block size but the screen area take up by a given block
 // size will be larger for a small image format stretched to full screen.
-static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
+static BLOCK_SIZE set_partition_min_limit(AV1_COMMON *const cm) {
   unsigned int screen_area = (cm->width * cm->height);
 
   // Select block size based on image format size.
@@ -63,13 +63,13 @@
   }
 }
 
-static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_good_speed_feature_framesize_dependent(AV1_COMP *cpi,
                                                        SPEED_FEATURES *sf,
                                                        int speed) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   if (speed >= 1) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->disable_split_mask =
           cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
       sf->partition_search_breakout_dist_thr = (1 << 23);
@@ -80,7 +80,7 @@
   }
 
   if (speed >= 2) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->disable_split_mask =
           cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
       sf->adaptive_pred_interp_filter = 0;
@@ -95,7 +95,7 @@
   }
 
   if (speed >= 3) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->disable_split_mask = DISABLE_ALL_SPLIT;
       sf->schedule_mode_search = cm->base_qindex < 220 ? 1 : 0;
       sf->partition_search_breakout_dist_thr = (1 << 25);
@@ -114,12 +114,12 @@
   // Also if the image edge is internal to the coded area.
   if ((speed >= 1) && (cpi->oxcf.pass == 2) &&
       ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
-       (vp10_internal_image_edge(cpi)))) {
+       (av1_internal_image_edge(cpi)))) {
     sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
   }
 
   if (speed >= 4) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->partition_search_breakout_dist_thr = (1 << 26);
     } else {
       sf->partition_search_breakout_dist_thr = (1 << 24);
@@ -128,13 +128,13 @@
   }
 }
 
-static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
+static void set_good_speed_feature(AV1_COMP *cpi, AV1_COMMON *cm,
                                    SPEED_FEATURES *sf, int speed) {
   const int boosted = frame_is_boosted(cpi);
 
   if (speed >= 1) {
     if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
-        vp10_internal_image_edge(cpi)) {
+        av1_internal_image_edge(cpi)) {
       sf->use_square_partition_only = !frame_is_boosted(cpi);
     } else {
       sf->use_square_partition_only = !frame_is_intra_only(cm);
@@ -237,12 +237,12 @@
   }
 }
 
-static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_rt_speed_feature_framesize_dependent(AV1_COMP *cpi,
                                                      SPEED_FEATURES *sf,
                                                      int speed) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   if (speed >= 1) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->disable_split_mask =
           cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
     } else {
@@ -251,7 +251,7 @@
   }
 
   if (speed >= 2) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->disable_split_mask =
           cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
     } else {
@@ -260,7 +260,7 @@
   }
 
   if (speed >= 5) {
-    if (VPXMIN(cm->width, cm->height) >= 720) {
+    if (AOMMIN(cm->width, cm->height) >= 720) {
       sf->partition_search_breakout_dist_thr = (1 << 25);
     } else {
       sf->partition_search_breakout_dist_thr = (1 << 23);
@@ -269,13 +269,13 @@
 
   if (speed >= 7) {
     sf->encode_breakout_thresh =
-        (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
+        (AOMMIN(cm->width, cm->height) >= 720) ? 800 : 300;
   }
 }
 
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
-                                 vpx_tune_content content) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_rt_speed_feature(AV1_COMP *cpi, SPEED_FEATURES *sf, int speed,
+                                 aom_tune_content content) {
+  AV1_COMMON *const cm = &cpi->common;
   const int is_keyframe = cm->frame_type == KEY_FRAME;
   const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
   sf->static_segmentation = 0;
@@ -401,7 +401,7 @@
 
     if (!is_keyframe) {
       int i;
-      if (content == VPX_CONTENT_SCREEN) {
+      if (content == AOM_CONTENT_SCREEN) {
         for (i = 0; i < BLOCK_SIZES; ++i)
           sf->intra_y_mode_bsize_mask[i] = INTRA_DC_TM_H_V;
       } else {
@@ -435,9 +435,9 @@
   }
 }
 
-void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_dependent(AV1_COMP *cpi) {
   SPEED_FEATURES *const sf = &cpi->sf;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RD_OPT *const rd = &cpi->rd;
   int i;
 
@@ -464,11 +464,11 @@
   }
 }
 
-void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_independent(AV1_COMP *cpi) {
   SPEED_FEATURES *const sf = &cpi->sf;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->td.mb;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   int i;
 
   // best quality defaults
@@ -567,8 +567,8 @@
     sf->partition_search_breakout_dist_thr <<= 2 * (MAX_SB_SIZE_LOG2 - 6);
   }
 
-  cpi->full_search_sad = vp10_full_search_sad;
-  cpi->diamond_search_sad = vp10_diamond_search_sad;
+  cpi->full_search_sad = av1_full_search_sad;
+  cpi->diamond_search_sad = av1_diamond_search_sad;
 
   sf->allow_exhaustive_searches = 1;
   if (oxcf->mode == BEST) {
@@ -609,14 +609,13 @@
   }
 
   if (sf->mv.subpel_search_method == SUBPEL_TREE) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_more;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
-    cpi->find_fractional_mv_step =
-        vp10_find_best_sub_pixel_tree_pruned_evenmore;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_evenmore;
   }
 
 #if !CONFIG_AOM_QM
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index 2457c5b..18cb380 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_SPEED_FEATURES_H_
-#define VP10_ENCODER_SPEED_FEATURES_H_
+#ifndef AV1_ENCODER_SPEED_FEATURES_H_
+#define AV1_ENCODER_SPEED_FEATURES_H_
 
 #include "av1/common/enums.h"
 
@@ -471,7 +471,7 @@
   // Allow skipping partition search for still image frame
   int allow_partition_search_skip;
 
-  // Fast approximation of vp10_model_rd_from_var_lapndz
+  // Fast approximation of av1_model_rd_from_var_lapndz
   int simple_model_rd_from_var;
 
   // Do sub-pixel search in up-sampled reference frames
@@ -482,13 +482,13 @@
   int use_transform_domain_distortion;
 } SPEED_FEATURES;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi);
-void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi);
+void av1_set_speed_features_framesize_independent(struct AV1_COMP *cpi);
+void av1_set_speed_features_framesize_dependent(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SPEED_FEATURES_H_
+#endif  // AV1_ENCODER_SPEED_FEATURES_H_
diff --git a/av1/encoder/subexp.c b/av1/encoder/subexp.c
index d722654..dd6c250 100644
--- a/av1/encoder/subexp.c
+++ b/av1/encoder/subexp.c
@@ -14,7 +14,7 @@
 #include "av1/encoder/cost.h"
 #include "av1/encoder/subexp.h"
 
-#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define av1_cost_upd256 ((int)(av1_cost_one(upd) - av1_cost_zero(upd)))
 
 static const uint8_t update_bits[255] = {
   5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  6,  6,  6,
@@ -76,54 +76,54 @@
   return i;
 }
 
-static int prob_diff_update_cost(vpx_prob newp, vpx_prob oldp) {
+static int prob_diff_update_cost(aom_prob newp, aom_prob oldp) {
   int delp = remap_prob(newp, oldp);
-  return update_bits[delp] << VP10_PROB_COST_SHIFT;
+  return update_bits[delp] << AV1_PROB_COST_SHIFT;
 }
 
-static void encode_uniform(vp10_writer *w, int v) {
+static void encode_uniform(aom_writer *w, int v) {
   const int l = 8;
   const int m = (1 << l) - 190;
   if (v < m) {
-    vp10_write_literal(w, v, l - 1);
+    aom_write_literal(w, v, l - 1);
   } else {
-    vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
-    vp10_write_literal(w, (v - m) & 1, 1);
+    aom_write_literal(w, m + ((v - m) >> 1), l - 1);
+    aom_write_literal(w, (v - m) & 1, 1);
   }
 }
 
-static INLINE int write_bit_gte(vp10_writer *w, int word, int test) {
-  vp10_write_literal(w, word >= test, 1);
+static INLINE int write_bit_gte(aom_writer *w, int word, int test) {
+  aom_write_literal(w, word >= test, 1);
   return word >= test;
 }
 
-static void encode_term_subexp(vp10_writer *w, int word) {
+static void encode_term_subexp(aom_writer *w, int word) {
   if (!write_bit_gte(w, word, 16)) {
-    vp10_write_literal(w, word, 4);
+    aom_write_literal(w, word, 4);
   } else if (!write_bit_gte(w, word, 32)) {
-    vp10_write_literal(w, word - 16, 4);
+    aom_write_literal(w, word - 16, 4);
   } else if (!write_bit_gte(w, word, 64)) {
-    vp10_write_literal(w, word - 32, 5);
+    aom_write_literal(w, word - 32, 5);
   } else {
     encode_uniform(w, word - 64);
   }
 }
 
-void vp10_write_prob_diff_update(vp10_writer *w, vpx_prob newp, vpx_prob oldp) {
+void av1_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
   const int delp = remap_prob(newp, oldp);
   encode_term_subexp(w, delp);
 }
 
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
-                                         vpx_prob *bestp, vpx_prob upd) {
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+                                        aom_prob *bestp, aom_prob upd) {
   const int old_b = cost_branch256(ct, oldp);
   int bestsavings = 0;
-  vpx_prob newp, bestnewp = oldp;
+  aom_prob newp, bestnewp = oldp;
   const int step = *bestp > oldp ? -1 : 1;
 
   for (newp = *bestp; newp != oldp; newp += step) {
     const int new_b = cost_branch256(ct, newp);
-    const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+    const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
     const int savings = old_b - new_b - update_b;
     if (savings > bestsavings) {
       bestsavings = savings;
@@ -134,17 +134,17 @@
   return bestsavings;
 }
 
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
-                                               const vpx_prob *oldp,
-                                               vpx_prob *bestp, vpx_prob upd,
-                                               int stepsize) {
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
+                                              const aom_prob *oldp,
+                                              aom_prob *bestp, aom_prob upd,
+                                              int stepsize) {
   int i, old_b, new_b, update_b, savings, bestsavings;
   int newp;
   const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
   const int step = stepsize * step_sign;
-  vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
-  vp10_model_to_full_probs(oldp, oldplist);
-  memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+  aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+  av1_model_to_full_probs(oldp, oldplist);
+  memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
     old_b += cost_branch256(ct + 2 * i, oldplist[i]);
   old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
@@ -157,11 +157,11 @@
   for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
     if (newp < 1 || newp > 255) continue;
     newplist[PIVOT_NODE] = newp;
-    vp10_model_to_full_probs(newplist, newplist);
+    av1_model_to_full_probs(newplist, newplist);
     for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
       new_b += cost_branch256(ct + 2 * i, newplist[i]);
     new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
-    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
     savings = old_b - new_b - update_b;
     if (savings > bestsavings) {
       bestsavings = savings;
@@ -174,7 +174,7 @@
 }
 
 #if CONFIG_ENTROPY
-static int get_cost(unsigned int ct[][2], vpx_prob p, int n) {
+static int get_cost(unsigned int ct[][2], aom_prob p, int n) {
   int i, p0 = p;
   unsigned int total_ct[2] = { 0, 0 };
   int cost = 0;
@@ -184,22 +184,22 @@
     total_ct[0] += ct[i][0];
     total_ct[1] += ct[i][1];
     if (i < n)
-      p = vp10_merge_probs(p0, total_ct, COEF_COUNT_SAT_BITS,
-                           COEF_MAX_UPDATE_FACTOR_BITS);
+      p = av1_merge_probs(p0, total_ct, COEF_COUNT_SAT_BITS,
+                          COEF_MAX_UPDATE_FACTOR_BITS);
   }
   return cost;
 }
 
-int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
-                                     vpx_prob *bestp, vpx_prob upd, int n) {
+int av1_prob_update_search_subframe(unsigned int ct[][2], aom_prob oldp,
+                                    aom_prob *bestp, aom_prob upd, int n) {
   const int old_b = get_cost(ct, oldp, n);
   int bestsavings = 0;
-  vpx_prob newp, bestnewp = oldp;
+  aom_prob newp, bestnewp = oldp;
   const int step = *bestp > oldp ? -1 : 1;
 
   for (newp = *bestp; newp != oldp; newp += step) {
     const int new_b = get_cost(ct, newp, n);
-    const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+    const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
     const int savings = old_b - new_b - update_b;
     if (savings > bestsavings) {
       bestsavings = savings;
@@ -210,16 +210,16 @@
   return bestsavings;
 }
 
-int vp10_prob_update_search_model_subframe(
-    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
-    vpx_prob *bestp, vpx_prob upd, int stepsize, int n) {
+int av1_prob_update_search_model_subframe(
+    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const aom_prob *oldp,
+    aom_prob *bestp, aom_prob upd, int stepsize, int n) {
   int i, old_b, new_b, update_b, savings, bestsavings;
   int newp;
   const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
   const int step = stepsize * step_sign;
-  vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
-  vp10_model_to_full_probs(oldp, oldplist);
-  memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+  aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+  av1_model_to_full_probs(oldp, oldplist);
+  memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
     old_b += get_cost(ct[i], oldplist[i], n);
   old_b += get_cost(ct[PIVOT_NODE], oldplist[PIVOT_NODE], n);
@@ -232,11 +232,11 @@
   for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
     if (newp < 1 || newp > 255) continue;
     newplist[PIVOT_NODE] = newp;
-    vp10_model_to_full_probs(newplist, newplist);
+    av1_model_to_full_probs(newplist, newplist);
     for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
       new_b += get_cost(ct[i], newplist[i], n);
     new_b += get_cost(ct[PIVOT_NODE], newplist[PIVOT_NODE], n);
-    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
     savings = old_b - new_b - update_b;
     if (savings > bestsavings) {
       bestsavings = savings;
@@ -249,40 +249,40 @@
 }
 #endif  // CONFIG_ENTROPY
 
-void vp10_cond_prob_diff_update(vp10_writer *w, vpx_prob *oldp,
-                                const unsigned int ct[2]) {
-  const vpx_prob upd = DIFF_UPDATE_PROB;
-  vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+void av1_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
+                               const unsigned int ct[2]) {
+  const aom_prob upd = DIFF_UPDATE_PROB;
+  aom_prob newp = get_binary_prob(ct[0], ct[1]);
   const int savings =
-      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+      av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   assert(newp >= 1);
   if (savings > 0) {
-    vp10_write(w, 1, upd);
-    vp10_write_prob_diff_update(w, newp, *oldp);
+    aom_write(w, 1, upd);
+    av1_write_prob_diff_update(w, newp, *oldp);
     *oldp = newp;
   } else {
-    vp10_write(w, 0, upd);
+    aom_write(w, 0, upd);
   }
 }
 
-int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
-                                       const unsigned int ct[2]) {
-  const vpx_prob upd = DIFF_UPDATE_PROB;
-  vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
+                                      const unsigned int ct[2]) {
+  const aom_prob upd = DIFF_UPDATE_PROB;
+  aom_prob newp = get_binary_prob(ct[0], ct[1]);
   const int savings =
-      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+      av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   return savings;
 }
 
-void vp10_write_primitive_symmetric(vp10_writer *w, int word,
-                                    unsigned int abs_bits) {
+void aom_write_primitive_symmetric(aom_writer *w, int word,
+                                   unsigned int abs_bits) {
   if (word == 0) {
-    vp10_write_bit(w, 0);
+    aom_write_bit(w, 0);
   } else {
     const int x = abs(word);
     const int s = word < 0;
-    vp10_write_bit(w, 1);
-    vp10_write_bit(w, s);
-    vp10_write_literal(w, x - 1, abs_bits);
+    aom_write_bit(w, 1);
+    aom_write_bit(w, s);
+    aom_write_literal(w, x - 1, abs_bits);
   }
 }
diff --git a/av1/encoder/subexp.h b/av1/encoder/subexp.h
index 82ce2e0..c829f2d 100644
--- a/av1/encoder/subexp.h
+++ b/av1/encoder/subexp.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_SUBEXP_H_
-#define VP10_ENCODER_SUBEXP_H_
+#ifndef AV1_ENCODER_SUBEXP_H_
+#define AV1_ENCODER_SUBEXP_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -17,30 +17,29 @@
 
 #include "aom_dsp/prob.h"
 
-struct vp10_writer;
+struct aom_writer;
 
-void vp10_write_prob_diff_update(struct vp10_writer *w, vpx_prob newp,
-                                 vpx_prob oldp);
+void av1_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
+                                aom_prob oldp);
 
-void vp10_cond_prob_diff_update(struct vp10_writer *w, vpx_prob *oldp,
-                                const unsigned int ct[2]);
+void av1_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
+                               const unsigned int ct[2]);
 
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
-                                         vpx_prob *bestp, vpx_prob upd);
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+                                        aom_prob *bestp, aom_prob upd);
 
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
-                                               const vpx_prob *oldp,
-                                               vpx_prob *bestp, vpx_prob upd,
-                                               int stepsize);
-int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
-                                       const unsigned int ct[2]);
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
+                                              const aom_prob *oldp,
+                                              aom_prob *bestp, aom_prob upd,
+                                              int stepsize);
+int av1_cond_prob_diff_update_savings(aom_prob *oldp, const unsigned int ct[2]);
 
 #if CONFIG_ENTROPY
-int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
-                                     vpx_prob *bestp, vpx_prob upd, int n);
-int vp10_prob_update_search_model_subframe(
-    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
-    vpx_prob *bestp, vpx_prob upd, int stepsize, int n);
+int av1_prob_update_search_subframe(unsigned int ct[][2], aom_prob oldp,
+                                    aom_prob *bestp, aom_prob upd, int n);
+int av1_prob_update_search_model_subframe(
+    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const aom_prob *oldp,
+    aom_prob *bestp, aom_prob upd, int stepsize, int n);
 #endif  // CONFIG_ENTROPY
 
 //
@@ -48,10 +47,10 @@
 // 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
 // indicate 0 or non-zero, mag_bits bits are used to indicate magnitide
 // and 1 more bit for the sign if non-zero.
-void vp10_write_primitive_symmetric(vp10_writer *w, int word,
-                                    unsigned int mag_bits);
+void aom_write_primitive_symmetric(aom_writer *w, int word,
+                                   unsigned int mag_bits);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SUBEXP_H_
+#endif  // AV1_ENCODER_SUBEXP_H_
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index 32490cc..4a5de37 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -24,11 +24,11 @@
 #include "av1/encoder/ratectrl.h"
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/temporal_filter.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
 
 static void temporal_filter_predictors_mb_c(
     MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
@@ -61,41 +61,41 @@
     mv_precision_uv = MV_PRECISION_Q3;
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
-                                      scale, 16, 16, which_mv, interp_filter,
-                                      MV_PRECISION_Q3, x, y, xd->bd);
+    av1_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale,
+                                     16, 16, which_mv, interp_filter,
+                                     MV_PRECISION_Q3, x, y, xd->bd);
 
-    vp10_highbd_build_inter_predictor(
-        u_mb_ptr, uv_stride, &pred[256], uv_block_width, &mv, scale,
-        uv_block_width, uv_block_height, which_mv, interp_filter,
-        mv_precision_uv, x, y, xd->bd);
+    av1_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+                                     uv_block_width, &mv, scale, uv_block_width,
+                                     uv_block_height, which_mv, interp_filter,
+                                     mv_precision_uv, x, y, xd->bd);
 
-    vp10_highbd_build_inter_predictor(
-        v_mb_ptr, uv_stride, &pred[512], uv_block_width, &mv, scale,
-        uv_block_width, uv_block_height, which_mv, interp_filter,
-        mv_precision_uv, x, y, xd->bd);
+    av1_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+                                     uv_block_width, &mv, scale, uv_block_width,
+                                     uv_block_height, which_mv, interp_filter,
+                                     mv_precision_uv, x, y, xd->bd);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
-                             which_mv, interp_filter, MV_PRECISION_Q3, x, y);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  av1_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+                            which_mv, interp_filter, MV_PRECISION_Q3, x, y);
 
-  vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
-                             &mv, scale, uv_block_width, uv_block_height,
-                             which_mv, interp_filter, mv_precision_uv, x, y);
+  av1_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+                            &mv, scale, uv_block_width, uv_block_height,
+                            which_mv, interp_filter, mv_precision_uv, x, y);
 
-  vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
-                             &mv, scale, uv_block_width, uv_block_height,
-                             which_mv, interp_filter, mv_precision_uv, x, y);
+  av1_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+                            &mv, scale, uv_block_width, uv_block_height,
+                            which_mv, interp_filter, mv_precision_uv, x, y);
 }
 
-void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
-                                  uint8_t *frame2, unsigned int block_width,
-                                  unsigned int block_height, int strength,
-                                  int filter_weight, unsigned int *accumulator,
-                                  uint16_t *count) {
+void av1_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+                                 uint8_t *frame2, unsigned int block_width,
+                                 unsigned int block_height, int strength,
+                                 int filter_weight, unsigned int *accumulator,
+                                 uint16_t *count) {
   unsigned int i, j, k;
   int modifier;
   int byte = 0;
@@ -152,8 +152,8 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_temporal_filter_apply_c(
     uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
     unsigned int block_width, unsigned int block_height, int strength,
     int filter_weight, unsigned int *accumulator, uint16_t *count) {
@@ -214,9 +214,9 @@
     byte += stride - block_width;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
+static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
                                               uint8_t *arf_frame_buf,
                                               uint8_t *frame_ptr_buf,
                                               int stride) {
@@ -247,7 +247,7 @@
   xd->plane[0].pre[0].stride = stride;
 
   step_param = mv_sf->reduce_first_step_size;
-  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+  step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);
 
 #if CONFIG_REF_MV
   x->mvcost = x->mv_cost_stack[0];
@@ -257,9 +257,9 @@
 #endif
 
   // Ignore mv costing by sending NULL pointer instead of cost arrays
-  vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
-                  cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
-                  &best_ref_mv1);
+  av1_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
+                 cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
+                 &best_ref_mv1);
 
   // Ignore mv costing by sending NULL pointer instead of cost array
   bestsme = cpi->find_fractional_mv_step(
@@ -277,7 +277,7 @@
   return bestsme;
 }
 
-static void temporal_filter_iterate_c(VP10_COMP *cpi,
+static void temporal_filter_iterate_c(AV1_COMP *cpi,
                                       YV12_BUFFER_CONFIG **frames,
                                       int frame_count, int alt_ref_index,
                                       int strength,
@@ -295,7 +295,7 @@
   MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
   YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
   uint8_t *dst1, *dst2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
   DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
   uint8_t *predictor;
@@ -308,7 +308,7 @@
   // Save input state
   uint8_t *input_buffer[MAX_MB_PLANE];
   int i;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     predictor = CONVERT_TO_BYTEPTR(predictor16);
   } else {
@@ -320,19 +320,19 @@
 
   for (mb_row = 0; mb_row < mb_rows; mb_row++) {
     // Source frames are extended to 16 pixels. This is different than
-    //  L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
+    //  L/A/G reference frames that have a border of 32 (AV1ENCBORDERINPIXELS)
     // A 6/8 tap filter is used for motion search.  This requires 2 pixels
     //  before and 3 pixels after.  So the largest Y mv on a border would
-    //  then be 16 - VPX_INTERP_EXTEND. The UV blocks are half the size of the
+    //  then be 16 - AOM_INTERP_EXTEND. The UV blocks are half the size of the
     //  Y and therefore only extended by 8.  The largest mv that a UV block
-    //  can support is 8 - VPX_INTERP_EXTEND.  A UV mv is half of a Y mv.
-    //  (16 - VPX_INTERP_EXTEND) >> 1 which is greater than
-    //  8 - VPX_INTERP_EXTEND.
+    //  can support is 8 - AOM_INTERP_EXTEND.  A UV mv is half of a Y mv.
+    //  (16 - AOM_INTERP_EXTEND) >> 1 which is greater than
+    //  8 - AOM_INTERP_EXTEND.
     // To keep the mv in play for both Y and UV planes the max that it
-    //  can be on a border is therefore 16 - (2*VPX_INTERP_EXTEND+1).
-    cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+    //  can be on a border is therefore 16 - (2*AOM_INTERP_EXTEND+1).
+    cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * AOM_INTERP_EXTEND));
     cpi->td.mb.mv_row_max =
-        ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+        ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * AOM_INTERP_EXTEND);
 
     for (mb_col = 0; mb_col < mb_cols; mb_col++) {
       int i, j, k;
@@ -341,9 +341,9 @@
       memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
       memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
 
-      cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+      cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * AOM_INTERP_EXTEND));
       cpi->td.mb.mv_col_max =
-          ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+          ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * AOM_INTERP_EXTEND);
 
       for (frame = 0; frame < frame_count; frame++) {
         const int thresh_low = 10000;
@@ -378,53 +378,53 @@
               mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
               mb_col * 16, mb_row * 16);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             int adj_strength = strength + 2 * (mbd->bd - 8);
             // Apply the filter (YUV)
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
                 adj_strength, filter_weight, accumulator, count);
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
                 mb_uv_width, mb_uv_height, adj_strength, filter_weight,
                 accumulator + 256, count + 256);
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
                 mb_uv_width, mb_uv_height, adj_strength, filter_weight,
                 accumulator + 512, count + 512);
           } else {
             // Apply the filter (YUV)
-            vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
-                                         predictor, 16, 16, strength,
-                                         filter_weight, accumulator, count);
-            vp10_temporal_filter_apply_c(
+            av1_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+                                        predictor, 16, 16, strength,
+                                        filter_weight, accumulator, count);
+            av1_temporal_filter_apply_c(
                 f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
                 mb_uv_width, mb_uv_height, strength, filter_weight,
                 accumulator + 256, count + 256);
-            vp10_temporal_filter_apply_c(
+            av1_temporal_filter_apply_c(
                 f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
                 mb_uv_width, mb_uv_height, strength, filter_weight,
                 accumulator + 512, count + 512);
           }
 #else
           // Apply the filter (YUV)
-          vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
-                                       predictor, 16, 16, strength,
-                                       filter_weight, accumulator, count);
-          vp10_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
-                                       predictor + 256, mb_uv_width,
-                                       mb_uv_height, strength, filter_weight,
-                                       accumulator + 256, count + 256);
-          vp10_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
-                                       predictor + 512, mb_uv_width,
-                                       mb_uv_height, strength, filter_weight,
-                                       accumulator + 512, count + 512);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+          av1_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+                                      predictor, 16, 16, strength,
+                                      filter_weight, accumulator, count);
+          av1_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
+                                      predictor + 256, mb_uv_width,
+                                      mb_uv_height, strength, filter_weight,
+                                      accumulator + 256, count + 256);
+          av1_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
+                                      predictor + 512, mb_uv_width,
+                                      mb_uv_height, strength, filter_weight,
+                                      accumulator + 512, count + 512);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         uint16_t *dst1_16;
         uint16_t *dst2_16;
@@ -544,7 +544,7 @@
         }
         byte += stride - mb_uv_width;
       }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       mb_y_offset += 16;
       mb_uv_offset += mb_uv_width;
     }
@@ -557,11 +557,11 @@
 }
 
 // Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
+static void adjust_arnr_filter(AV1_COMP *cpi, int distance, int group_boost,
                                int *arnr_frames, int *arnr_strength) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int frames_after_arf =
-      vp10_lookahead_depth(cpi->lookahead) - distance - 1;
+      av1_lookahead_depth(cpi->lookahead) - distance - 1;
   int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
   int frames_bwd;
   int q, frames, strength;
@@ -581,11 +581,11 @@
 
   // Adjust the strength based on active max q.
   if (cpi->common.current_video_frame > 1)
-    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
-                                       cpi->common.bit_depth));
+    q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+                                      cpi->common.bit_depth));
   else
-    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
-                                       cpi->common.bit_depth));
+    q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+                                      cpi->common.bit_depth));
   if (q > 16) {
     strength = oxcf->arnr_strength;
   } else {
@@ -615,7 +615,7 @@
   *arnr_strength = strength;
 }
 
-void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
+void av1_temporal_filter(AV1_COMP *cpi, int distance) {
   RATE_CONTROL *const rc = &cpi->rc;
   int frame;
   int frames_to_blur;
@@ -658,7 +658,7 @@
   for (frame = 0; frame < frames_to_blur; ++frame) {
     const int which_buffer = start_frame - frame;
     struct lookahead_entry *buf =
-        vp10_lookahead_peek(cpi->lookahead, which_buffer);
+        av1_lookahead_peek(cpi->lookahead, which_buffer);
     frames[frames_to_blur - 1 - frame] = &buf->img;
   }
 
@@ -666,16 +666,16 @@
 // Setup scaling factors. Scaling on each of the arnr frames is not
 // supported.
 // ARF is produced at the native frame size and resized when coded.
-#if CONFIG_VP9_HIGHBITDEPTH
-    vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+    av1_setup_scale_factors_for_frame(
         &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
         frames[0]->y_crop_width, frames[0]->y_crop_height,
         cpi->common.use_highbitdepth);
 #else
-    vp10_setup_scale_factors_for_frame(
+    av1_setup_scale_factors_for_frame(
         &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
         frames[0]->y_crop_width, frames[0]->y_crop_height);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   temporal_filter_iterate_c(cpi, frames, frames_to_blur,
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
index ce5291a..ef21215 100644
--- a/av1/encoder/temporal_filter.h
+++ b/av1/encoder/temporal_filter.h
@@ -8,17 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_
-#define VP10_ENCODER_TEMPORAL_FILTER_H_
+#ifndef AV1_ENCODER_TEMPORAL_FILTER_H_
+#define AV1_ENCODER_TEMPORAL_FILTER_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_temporal_filter(VP10_COMP *cpi, int distance);
+void av1_temporal_filter(AV1_COMP *cpi, int distance);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TEMPORAL_FILTER_H_
+#endif  // AV1_ENCODER_TEMPORAL_FILTER_H_
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index c841fa6..6a5dc21 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -13,7 +13,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/entropy.h"
 #include "av1/common/pred_common.h"
@@ -45,14 +45,14 @@
   { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
   { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
 };
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+const TOKENVALUE *av1_dct_cat_lt_10_value_tokens =
     dct_cat_lt_10_value_tokens +
     (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
         2;
 // The corresponding costs of the extrabits for the tokens in the above table
 // are stored in the table below. The values are obtained from looking up the
 // entry for the specified extrabits in the table corresponding to the token
-// (as defined in cost element vp10_extra_bits)
+// (as defined in cost element av1_extra_bits)
 // e.g. {9, 63} maps to cat5_cost[63 >> 1], {1, 1} maps to sign_cost[1 >> 1]
 static const int dct_cat_lt_10_value_cost[] = {
   3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531, 3432, 3409, 3363, 3340, 3282,
@@ -67,13 +67,13 @@
   3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432, 3531, 3554, 3600, 3623, 3681,
   3704, 3750, 3773,
 };
-const int *vp10_dct_cat_lt_10_value_cost =
+const int *av1_dct_cat_lt_10_value_cost =
     dct_cat_lt_10_value_cost +
     (sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost)) / 2;
 
 // Array indices are identical to previously-existing CONTEXT_NODE indices
 /* clang-format off */
-const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   -EOB_TOKEN, 2,                       // 0  = EOB
   -ZERO_TOKEN, 4,                      // 1  = ZERO
   -ONE_TOKEN, 6,                       // 2  = ONE
@@ -88,12 +88,12 @@
 };
 /* clang-format on */
 
-static const vpx_tree_index cat1[2] = { 0, 0 };
-static const vpx_tree_index cat2[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6[28] = { 2,  2,  4,  4,  6,  6,  8,  8,  10, 10,
+static const aom_tree_index cat1[2] = { 0, 0 };
+static const aom_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6[28] = { 2,  2,  4,  4,  6,  6,  8,  8,  10, 10,
                                          12, 12, 14, 14, 16, 16, 18, 18, 20, 20,
                                          22, 22, 24, 24, 26, 26, 0,  0 };
 
@@ -111,7 +111,7 @@
   2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
   3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
 };
-const int16_t vp10_cat6_low_cost[256] = {
+const int16_t av1_cat6_low_cost[256] = {
   3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
   3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
   3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
@@ -133,7 +133,7 @@
   6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
   6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
 };
-const int vp10_cat6_high_cost[64] = {
+const int av1_cat6_high_cost[64] = {
   88,    2251,  2727,  4890,  3148,  5311,  5787,  7950,  3666,  5829,  6305,
   8468,  6726,  8889,  9365,  11528, 3666,  5829,  6305,  8468,  6726,  8889,
   9365,  11528, 7244,  9407,  9883,  12046, 10304, 12467, 12943, 15106, 3666,
@@ -142,8 +142,8 @@
   15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
-const int vp10_cat6_high10_high_cost[256] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const int av1_cat6_high10_high_cost[256] = {
   94,    2257,  2733,  4896,  3154,  5317,  5793,  7956,  3672,  5835,  6311,
   8474,  6732,  8895,  9371,  11534, 3672,  5835,  6311,  8474,  6732,  8895,
   9371,  11534, 7250,  9413,  9889,  12052, 10310, 12473, 12949, 15112, 3672,
@@ -169,7 +169,7 @@
   18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
   24237, 24713, 26876
 };
-const int vp10_cat6_high12_high_cost[1024] = {
+const int av1_cat6_high12_high_cost[1024] = {
   100,   2263,  2739,  4902,  3160,  5323,  5799,  7962,  3678,  5841,  6317,
   8480,  6738,  8901,  9377,  11540, 3678,  5841,  6317,  8480,  6738,  8901,
   9377,  11540, 7256,  9419,  9895,  12058, 10316, 12479, 12955, 15118, 3678,
@@ -267,82 +267,82 @@
 };
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static const vpx_tree_index cat1_high10[2] = { 0, 0 };
-static const vpx_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6_high10[32] = { 2,  2,  4,  4,  6,  6,  8,  8,
+#if CONFIG_AOM_HIGHBITDEPTH
+static const aom_tree_index cat1_high10[2] = { 0, 0 };
+static const aom_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6_high10[32] = { 2,  2,  4,  4,  6,  6,  8,  8,
                                                 10, 10, 12, 12, 14, 14, 16, 16,
                                                 18, 18, 20, 20, 22, 22, 24, 24,
                                                 26, 26, 28, 28, 30, 30, 0,  0 };
-static const vpx_tree_index cat1_high12[2] = { 0, 0 };
-static const vpx_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
-static const vpx_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
-static const vpx_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
-static const vpx_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
-static const vpx_tree_index cat6_high12[36] = {
+static const aom_tree_index cat1_high12[2] = { 0, 0 };
+static const aom_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
+static const aom_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
+static const aom_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const aom_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const aom_tree_index cat6_high12[36] = {
   2,  2,  4,  4,  6,  6,  8,  8,  10, 10, 12, 12, 14, 14, 16, 16, 18, 18,
   20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, 30, 32, 32, 34, 34, 0,  0
 };
 #endif
 
-const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
-  { 0, 0, 0, 0, zero_cost },                             // ZERO_TOKEN
-  { 0, 0, 0, 1, sign_cost },                             // ONE_TOKEN
-  { 0, 0, 0, 2, sign_cost },                             // TWO_TOKEN
-  { 0, 0, 0, 3, sign_cost },                             // THREE_TOKEN
-  { 0, 0, 0, 4, sign_cost },                             // FOUR_TOKEN
-  { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },  // CATEGORY1_TOKEN
-  { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },  // CATEGORY2_TOKEN
-  { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },  // CATEGORY3_TOKEN
-  { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },  // CATEGORY4_TOKEN
-  { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },  // CATEGORY5_TOKEN
-  { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 },         // CATEGORY6_TOKEN
-  { 0, 0, 0, 0, zero_cost }                              // EOB_TOKEN
+const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                            // ZERO_TOKEN
+  { 0, 0, 0, 1, sign_cost },                            // ONE_TOKEN
+  { 0, 0, 0, 2, sign_cost },                            // TWO_TOKEN
+  { 0, 0, 0, 3, sign_cost },                            // THREE_TOKEN
+  { 0, 0, 0, 4, sign_cost },                            // FOUR_TOKEN
+  { cat1, av1_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },  // CATEGORY1_TOKEN
+  { cat2, av1_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },  // CATEGORY2_TOKEN
+  { cat3, av1_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },  // CATEGORY3_TOKEN
+  { cat4, av1_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },  // CATEGORY4_TOKEN
+  { cat5, av1_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },  // CATEGORY5_TOKEN
+  { cat6, av1_cat6_prob, 14, CAT6_MIN_VAL, 0 },         // CATEGORY6_TOKEN
+  { 0, 0, 0, 0, zero_cost }                             // EOB_TOKEN
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
-const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
-  { 0, 0, 0, 0, zero_cost },                                           // ZERO
-  { 0, 0, 0, 1, sign_cost },                                           // ONE
-  { 0, 0, 0, 2, sign_cost },                                           // TWO
-  { 0, 0, 0, 3, sign_cost },                                           // THREE
-  { 0, 0, 0, 4, sign_cost },                                           // FOUR
-  { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
-  { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
-  { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
-  { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
-  { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
-  { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },         // CAT6
-  { 0, 0, 0, 0, zero_cost }                                            // EOB
+#if CONFIG_AOM_HIGHBITDEPTH
+const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                                          // ZERO
+  { 0, 0, 0, 1, sign_cost },                                          // ONE
+  { 0, 0, 0, 2, sign_cost },                                          // TWO
+  { 0, 0, 0, 3, sign_cost },                                          // THREE
+  { 0, 0, 0, 4, sign_cost },                                          // FOUR
+  { cat1_high10, av1_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high10, av1_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high10, av1_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high10, av1_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high10, av1_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high10, av1_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                           // EOB
 };
-const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
-  { 0, 0, 0, 0, zero_cost },                                           // ZERO
-  { 0, 0, 0, 1, sign_cost },                                           // ONE
-  { 0, 0, 0, 2, sign_cost },                                           // TWO
-  { 0, 0, 0, 3, sign_cost },                                           // THREE
-  { 0, 0, 0, 4, sign_cost },                                           // FOUR
-  { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
-  { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
-  { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
-  { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
-  { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
-  { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },         // CAT6
-  { 0, 0, 0, 0, zero_cost }                                            // EOB
+const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                                          // ZERO
+  { 0, 0, 0, 1, sign_cost },                                          // ONE
+  { 0, 0, 0, 2, sign_cost },                                          // TWO
+  { 0, 0, 0, 3, sign_cost },                                          // THREE
+  { 0, 0, 0, 4, sign_cost },                                          // FOUR
+  { cat1_high12, av1_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high12, av1_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high12, av1_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high12, av1_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high12, av1_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high12, av1_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                           // EOB
 };
 #endif
 
 #if !CONFIG_ANS
-const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
+const struct av1_token av1_coef_encodings[ENTROPY_TOKENS] = {
   { 2, 2 },  { 6, 3 },   { 28, 5 },  { 58, 6 },  { 59, 6 },  { 60, 6 },
   { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
 };
 #endif  // !CONFIG_ANS
 
 struct tokenize_b_args {
-  VP10_COMP *cpi;
+  AV1_COMP *cpi;
   ThreadData *td;
   TOKENEXTRA **tp;
 };
@@ -356,11 +356,11 @@
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *p = &x->plane[plane];
   struct macroblockd_plane *pd = &xd->plane[plane];
-  vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
-                    blk_row);
+  av1_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+                   blk_row);
 }
 
-static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
+static INLINE void add_token(TOKENEXTRA **t, const aom_prob *context_tree,
 #if CONFIG_ANS
                              const rans_dec_lut *token_cdf,
 #endif  // CONFIG_ANS
@@ -378,7 +378,7 @@
 }
 
 static INLINE void add_token_no_extra(TOKENEXTRA **t,
-                                      const vpx_prob *context_tree,
+                                      const aom_prob *context_tree,
                                       uint8_t token, uint8_t skip_eob_node,
                                       unsigned int *counts) {
   (*t)->token = token;
@@ -394,8 +394,8 @@
   return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
 }
 
-void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
-                              int plane, TOKENEXTRA **t) {
+void av1_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+                             int plane, TOKENEXTRA **t) {
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -408,14 +408,14 @@
                    (xd->plane[plane != 0].subsampling_y);
   const int cols = (4 * num_4x4_blocks_wide_lookup[bsize]) >>
                    (xd->plane[plane != 0].subsampling_x);
-  const vpx_prob (*const probs)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
-      plane == 0 ? vp10_default_palette_y_color_prob
-                 : vp10_default_palette_uv_color_prob;
+  const aom_prob (*const probs)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+      plane == 0 ? av1_default_palette_y_color_prob
+                 : av1_default_palette_uv_color_prob;
 
   for (i = 0; i < rows; ++i) {
     for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
       color_ctx =
-          vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
+          av1_get_palette_color_context(color_map, cols, i, j, n, color_order);
       for (k = 0; k < n; ++k)
         if (color_map[i * cols + j] == color_order[k]) {
           color_new_idx = k;
@@ -433,7 +433,7 @@
 static void tokenize_b(int plane, int block, int blk_row, int blk_col,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
   struct tokenize_b_args *const args = arg;
-  VP10_COMP *cpi = args->cpi;
+  AV1_COMP *cpi = args->cpi;
   ThreadData *const td = args->td;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -449,7 +449,7 @@
   const PLANE_TYPE type = pd->plane_type;
   const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
 #if CONFIG_SUPERTX
-  const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+  const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx);
 #else
   const int segment_id = mbmi->segment_id;
 #endif  // CONFIG_SUEPRTX
@@ -460,11 +460,11 @@
   unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
       td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
 #if CONFIG_ENTROPY
-  vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+  aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
       cpi->subframe_stats.coef_probs_buf[cpi->common.coef_probs_update_idx]
                                         [txsize_sqr_map[tx_size]][type][ref];
 #else
-  vpx_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+  aom_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
       cpi->common.fc->coef_probs[txsize_sqr_map[tx_size]][type][ref];
 #endif  // CONFIG_ENTROPY
 #if CONFIG_ANS
@@ -488,7 +488,7 @@
     const int v = qcoeff[scan[c]];
     eob_branch[band[c]][pt] += !skip_eob;
 
-    vp10_get_token_extra(v, &token, &extra);
+    av1_get_token_extra(v, &token, &extra);
 
     add_token(&t, coef_probs[band[c]][pt],
 #if CONFIG_ANS
@@ -496,7 +496,7 @@
 #endif  // CONFIG_ANS
               extra, (uint8_t)token, (uint8_t)skip_eob, counts[band[c]][pt]);
 
-    token_cache[scan[c]] = vp10_pt_energy_class[token];
+    token_cache[scan[c]] = av1_pt_energy_class[token];
     ++c;
     pt = get_coef_context(nb, token_cache, c);
     skip_eob = (token == ZERO_TOKEN);
@@ -509,7 +509,7 @@
 
   *tp = t;
 
-  vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
+  av1_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
 }
 
 struct is_skippable_args {
@@ -528,12 +528,12 @@
 }
 
 // TODO(yaowu): rewrite and optimize this function to remove the usage of
-//              vp10_foreach_transform_block() and simplify is_skippable().
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+//              av1_foreach_transform_block() and simplify is_skippable().
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 1;
   struct is_skippable_args args = { x->plane[plane].eobs, &result };
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
-                                          &args);
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+                                         &args);
   return result;
 }
 
@@ -550,11 +550,11 @@
   *(args->skippable) |= (args->eobs[block] > eobs);
 }
 
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 0;
   struct is_skippable_args args = { x->plane[plane].eobs, &result };
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
-                                          has_high_freq_coeff, &args);
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
+                                         has_high_freq_coeff, &args);
   return result;
 }
 
@@ -615,15 +615,15 @@
   }
 }
 
-void vp10_tokenize_sb_inter(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
-                            int dry_run, int mi_row, int mi_col,
-                            BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb_inter(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                           int dry_run, int mi_row, int mi_col,
+                           BLOCK_SIZE bsize) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   TOKENEXTRA *t_backup = *t;
-  const int ctx = vp10_get_skip_context(xd);
+  const int ctx = av1_get_skip_context(xd);
   const int skip_inc =
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
   struct tokenize_b_args arg = { cpi, td, t };
@@ -669,13 +669,13 @@
 }
 #endif  // CONFIG_VAR_TX
 
-void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
-                      int dry_run, BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t, int dry_run,
+                     BLOCK_SIZE bsize) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  const int ctx = vp10_get_skip_context(xd);
+  const int ctx = av1_get_skip_context(xd);
   const int skip_inc =
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
   struct tokenize_b_args arg = { cpi, td, t };
@@ -691,24 +691,24 @@
     td->counts->skip[ctx][0] += skip_inc;
 
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
-                                              &arg);
+      av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+                                             &arg);
       (*t)->token = EOSB_TOKEN;
       (*t)++;
     }
   } else {
-    vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+    av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
   }
 }
 
 #if CONFIG_SUPERTX
-void vp10_tokenize_sb_supertx(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
-                              int dry_run, BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb_supertx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                             int dry_run, BLOCK_SIZE bsize) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &td->mb.e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   TOKENEXTRA *t_backup = *t;
-  const int ctx = vp10_get_skip_context(xd);
+  const int ctx = av1_get_skip_context(xd);
   const int skip_inc =
       !segfeature_active(&cm->seg, mbmi->segment_id_supertx, SEG_LVL_SKIP);
   struct tokenize_b_args arg = { cpi, td, t };
@@ -724,13 +724,13 @@
     td->counts->skip[ctx][0] += skip_inc;
 
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
-                                              &arg);
+      av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+                                             &arg);
       (*t)->token = EOSB_TOKEN;
       (*t)++;
     }
   } else {
-    vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+    av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
     *t = t_backup;
   }
 }
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index 7ae8676..3f43405 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_TOKENIZE_H_
-#define VP10_ENCODER_TOKENIZE_H_
+#ifndef AV1_ENCODER_TOKENIZE_H_
+#define AV1_ENCODER_TOKENIZE_H_
 
 #include "av1/common/entropy.h"
 
@@ -22,7 +22,7 @@
 
 #define EOSB_TOKEN 127  // Not signalled, encoder only
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef int32_t EXTRABIT;
 #else
 typedef int16_t EXTRABIT;
@@ -34,7 +34,7 @@
 } TOKENVALUE;
 
 typedef struct {
-  const vpx_prob *context_tree;
+  const aom_prob *context_tree;
 #if CONFIG_ANS
   const rans_dec_lut *token_cdf;
 #endif  // CONFIG_ANS
@@ -43,68 +43,67 @@
   uint8_t skip_eob_node;
 } TOKENEXTRA;
 
-extern const vpx_tree_index vp10_coef_tree[];
-extern const vpx_tree_index vp10_coef_con_tree[];
+extern const aom_tree_index av1_coef_tree[];
+extern const aom_tree_index av1_coef_con_tree[];
 #if !CONFIG_ANS
-extern const struct vp10_token vp10_coef_encodings[];
+extern const struct av1_token av1_coef_encodings[];
 #endif  // !CONFIG_ANS
 
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
 #if CONFIG_VAR_TX
-void vp10_tokenize_sb_inter(struct VP10_COMP *cpi, struct ThreadData *td,
-                            TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
-                            BLOCK_SIZE bsize);
+void av1_tokenize_sb_inter(struct AV1_COMP *cpi, struct ThreadData *td,
+                           TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
+                           BLOCK_SIZE bsize);
 #endif
 
-void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
-                              int plane, TOKENEXTRA **t);
-void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
-                      TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+void av1_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+                             int plane, TOKENEXTRA **t);
+void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
+                     TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
 #if CONFIG_SUPERTX
-void vp10_tokenize_sb_supertx(struct VP10_COMP *cpi, struct ThreadData *td,
-                              TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+void av1_tokenize_sb_supertx(struct AV1_COMP *cpi, struct ThreadData *td,
+                             TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
 #endif
 
-extern const int16_t *vp10_dct_value_cost_ptr;
+extern const int16_t *av1_dct_value_cost_ptr;
 /* TODO: The Token field should be broken out into a separate char array to
  *  improve cache locality, since it's needed for costing when the rest of the
  *  fields are not.
  */
-extern const TOKENVALUE *vp10_dct_value_tokens_ptr;
-extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens;
-extern const int *vp10_dct_cat_lt_10_value_cost;
-extern const int16_t vp10_cat6_low_cost[256];
-extern const int vp10_cat6_high_cost[64];
-extern const int vp10_cat6_high10_high_cost[256];
-extern const int vp10_cat6_high12_high_cost[1024];
-static INLINE int vp10_get_cost(int16_t token, EXTRABIT extrabits,
-                                const int *cat6_high_table) {
+extern const TOKENVALUE *av1_dct_value_tokens_ptr;
+extern const TOKENVALUE *av1_dct_cat_lt_10_value_tokens;
+extern const int *av1_dct_cat_lt_10_value_cost;
+extern const int16_t av1_cat6_low_cost[256];
+extern const int av1_cat6_high_cost[64];
+extern const int av1_cat6_high10_high_cost[256];
+extern const int av1_cat6_high12_high_cost[1024];
+static INLINE int av1_get_cost(int16_t token, EXTRABIT extrabits,
+                               const int *cat6_high_table) {
   if (token != CATEGORY6_TOKEN)
-    return vp10_extra_bits[token].cost[extrabits >> 1];
-  return vp10_cat6_low_cost[(extrabits >> 1) & 0xff] +
+    return av1_extra_bits[token].cost[extrabits >> 1];
+  return av1_cat6_low_cost[(extrabits >> 1) & 0xff] +
          cat6_high_table[extrabits >> 9];
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
-  return bit_depth == 8 ? vp10_cat6_high_cost
-                        : (bit_depth == 10 ? vp10_cat6_high10_high_cost
-                                           : vp10_cat6_high12_high_cost);
+#if CONFIG_AOM_HIGHBITDEPTH
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
+  return bit_depth == 8 ? av1_cat6_high_cost
+                        : (bit_depth == 10 ? av1_cat6_high10_high_cost
+                                           : av1_cat6_high12_high_cost);
 }
 #else
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
   (void)bit_depth;
-  return vp10_cat6_high_cost;
+  return av1_cat6_high_cost;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE void vp10_get_token_extra(int v, int16_t *token,
-                                        EXTRABIT *extra) {
+static INLINE void av1_get_token_extra(int v, int16_t *token, EXTRABIT *extra) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
     *token = CATEGORY6_TOKEN;
     if (v >= CAT6_MIN_VAL)
@@ -113,29 +112,29 @@
       *extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
     return;
   }
-  *token = vp10_dct_cat_lt_10_value_tokens[v].token;
-  *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
+  *token = av1_dct_cat_lt_10_value_tokens[v].token;
+  *extra = av1_dct_cat_lt_10_value_tokens[v].extra;
 }
-static INLINE int16_t vp10_get_token(int v) {
+static INLINE int16_t av1_get_token(int v) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
-  return vp10_dct_cat_lt_10_value_tokens[v].token;
+  return av1_dct_cat_lt_10_value_tokens[v].token;
 }
 
-static INLINE int vp10_get_token_cost(int v, int16_t *token,
-                                      const int *cat6_high_table) {
+static INLINE int av1_get_token_cost(int v, int16_t *token,
+                                     const int *cat6_high_table) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
     EXTRABIT extrabits;
     *token = CATEGORY6_TOKEN;
     extrabits = abs(v) - CAT6_MIN_VAL;
-    return vp10_cat6_low_cost[extrabits & 0xff] +
+    return av1_cat6_low_cost[extrabits & 0xff] +
            cat6_high_table[extrabits >> 8];
   }
-  *token = vp10_dct_cat_lt_10_value_tokens[v].token;
-  return vp10_dct_cat_lt_10_value_cost[v];
+  *token = av1_dct_cat_lt_10_value_tokens[v].token;
+  return av1_dct_cat_lt_10_value_cost[v];
 }
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TOKENIZE_H_
+#endif  // AV1_ENCODER_TOKENIZE_H_
diff --git a/av1/encoder/treewriter.c b/av1/encoder/treewriter.c
index d3fcd45..5fbc857 100644
--- a/av1/encoder/treewriter.c
+++ b/av1/encoder/treewriter.c
@@ -10,13 +10,13 @@
 
 #include "av1/encoder/treewriter.h"
 
-static void tree2tok(struct vp10_token *tokens, const vpx_tree_index *tree,
+static void tree2tok(struct av1_token *tokens, const aom_tree_index *tree,
                      int i, int v, int l) {
   v += v;
   ++l;
 
   do {
-    const vpx_tree_index j = tree[i++];
+    const aom_tree_index j = tree[i++];
     if (j <= 0) {
       tokens[-j].value = v;
       tokens[-j].len = l;
@@ -26,12 +26,12 @@
   } while (++v & 1);
 }
 
-void vp10_tokens_from_tree(struct vp10_token *tokens,
-                           const vpx_tree_index *tree) {
+void av1_tokens_from_tree(struct av1_token *tokens,
+                          const aom_tree_index *tree) {
   tree2tok(tokens, tree, 0, 0, 0);
 }
 
-static unsigned int convert_distribution(unsigned int i, vpx_tree tree,
+static unsigned int convert_distribution(unsigned int i, aom_tree tree,
                                          unsigned int branch_ct[][2],
                                          const unsigned int num_events[]) {
   unsigned int left, right;
@@ -51,8 +51,8 @@
   return left + right;
 }
 
-void vp10_tree_probs_from_distribution(vpx_tree tree,
-                                       unsigned int branch_ct[/* n-1 */][2],
-                                       const unsigned int num_events[/* n */]) {
+void av1_tree_probs_from_distribution(aom_tree tree,
+                                      unsigned int branch_ct[/* n-1 */][2],
+                                      const unsigned int num_events[/* n */]) {
   convert_distribution(0, tree, branch_ct, num_events);
 }
diff --git a/av1/encoder/treewriter.h b/av1/encoder/treewriter.h
index 43c615f..9a66115 100644
--- a/av1/encoder/treewriter.h
+++ b/av1/encoder/treewriter.h
@@ -8,37 +8,37 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_TREEWRITER_H_
-#define VP10_ENCODER_TREEWRITER_H_
+#ifndef AV1_ENCODER_TREEWRITER_H_
+#define AV1_ENCODER_TREEWRITER_H_
 
-#ifdef VP10_FORCE_VPXBOOL_TREEWRITER
+#ifdef AV1_FORCE_AOMBOOL_TREEWRITER
 #include "aom_dsp/bitwriter.h"
-#define tree_writer vpx_writer
-#define tree_bit_write vpx_write
+#define tree_writer aom_writer
+#define tree_bit_write aom_write
 #else
 #include "av1/encoder/bitwriter.h"
-#define tree_writer vp10_writer
-#define tree_bit_write vp10_write
+#define tree_writer aom_writer
+#define tree_bit_write aom_write
 #endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_tree_probs_from_distribution(vpx_tree tree,
-                                       unsigned int branch_ct[/* n - 1 */][2],
-                                       const unsigned int num_events[/* n */]);
+void av1_tree_probs_from_distribution(aom_tree tree,
+                                      unsigned int branch_ct[/* n - 1 */][2],
+                                      const unsigned int num_events[/* n */]);
 
-struct vp10_token {
+struct av1_token {
   int value;
   int len;
 };
 
-void vp10_tokens_from_tree(struct vp10_token *, const vpx_tree_index *);
+void av1_tokens_from_tree(struct av1_token *, const aom_tree_index *);
 
-static INLINE void vp10_write_tree(tree_writer *w, const vpx_tree_index *tree,
-                                   const vpx_prob *probs, int bits, int len,
-                                   vpx_tree_index i) {
+static INLINE void av1_write_tree(tree_writer *w, const aom_tree_index *tree,
+                                  const aom_prob *probs, int bits, int len,
+                                  aom_tree_index i) {
   do {
     const int bit = (bits >> --len) & 1;
     tree_bit_write(w, bit, probs[i >> 1]);
@@ -46,10 +46,10 @@
   } while (len);
 }
 
-static INLINE void vp10_write_token(tree_writer *w, const vpx_tree_index *tree,
-                                    const vpx_prob *probs,
-                                    const struct vp10_token *token) {
-  vp10_write_tree(w, tree, probs, token->value, token->len, 0);
+static INLINE void av1_write_token(tree_writer *w, const aom_tree_index *tree,
+                                   const aom_prob *probs,
+                                   const struct av1_token *token) {
+  av1_write_tree(w, tree, probs, token->value, token->len, 0);
 }
 
 #undef tree_writer
@@ -58,4 +58,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TREEWRITER_H_
+#endif  // AV1_ENCODER_TREEWRITER_H_
diff --git a/av1/encoder/variance_tree.c b/av1/encoder/variance_tree.c
index 219d39a..3a23027 100644
--- a/av1/encoder/variance_tree.c
+++ b/av1/encoder/variance_tree.c
@@ -11,7 +11,7 @@
 #include "av1/encoder/variance_tree.h"
 #include "av1/encoder/encoder.h"
 
-void vp10_setup_var_tree(struct VP10Common *cm, ThreadData *td) {
+void av1_setup_var_tree(struct AV1Common *cm, ThreadData *td) {
   int i, j;
 #if CONFIG_EXT_PARTITION
   const int leaf_nodes = 1024;
@@ -24,9 +24,9 @@
   VAR_TREE *this_var;
   int nodes;
 
-  vpx_free(td->var_tree);
+  aom_free(td->var_tree);
   CHECK_MEM_ERROR(cm, td->var_tree,
-                  vpx_calloc(tree_nodes, sizeof(*td->var_tree)));
+                  aom_calloc(tree_nodes, sizeof(*td->var_tree)));
 
   this_var = &td->var_tree[0];
 
@@ -54,7 +54,7 @@
   }
 }
 
-void vp10_free_var_tree(ThreadData *td) {
-  vpx_free(td->var_tree);
+void av1_free_var_tree(ThreadData *td) {
+  aom_free(td->var_tree);
   td->var_tree = NULL;
 }
diff --git a/av1/encoder/variance_tree.h b/av1/encoder/variance_tree.h
index 08c40d3..728d7f4 100644
--- a/av1/encoder/variance_tree.h
+++ b/av1/encoder/variance_tree.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_ENCODER_VARIANCE_TREE_H_
-#define VP10_ENCODER_VARIANCE_TREE_H_
+#ifndef AV1_ENCODER_VARIANCE_TREE_H_
+#define AV1_ENCODER_VARIANCE_TREE_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #include "av1/common/enums.h"
 
@@ -23,7 +23,7 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct ThreadData;
 
 typedef struct {
@@ -50,13 +50,13 @@
   int ref_stride;
   int width;
   int height;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int highbd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 } VAR_TREE;
 
-void vp10_setup_var_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_var_tree(struct ThreadData *td);
+void av1_setup_var_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_var_tree(struct ThreadData *td);
 
 // Set variance values given sum square error, sum error, count.
 static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
@@ -92,4 +92,4 @@
 }  // extern "C"
 #endif
 
-#endif /* VP10_ENCODER_VARIANCE_TREE_H_ */
+#endif /* AV1_ENCODER_VARIANCE_TREE_H_ */
diff --git a/av1/encoder/wedge_utils.c b/av1/encoder/wedge_utils.c
index 548bc48..596c5df 100644
--- a/av1/encoder/wedge_utils.c
+++ b/av1/encoder/wedge_utils.c
@@ -10,11 +10,11 @@
 
 #include <assert.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #include "aom_ports/mem.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #include "av1/common/reconinter.h"
 
@@ -48,8 +48,8 @@
  * holds for 8 bit input, and on real input, it should hold practically always,
  * as residuals are expected to be small.
  */
-uint64_t vp10_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
-                                         const uint8_t *m, int N) {
+uint64_t av1_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
+                                        const uint8_t *m, int N) {
   uint64_t csse = 0;
   int i;
   assert(N % 64 == 0);
@@ -92,8 +92,8 @@
  *  Note that for efficiency, ds is stored on 16 bits. Real input residuals
  *  being small, this should not cause a noticeable issue.
  */
-int vp10_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N,
-                                     int64_t limit) {
+int av1_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N,
+                                    int64_t limit) {
   int64_t acc = 0;
 
   assert(N % 64 == 0);
@@ -117,8 +117,8 @@
  *
  * The result is saturated to signed 16 bits.
  */
-void vp10_wedge_compute_delta_squares_c(int16_t *d, const int16_t *a,
-                                        const int16_t *b, int N) {
+void av1_wedge_compute_delta_squares_c(int16_t *d, const int16_t *a,
+                                       const int16_t *b, int N) {
   int i;
 
   assert(N % 64 == 0);
diff --git a/av1/encoder/x86/vp10_highbd_quantize_sse4.c b/av1/encoder/x86/av1_highbd_quantize_sse4.c
similarity index 98%
rename from av1/encoder/x86/vp10_highbd_quantize_sse4.c
rename to av1/encoder/x86/av1_highbd_quantize_sse4.c
index 8b05c6a..dd3405f 100644
--- a/av1/encoder/x86/vp10_highbd_quantize_sse4.c
+++ b/av1/encoder/x86/av1_highbd_quantize_sse4.c
@@ -11,8 +11,8 @@
 #include <smmintrin.h>
 #include <stdint.h>
 
-#include "./vp10_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./av1_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 // Coefficient quantization phase 1
 // param[0-2] : rounding/quan/dequan constants
@@ -106,7 +106,7 @@
   return eobValue;
 }
 
-void vp10_highbd_quantize_fp_sse4_1(
+void av1_highbd_quantize_fp_sse4_1(
     const tran_low_t *coeff_ptr, intptr_t count, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
     const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index 6fe3ada..6b88879 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <emmintrin.h>  // SSE2
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
@@ -200,12 +200,12 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
-                      int tx_type) {
+void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
   __m128i in[4];
 
   switch (tx_type) {
-    case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
+    case DCT_DCT: aom_fdct4x4_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_4x4(input, in, stride, 0, 0);
       fadst4_sse2(in);
@@ -296,12 +296,14 @@
   }
 }
 
-void vp10_fdct8x8_quant_sse2(
-    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
-    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
-    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
-    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void av1_fdct8x8_quant_sse2(const int16_t *input, int stride,
+                            int16_t *coeff_ptr, intptr_t n_coeffs,
+                            int skip_block, const int16_t *zbin_ptr,
+                            const int16_t *round_ptr, const int16_t *quant_ptr,
+                            const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                            int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                            uint16_t *eob_ptr, const int16_t *scan_ptr,
+                            const int16_t *iscan_ptr) {
   __m128i zero;
   int pass;
   // Constants
@@ -1282,12 +1284,12 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
-                      int tx_type) {
+void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
   __m128i in[8];
 
   switch (tx_type) {
-    case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
+    case DCT_DCT: aom_fdct8x8_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_8x8(input, in, stride, 0, 0);
       fadst8_sse2(in);
@@ -2472,12 +2474,12 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
-                        int tx_type) {
+void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+                       int tx_type) {
   __m128i in0[16], in1[16];
 
   switch (tx_type) {
-    case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
+    case DCT_DCT: aom_fdct16x16_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_16x16(input, in0, in1, stride, 0, 0);
       fadst16_sse2(in0, in1);
diff --git a/av1/encoder/x86/dct_sse2.asm b/av1/encoder/x86/dct_sse2.asm
index c3a5fb5..7d8eb61 100644
--- a/av1/encoder/x86/dct_sse2.asm
+++ b/av1/encoder/x86/dct_sse2.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
@@ -62,7 +62,7 @@
   psllw           m0,        2
   psllw           m1,        2
 
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; sign extension
   mova            m2,             m0
   mova            m3,             m1
diff --git a/av1/encoder/x86/dct_ssse3.c b/av1/encoder/x86/dct_ssse3.c
index aa018a1..b589914 100644
--- a/av1/encoder/x86/dct_ssse3.c
+++ b/av1/encoder/x86/dct_ssse3.c
@@ -16,11 +16,11 @@
 #endif
 #include <tmmintrin.h>  // SSSE3
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_dsp/x86/inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
-void vp10_fdct8x8_quant_ssse3(
+void av1_fdct8x8_quant_ssse3(
     const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
     int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
     const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
diff --git a/av1/encoder/x86/error_intrin_avx2.c b/av1/encoder/x86/error_intrin_avx2.c
index 6e7c093..8ab1ea0 100644
--- a/av1/encoder/x86/error_intrin_avx2.c
+++ b/av1/encoder/x86/error_intrin_avx2.c
@@ -10,11 +10,11 @@
 
 #include <immintrin.h>  // AVX2
 
-#include "./vp10_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./av1_rtcd.h"
+#include "aom/aom_integer.h"
 
-int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
-                              intptr_t block_size, int64_t *ssz) {
+int64_t av1_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+                             intptr_t block_size, int64_t *ssz) {
   __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
   __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
   __m256i sse_reg_64hi, ssz_reg_64hi;
diff --git a/av1/encoder/x86/error_sse2.asm b/av1/encoder/x86/error_sse2.asm
index 0772da4..44a52d7 100644
--- a/av1/encoder/x86/error_sse2.asm
+++ b/av1/encoder/x86/error_sse2.asm
@@ -8,13 +8,13 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
 SECTION .text
 
-; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t av1_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
 ;                         int64_t *ssz)
 
 INIT_XMM sse2
@@ -76,7 +76,7 @@
   RET
 
 ; Compute the sum of squared difference between two int16_t vectors.
-; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+; int64_t av1_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
 ;                            intptr_t block_size)
 
 INIT_XMM sse2
diff --git a/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
index 2728880..bae0a81 100644
--- a/av1/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -13,9 +13,9 @@
 
 #include "av1/common/common.h"
 
-int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
-                                     intptr_t block_size, int64_t *ssz,
-                                     int bps) {
+int64_t av1_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
+                                    intptr_t block_size, int64_t *ssz,
+                                    int bps) {
   int i, j, test;
   uint32_t temp[4];
   __m128i max, min, cmp0, cmp1, cmp2, cmp3;
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
index a6cb454..d601208 100644
--- a/av1/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -11,10 +11,10 @@
 #include <assert.h>
 #include <smmintrin.h> /* SSE4.1 */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "av1/common/vp10_fwd_txfm2d_cfg.h"
-#include "av1/common/vp10_txfm.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "av1/common/av1_fwd_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
 #include "av1/common/x86/highbd_txfm_utility_sse4.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
@@ -121,10 +121,10 @@
 }
 
 // Note:
-//  We implement vp10_fwd_txfm2d_4x4(). This function is kept here since
-//  vp10_highbd_fht4x4_c() is not removed yet
-void vp10_highbd_fht4x4_sse4_1(const int16_t *input, tran_low_t *output,
-                               int stride, int tx_type) {
+//  We implement av1_fwd_txfm2d_4x4(). This function is kept here since
+//  av1_highbd_fht4x4_c() is not removed yet
+void av1_highbd_fht4x4_sse4_1(const int16_t *input, tran_low_t *output,
+                              int stride, int tx_type) {
   (void)input;
   (void)output;
   (void)stride;
@@ -206,8 +206,8 @@
   in[3] = _mm_unpackhi_epi64(v1, v3);
 }
 
-void vp10_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
-                                int input_stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
+                               int input_stride, int tx_type, int bd) {
   __m128i in[4];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -927,8 +927,8 @@
   out[15] = _mm_sub_epi32(kZero, u[1]);
 }
 
-void vp10_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff,
-                                int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
+                               int tx_type, int bd) {
   __m128i in[16], out[16];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -1791,8 +1791,8 @@
   write_buffer_8x8(&in[48], output);
 }
 
-void vp10_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
-                                  int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
+                                 int stride, int tx_type, int bd) {
   __m128i in[64], out[64];
   const TXFM_2D_CFG *cfg = NULL;
 
diff --git a/av1/encoder/x86/quantize_sse2.c b/av1/encoder/x86/quantize_sse2.c
index b8cd0c7..2f0051b 100644
--- a/av1/encoder/x86/quantize_sse2.c
+++ b/av1/encoder/x86/quantize_sse2.c
@@ -11,16 +11,16 @@
 #include <emmintrin.h>
 #include <xmmintrin.h>
 
-#include "./vp10_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./av1_rtcd.h"
+#include "aom/aom_integer.h"
 
-void vp10_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
-                           int skip_block, const int16_t *zbin_ptr,
-                           const int16_t *round_ptr, const int16_t *quant_ptr,
-                           const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
-                           int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-                           uint16_t *eob_ptr, const int16_t *scan_ptr,
-                           const int16_t *iscan_ptr) {
+void av1_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+                          int skip_block, const int16_t *zbin_ptr,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                          int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                          uint16_t *eob_ptr, const int16_t *scan_ptr,
+                          const int16_t *iscan_ptr) {
   __m128i zero;
   __m128i thr;
   int16_t nzflag;
diff --git a/av1/encoder/x86/quantize_ssse3_x86_64.asm b/av1/encoder/x86/quantize_ssse3_x86_64.asm
index b8fefa2..05e0be6 100644
--- a/av1/encoder/x86/quantize_ssse3_x86_64.asm
+++ b/av1/encoder/x86/quantize_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
diff --git a/av1/encoder/x86/ssim_opt_x86_64.asm b/av1/encoder/x86/ssim_opt_x86_64.asm
index 29659ee..4b5c450 100644
--- a/av1/encoder/x86/ssim_opt_x86_64.asm
+++ b/av1/encoder/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vp10_ssim_parms_16x16_sse2) PRIVATE
-sym(vp10_ssim_parms_16x16_sse2):
+global sym(av1_ssim_parms_16x16_sse2) PRIVATE
+sym(av1_ssim_parms_16x16_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vp10_ssim_parms_8x8_sse2) PRIVATE
-sym(vp10_ssim_parms_8x8_sse2):
+global sym(av1_ssim_parms_8x8_sse2) PRIVATE
+sym(av1_ssim_parms_8x8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
diff --git a/av1/encoder/x86/temporal_filter_apply_sse2.asm b/av1/encoder/x86/temporal_filter_apply_sse2.asm
index eabe575..15de6e8 100644
--- a/av1/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/av1/encoder/x86/temporal_filter_apply_sse2.asm
@@ -11,7 +11,7 @@
 
 %include "aom_ports/x86_abi_support.asm"
 
-; void vp10_temporal_filter_apply_sse2 | arg
+; void av1_temporal_filter_apply_sse2 | arg
 ;  (unsigned char  *frame1,           |  0
 ;   unsigned int    stride,           |  1
 ;   unsigned char  *frame2,           |  2
@@ -21,8 +21,8 @@
 ;   int             filter_weight,    |  6
 ;   unsigned int   *accumulator,      |  7
 ;   unsigned short *count)            |  8
-global sym(vp10_temporal_filter_apply_sse2) PRIVATE
-sym(vp10_temporal_filter_apply_sse2):
+global sym(av1_temporal_filter_apply_sse2) PRIVATE
+sym(av1_temporal_filter_apply_sse2):
 
     push        rbp
     mov         rbp, rsp
diff --git a/av1/encoder/x86/wedge_utils_sse2.c b/av1/encoder/x86/wedge_utils_sse2.c
index a6be947..35e8493 100644
--- a/av1/encoder/x86/wedge_utils_sse2.c
+++ b/av1/encoder/x86/wedge_utils_sse2.c
@@ -13,17 +13,17 @@
 
 #include "aom_dsp/x86/synonyms.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 #include "av1/common/reconinter.h"
 
 #define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
 
 /**
- * See vp10_wedge_sse_from_residuals_c
+ * See av1_wedge_sse_from_residuals_c
  */
-uint64_t vp10_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
-                                            const uint8_t *m, int N) {
+uint64_t av1_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
+                                           const uint8_t *m, int N) {
   int n = -N;
   int n8 = n + 8;
 
@@ -94,10 +94,10 @@
 }
 
 /**
- * See vp10_wedge_sign_from_residuals_c
+ * See av1_wedge_sign_from_residuals_c
  */
-int vp10_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
-                                        int N, int64_t limit) {
+int av1_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
+                                       int N, int64_t limit) {
   int64_t acc;
 
   __m128i v_sign_d;
@@ -188,10 +188,10 @@
 }
 
 /**
- * vp10_wedge_compute_delta_squares_c
+ * av1_wedge_compute_delta_squares_c
  */
-void vp10_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
-                                           const int16_t *b, int N) {
+void av1_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
+                                          const int16_t *b, int N) {
   const __m128i v_neg_w =
       _mm_set_epi16(0xffff, 0, 0xffff, 0, 0xffff, 0, 0xffff, 0);
 
diff --git a/av1/exports_dec b/av1/exports_dec
index 71c8369..05860e8 100644
--- a/av1/exports_dec
+++ b/av1/exports_dec
@@ -1,2 +1,2 @@
-data vpx_codec_vp10_dx_algo
-text vpx_codec_vp10_dx
+data aom_codec_av1_dx_algo
+text aom_codec_av1_dx
diff --git a/av1/exports_enc b/av1/exports_enc
index d1644f2..dc4a9ea 100644
--- a/av1/exports_enc
+++ b/av1/exports_enc
@@ -1,2 +1,2 @@
-data vpx_codec_vp10_cx_algo
-text vpx_codec_vp10_cx
+data aom_codec_av1_cx_algo
+text aom_codec_av1_cx
diff --git a/av1/vp10_common.mk b/av1/vp10_common.mk
deleted file mode 100644
index e776a8a..0000000
--- a/av1/vp10_common.mk
+++ /dev/null
@@ -1,139 +0,0 @@
-##
-##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-##
-##  Use of this source code is governed by a BSD-style license
-##  that can be found in the LICENSE file in the root of the source
-##  tree. An additional intellectual property rights grant can be found
-##  in the file PATENTS.  All contributing project authors may
-##  be found in the AUTHORS file in the root of the source tree.
-##
-
-VP10_COMMON_SRCS-yes += vp10_common.mk
-VP10_COMMON_SRCS-yes += vp10_iface_common.h
-VP10_COMMON_SRCS-yes += common/ans.h
-VP10_COMMON_SRCS-yes += common/alloccommon.c
-VP10_COMMON_SRCS-yes += common/blockd.c
-VP10_COMMON_SRCS-yes += common/debugmodes.c
-VP10_COMMON_SRCS-yes += common/divide.h
-VP10_COMMON_SRCS-yes += common/entropy.c
-VP10_COMMON_SRCS-yes += common/entropymode.c
-VP10_COMMON_SRCS-yes += common/entropymv.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.h
-VP10_COMMON_SRCS-yes += common/alloccommon.h
-VP10_COMMON_SRCS-yes += common/blockd.h
-VP10_COMMON_SRCS-yes += common/common.h
-VP10_COMMON_SRCS-yes += common/entropy.h
-VP10_COMMON_SRCS-yes += common/entropymode.h
-VP10_COMMON_SRCS-yes += common/entropymv.h
-VP10_COMMON_SRCS-yes += common/enums.h
-VP10_COMMON_SRCS-yes += common/filter.h
-VP10_COMMON_SRCS-yes += common/filter.c
-VP10_COMMON_SRCS-yes += common/idct.h
-VP10_COMMON_SRCS-yes += common/idct.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.c
-VP10_COMMON_SRCS-yes += common/loopfilter.h
-VP10_COMMON_SRCS-yes += common/thread_common.h
-VP10_COMMON_SRCS-yes += common/mv.h
-VP10_COMMON_SRCS-yes += common/onyxc_int.h
-VP10_COMMON_SRCS-yes += common/pred_common.h
-VP10_COMMON_SRCS-yes += common/pred_common.c
-VP10_COMMON_SRCS-yes += common/quant_common.h
-VP10_COMMON_SRCS-yes += common/reconinter.h
-VP10_COMMON_SRCS-yes += common/reconintra.h
-VP10_COMMON_SRCS-yes += common/vp10_rtcd.c
-VP10_COMMON_SRCS-yes += common/vp10_rtcd_defs.pl
-VP10_COMMON_SRCS-yes += common/scale.h
-VP10_COMMON_SRCS-yes += common/scale.c
-VP10_COMMON_SRCS-yes += common/seg_common.h
-VP10_COMMON_SRCS-yes += common/seg_common.c
-VP10_COMMON_SRCS-yes += common/tile_common.h
-VP10_COMMON_SRCS-yes += common/tile_common.c
-VP10_COMMON_SRCS-yes += common/loopfilter.c
-VP10_COMMON_SRCS-yes += common/thread_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.h
-VP10_COMMON_SRCS-yes += common/quant_common.c
-VP10_COMMON_SRCS-yes += common/reconinter.c
-VP10_COMMON_SRCS-yes += common/reconintra.c
-VP10_COMMON_SRCS-yes += common/restoration.h
-VP10_COMMON_SRCS-yes += common/common_data.h
-VP10_COMMON_SRCS-yes += common/scan.c
-VP10_COMMON_SRCS-yes += common/scan.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.c
-VP10_COMMON_SRCS-yes += common/vp10_txfm.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm1d.h
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm1d.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm1d.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm1d.c
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm2d.c
-VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm2d_cfg.h
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm2d.c
-VP10_COMMON_SRCS-yes += common/vp10_inv_txfm2d_cfg.h
-VP10_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp10_convolve_ssse3.c
-VP10_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp10_convolve_filters_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_highbd_convolve_sse4.c
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_highbd_convolve_filters_sse4.c
-endif
-VP10_COMMON_SRCS-yes += common/vp10_convolve.c
-VP10_COMMON_SRCS-yes += common/vp10_convolve.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/ans.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/divide.h
-VP10_COMMON_SRCS-$(CONFIG_ANS) += common/divide.c
-VP10_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.h
-VP10_COMMON_SRCS-$(CONFIG_LOOP_RESTORATION) += common/restoration.c
-ifeq (yes,$(filter yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)))
-VP10_COMMON_SRCS-yes += common/warped_motion.h
-VP10_COMMON_SRCS-yes += common/warped_motion.c
-endif
-VP10_COMMON_SRCS-yes += common/clpf.c
-VP10_COMMON_SRCS-yes += common/clpf.h
-ifeq ($(CONFIG_DERING),yes)
-VP10_COMMON_SRCS-yes += common/od_dering.c
-VP10_COMMON_SRCS-yes += common/od_dering.h
-VP10_COMMON_SRCS-yes += common/dering.c
-VP10_COMMON_SRCS-yes += common/dering.h
-endif
-VP10_COMMON_SRCS-yes += common/odintrin.c
-VP10_COMMON_SRCS-yes += common/odintrin.h
-
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans4_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans8_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans16_dspr2.c
-endif
-
-# common (msa)
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
-
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_dct32x32_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_txfm1d_sse4.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_fwd_txfm1d_sse4.c
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp10_fwd_txfm2d_sse4.c
-
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_txfm_utility_sse4.h
-endif
-
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
-endif
-
-ifeq ($(CONFIG_EXT_INTRA),yes)
-VP10_COMMON_SRCS-yes += common/intra_filters.h
-VP10_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/reconintra_sse4.c
-endif
-
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.h
-
-$(eval $(call rtcd_h_template,vp10_rtcd,av1/common/vp10_rtcd_defs.pl))
diff --git a/av1/vp10_iface_common.h b/av1/vp10_iface_common.h
deleted file mode 100644
index 37a9cc1..0000000
--- a/av1/vp10_iface_common.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-#ifndef VP10_VP10_IFACE_COMMON_H_
-#define VP10_VP10_IFACE_COMMON_H_
-
-#include "aom_ports/mem.h"
-
-static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
-                            void *user_priv) {
-  /** vpx_img_wrap() doesn't allow specifying independent strides for
-    * the Y, U, and V planes, nor other alignment adjustments that
-    * might be representable by a YV12_BUFFER_CONFIG, so we just
-    * initialize all the fields.*/
-  int bps;
-  if (!yv12->subsampling_y) {
-    if (!yv12->subsampling_x) {
-      img->fmt = VPX_IMG_FMT_I444;
-      bps = 24;
-    } else {
-      img->fmt = VPX_IMG_FMT_I422;
-      bps = 16;
-    }
-  } else {
-    if (!yv12->subsampling_x) {
-      img->fmt = VPX_IMG_FMT_I440;
-      bps = 16;
-    } else {
-      img->fmt = VPX_IMG_FMT_I420;
-      bps = 12;
-    }
-  }
-  img->cs = yv12->color_space;
-  img->range = yv12->color_range;
-  img->bit_depth = 8;
-  img->w = yv12->y_stride;
-  img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VPX_ENC_BORDER_IN_PIXELS, 3);
-  img->d_w = yv12->y_crop_width;
-  img->d_h = yv12->y_crop_height;
-  img->r_w = yv12->render_width;
-  img->r_h = yv12->render_height;
-  img->x_chroma_shift = yv12->subsampling_x;
-  img->y_chroma_shift = yv12->subsampling_y;
-  img->planes[VPX_PLANE_Y] = yv12->y_buffer;
-  img->planes[VPX_PLANE_U] = yv12->u_buffer;
-  img->planes[VPX_PLANE_V] = yv12->v_buffer;
-  img->planes[VPX_PLANE_ALPHA] = NULL;
-  img->stride[VPX_PLANE_Y] = yv12->y_stride;
-  img->stride[VPX_PLANE_U] = yv12->uv_stride;
-  img->stride[VPX_PLANE_V] = yv12->uv_stride;
-  img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
-    // vpx_image_t uses byte strides and a pointer to the first byte
-    // of the image.
-    img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
-    img->bit_depth = yv12->bit_depth;
-    img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
-    img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
-    img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
-    img->planes[VPX_PLANE_ALPHA] = NULL;
-    img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
-    img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
-    img->stride[VPX_PLANE_V] = 2 * yv12->uv_stride;
-    img->stride[VPX_PLANE_ALPHA] = 2 * yv12->y_stride;
-  }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  img->bps = bps;
-  img->user_priv = user_priv;
-  img->img_data = yv12->buffer_alloc;
-  img->img_data_owner = 0;
-  img->self_allocd = 0;
-}
-
-static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
-                                       YV12_BUFFER_CONFIG *yv12) {
-  yv12->y_buffer = img->planes[VPX_PLANE_Y];
-  yv12->u_buffer = img->planes[VPX_PLANE_U];
-  yv12->v_buffer = img->planes[VPX_PLANE_V];
-
-  yv12->y_crop_width = img->d_w;
-  yv12->y_crop_height = img->d_h;
-  yv12->render_width = img->r_w;
-  yv12->render_height = img->r_h;
-  yv12->y_width = img->d_w;
-  yv12->y_height = img->d_h;
-
-  yv12->uv_width =
-      img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
-  yv12->uv_height =
-      img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
-  yv12->uv_crop_width = yv12->uv_width;
-  yv12->uv_crop_height = yv12->uv_height;
-
-  yv12->y_stride = img->stride[VPX_PLANE_Y];
-  yv12->uv_stride = img->stride[VPX_PLANE_U];
-  yv12->color_space = img->cs;
-  yv12->color_range = img->range;
-
-#if CONFIG_VP9_HIGHBITDEPTH
-  if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
-    // In vpx_image_t
-    //     planes point to uint8 address of start of data
-    //     stride counts uint8s to reach next row
-    // In YV12_BUFFER_CONFIG
-    //     y_buffer, u_buffer, v_buffer point to uint16 address of data
-    //     stride and border counts in uint16s
-    // This means that all the address calculations in the main body of code
-    // should work correctly.
-    // However, before we do any pixel operations we need to cast the address
-    // to a uint16 ponter and double its value.
-    yv12->y_buffer = CONVERT_TO_BYTEPTR(yv12->y_buffer);
-    yv12->u_buffer = CONVERT_TO_BYTEPTR(yv12->u_buffer);
-    yv12->v_buffer = CONVERT_TO_BYTEPTR(yv12->v_buffer);
-    yv12->y_stride >>= 1;
-    yv12->uv_stride >>= 1;
-    yv12->flags = YV12_FLAG_HIGHBITDEPTH;
-  } else {
-    yv12->flags = 0;
-  }
-  yv12->border = (yv12->y_stride - img->w) / 2;
-#else
-  yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-  yv12->subsampling_x = img->x_chroma_shift;
-  yv12->subsampling_y = img->y_chroma_shift;
-  return VPX_CODEC_OK;
-}
-
-static VPX_REFFRAME ref_frame_to_vp10_reframe(vpx_ref_frame_type_t frame) {
-  switch (frame) {
-    case VP8_LAST_FRAME: return VPX_LAST_FLAG;
-    case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
-    case VP8_ALTR_FRAME: return VPX_ALT_FLAG;
-  }
-  assert(0 && "Invalid Reference Frame");
-  return VPX_LAST_FLAG;
-}
-#endif  // VP10_VP10_IFACE_COMMON_H_
diff --git a/av1/vp10cx.mk b/av1/vp10cx.mk
index e4d40c8..463c5f7 100644
--- a/av1/vp10cx.mk
+++ b/av1/vp10cx.mk
@@ -8,140 +8,140 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-VP10_CX_EXPORTS += exports_enc
+AV1_CX_EXPORTS += exports_enc
 
-VP10_CX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_CX_SRCS-no  += $(VP10_COMMON_SRCS-no)
-VP10_CX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_CX_SRCS_REMOVE-no  += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no  += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no  += $(AV1_COMMON_SRCS_REMOVE-no)
 
-VP10_CX_SRCS-yes += vp10_cx_iface.c
+AV1_CX_SRCS-yes += av1_cx_iface.c
 
-VP10_CX_SRCS-yes += encoder/bitstream.c
-VP10_CX_SRCS-yes += encoder/bitwriter.h
-VP10_CX_SRCS-yes += encoder/context_tree.c
-VP10_CX_SRCS-yes += encoder/context_tree.h
-VP10_CX_SRCS-yes += encoder/variance_tree.c
-VP10_CX_SRCS-yes += encoder/variance_tree.h
-VP10_CX_SRCS-yes += encoder/cost.h
-VP10_CX_SRCS-yes += encoder/cost.c
-VP10_CX_SRCS-yes += encoder/dct.c
-VP10_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
-VP10_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
-VP10_CX_SRCS-yes += encoder/encodeframe.c
-VP10_CX_SRCS-yes += encoder/encodeframe.h
-VP10_CX_SRCS-yes += encoder/encodemb.c
-VP10_CX_SRCS-yes += encoder/encodemv.c
-VP10_CX_SRCS-yes += encoder/ethread.h
-VP10_CX_SRCS-yes += encoder/ethread.c
-VP10_CX_SRCS-yes += encoder/extend.c
-VP10_CX_SRCS-yes += encoder/firstpass.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
-VP10_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
-VP10_CX_SRCS-yes += encoder/block.h
-VP10_CX_SRCS-yes += encoder/bitstream.h
-VP10_CX_SRCS-yes += encoder/encodemb.h
-VP10_CX_SRCS-yes += encoder/encodemv.h
-VP10_CX_SRCS-yes += encoder/extend.h
-VP10_CX_SRCS-yes += encoder/firstpass.h
-VP10_CX_SRCS-yes += encoder/lookahead.c
-VP10_CX_SRCS-yes += encoder/lookahead.h
-VP10_CX_SRCS-yes += encoder/mcomp.h
-VP10_CX_SRCS-yes += encoder/encoder.h
-VP10_CX_SRCS-yes += encoder/quantize.h
-VP10_CX_SRCS-yes += encoder/ratectrl.h
-VP10_CX_SRCS-yes += encoder/rd.h
-VP10_CX_SRCS-yes += encoder/rdopt.h
-VP10_CX_SRCS-yes += encoder/tokenize.h
-VP10_CX_SRCS-yes += encoder/treewriter.h
-VP10_CX_SRCS-yes += encoder/mcomp.c
-VP10_CX_SRCS-yes += encoder/encoder.c
-VP10_CX_SRCS-yes += encoder/palette.h
-VP10_CX_SRCS-yes += encoder/palette.c
-VP10_CX_SRCS-yes += encoder/picklpf.c
-VP10_CX_SRCS-yes += encoder/picklpf.h
-VP10_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
-VP10_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
-VP10_CX_SRCS-yes += encoder/quantize.c
-VP10_CX_SRCS-yes += encoder/ratectrl.c
-VP10_CX_SRCS-yes += encoder/rd.c
-VP10_CX_SRCS-yes += encoder/rdopt.c
-VP10_CX_SRCS-yes += encoder/segmentation.c
-VP10_CX_SRCS-yes += encoder/segmentation.h
-VP10_CX_SRCS-yes += encoder/speed_features.c
-VP10_CX_SRCS-yes += encoder/speed_features.h
-VP10_CX_SRCS-yes += encoder/subexp.c
-VP10_CX_SRCS-yes += encoder/subexp.h
-VP10_CX_SRCS-yes += encoder/resize.c
-VP10_CX_SRCS-yes += encoder/resize.h
-VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
-VP10_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
-VP10_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/bitwriter.h
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/variance_tree.c
+AV1_CX_SRCS-yes += encoder/variance_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.c
+AV1_CX_SRCS-yes += encoder/hybrid_fwd_txfm.h
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_match.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/corner_detect.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/global_motion.h
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.c
+AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/ransac.h
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/palette.h
+AV1_CX_SRCS-yes += encoder/palette.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.c
+AV1_CX_SRCS-$(CONFIG_LOOP_RESTORATION) += encoder/pickrst.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.h
+AV1_CX_SRCS-$(CONFIG_ANS) += encoder/buf_ans.c
 
-VP10_CX_SRCS-yes += encoder/tokenize.c
-VP10_CX_SRCS-yes += encoder/treewriter.c
-VP10_CX_SRCS-yes += encoder/aq_variance.c
-VP10_CX_SRCS-yes += encoder/aq_variance.h
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
-VP10_CX_SRCS-yes += encoder/aq_complexity.c
-VP10_CX_SRCS-yes += encoder/aq_complexity.h
-VP10_CX_SRCS-yes += encoder/temporal_filter.c
-VP10_CX_SRCS-yes += encoder/temporal_filter.h
-VP10_CX_SRCS-yes += encoder/mbgraph.c
-VP10_CX_SRCS-yes += encoder/mbgraph.h
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
 ifeq ($(CONFIG_DERING),yes)
-VP10_CX_SRCS-yes += encoder/pickdering.c
+AV1_CX_SRCS-yes += encoder/pickdering.c
 endif
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
 endif
 
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
 
 ifeq ($(ARCH_X86_64),yes)
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
 endif
 
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
-VP10_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
-VP10_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp10_highbd_quantize_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/highbd_fwd_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += common/x86/highbd_inv_txfm_sse4.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/av1_highbd_quantize_sse4.c
 endif
 
 ifeq ($(CONFIG_EXT_INTER),yes)
-VP10_CX_SRCS-yes += encoder/wedge_utils.c
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
+AV1_CX_SRCS-yes += encoder/wedge_utils.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/wedge_utils_sse2.c
 endif
 
-VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
 
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
 endif
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
 
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
 
-VP10_CX_SRCS-yes := $(filter-out $(VP10_CX_SRCS_REMOVE-yes),$(VP10_CX_SRCS-yes))
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/vp10dx.mk b/av1/vp10dx.mk
deleted file mode 100644
index ae68475..0000000
--- a/av1/vp10dx.mk
+++ /dev/null
@@ -1,34 +0,0 @@
-##
-##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-##
-##  Use of this source code is governed by a BSD-style license
-##  that can be found in the LICENSE file in the root of the source
-##  tree. An additional intellectual property rights grant can be found
-##  in the file PATENTS.  All contributing project authors may
-##  be found in the AUTHORS file in the root of the source tree.
-##
-
-VP10_DX_EXPORTS += exports_dec
-
-VP10_DX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_DX_SRCS-no  += $(VP10_COMMON_SRCS-no)
-VP10_DX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_DX_SRCS_REMOVE-no  += $(VP10_COMMON_SRCS_REMOVE-no)
-
-VP10_DX_SRCS-yes += vp10_dx_iface.c
-
-VP10_DX_SRCS-yes += decoder/decodemv.c
-VP10_DX_SRCS-yes += decoder/decodeframe.c
-VP10_DX_SRCS-yes += decoder/decodeframe.h
-VP10_DX_SRCS-yes += decoder/detokenize.c
-VP10_DX_SRCS-yes += decoder/decodemv.h
-VP10_DX_SRCS-yes += decoder/detokenize.h
-VP10_DX_SRCS-yes += decoder/dthread.c
-VP10_DX_SRCS-yes += decoder/dthread.h
-VP10_DX_SRCS-yes += decoder/decoder.c
-VP10_DX_SRCS-yes += decoder/decoder.h
-VP10_DX_SRCS-yes += decoder/dsubexp.c
-VP10_DX_SRCS-yes += decoder/dsubexp.h
-VP10_DX_SRCS-yes += decoder/bitreader.h
-
-VP10_DX_SRCS-yes := $(filter-out $(VP10_DX_SRCS_REMOVE-yes),$(VP10_DX_SRCS-yes))
diff --git a/build/make/Android.mk b/build/make/Android.mk
index b3a2a08..290c974 100644
--- a/build/make/Android.mk
+++ b/build/make/Android.mk
@@ -102,18 +102,18 @@
 TGT_ISA:=$(word 1, $(subst -, ,$(TOOLCHAIN)))
 target := libs
 
-LOCAL_SRC_FILES += vpx_config.c
+LOCAL_SRC_FILES += aom_config.c
 
 # Remove duplicate entries
 CODEC_SRCS_UNIQUE = $(sort $(CODEC_SRCS))
 
-# Pull out C files.  vpx_config.c is in the immediate directory and
+# Pull out C files.  aom_config.c is in the immediate directory and
 # so it does not need libaom/ prefixed like the rest of the source files.
 # The neon files with intrinsics need to have .neon appended so the proper
 # flags are applied.
 CODEC_SRCS_C = $(filter %.c, $(CODEC_SRCS_UNIQUE))
 LOCAL_NEON_SRCS_C = $(filter %_neon.c, $(CODEC_SRCS_C))
-LOCAL_CODEC_SRCS_C = $(filter-out vpx_config.c %_neon.c, $(CODEC_SRCS_C))
+LOCAL_CODEC_SRCS_C = $(filter-out aom_config.c %_neon.c, $(CODEC_SRCS_C))
 
 LOCAL_SRC_FILES += $(foreach file, $(LOCAL_CODEC_SRCS_C), libaom/$(file))
 ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
@@ -154,7 +154,7 @@
 endif
 
 LOCAL_CFLAGS += \
-    -DHAVE_CONFIG_H=vpx_config.h \
+    -DHAVE_CONFIG_H=aom_config.h \
     -I$(LIBAOM_PATH) \
     -I$(ASM_CNV_PATH)
 
@@ -168,14 +168,14 @@
 define rtcd_dep_template
 rtcd_dep_template_SRCS := $(addprefix $(LOCAL_PATH)/, $(LOCAL_SRC_FILES))
 rtcd_dep_template_SRCS := $$(rtcd_dep_template_SRCS:.neon=)
-ifeq ($(CONFIG_VP10), yes)
-$$(rtcd_dep_template_SRCS): vp10_rtcd.h
+ifeq ($(CONFIG_AV1), yes)
+$$(rtcd_dep_template_SRCS): av1_rtcd.h
 endif
-$$(rtcd_dep_template_SRCS): vpx_scale_rtcd.h
-$$(rtcd_dep_template_SRCS): vpx_dsp_rtcd.h
+$$(rtcd_dep_template_SRCS): aom_scale_rtcd.h
+$$(rtcd_dep_template_SRCS): aom_dsp_rtcd.h
 
 ifneq ($(findstring $(TARGET_ARCH_ABI),x86 x86_64),)
-$$(rtcd_dep_template_SRCS): vpx_config.asm
+$$(rtcd_dep_template_SRCS): aom_config.asm
 endif
 endef
 
diff --git a/build/make/Makefile b/build/make/Makefile
index 469eb74..dadf1b9 100644
--- a/build/make/Makefile
+++ b/build/make/Makefile
@@ -98,7 +98,7 @@
 	if [ -z "$(target)" ]; then \
       rm -f Makefile; \
       rm -f config.log config.mk; \
-      rm -f vpx_config.[hc] vpx_config.asm; \
+      rm -f aom_config.[hc] aom_config.asm; \
     else \
       rm -f $(target)-$(TOOLCHAIN).mk; \
     fi
@@ -211,7 +211,7 @@
 # Older binutils strip global symbols not needed for relocation processing
 # when given --strip-unneeded. Using nm and awk to identify globals and
 # keep them caused command line length issues under mingw and segfaults in
-# test_libvpx were observed under OS/2: simply use --strip-debug.
+# test_libaom were observed under OS/2: simply use --strip-debug.
 %.a: %_g.a
 	$(if $(quiet),@echo "    [STRIP] $@ < $<")
 	$(qexec)$(STRIP) --strip-debug \
diff --git a/build/make/configure.sh b/build/make/configure.sh
index 230d588..6d9b215 100644
--- a/build/make/configure.sh
+++ b/build/make/configure.sh
@@ -206,14 +206,14 @@
   enabled "${1}" || echo "  enabling ${1}"
   enable_feature "${1}"
 
-  is_in "${1}" vp8 vp9 vp10 && enable_feature "${1}_encoder" "${1}_decoder"
+  is_in "${1}" av1 && enable_feature "${1}_encoder" "${1}_decoder"
 }
 
 disable_codec(){
   disabled "${1}" || echo "  disabling ${1}"
   disable_feature "${1}"
 
-  is_in "${1}" vp8 vp9 vp10 && disable_feature "${1}_encoder" "${1}_decoder"
+  is_in "${1}" av1 && disable_feature "${1}_encoder" "${1}_decoder"
 }
 
 # Iterates through positional parameters, checks to confirm the parameter has
@@ -271,12 +271,12 @@
   TMPDIRx="/tmp"
 fi
 RAND=$(awk 'BEGIN { srand(); printf "%d\n",(rand() * 32768)}')
-TMP_H="${TMPDIRx}/vpx-conf-$$-${RAND}.h"
-TMP_C="${TMPDIRx}/vpx-conf-$$-${RAND}.c"
-TMP_CC="${TMPDIRx}/vpx-conf-$$-${RAND}.cc"
-TMP_O="${TMPDIRx}/vpx-conf-$$-${RAND}.o"
-TMP_X="${TMPDIRx}/vpx-conf-$$-${RAND}.x"
-TMP_ASM="${TMPDIRx}/vpx-conf-$$-${RAND}.asm"
+TMP_H="${TMPDIRx}/aom-conf-$$-${RAND}.h"
+TMP_C="${TMPDIRx}/aom-conf-$$-${RAND}.c"
+TMP_CC="${TMPDIRx}/aom-conf-$$-${RAND}.cc"
+TMP_O="${TMPDIRx}/aom-conf-$$-${RAND}.o"
+TMP_X="${TMPDIRx}/aom-conf-$$-${RAND}.x"
+TMP_ASM="${TMPDIRx}/aom-conf-$$-${RAND}.asm"
 
 clean_temp_files() {
   rm -f ${TMP_C} ${TMP_CC} ${TMP_H} ${TMP_O} ${TMP_X} ${TMP_ASM}
@@ -489,8 +489,8 @@
   print_webm_license ${TMP_H} "/*" " */"
   cat >> ${TMP_H} << EOF
 /* This file automatically generated by configure. Do not edit! */
-#ifndef VPX_CONFIG_H
-#define VPX_CONFIG_H
+#ifndef AOM_CONFIG_H
+#define AOM_CONFIG_H
 #define RESTRICT    ${RESTRICT}
 #define INLINE      ${INLINE}
 EOF
@@ -498,7 +498,7 @@
   print_config_h HAVE   "${TMP_H}" ${HAVE_LIST}
   print_config_h CONFIG "${TMP_H}" ${CONFIG_LIST}
   print_config_vars_h   "${TMP_H}" ${VAR_LIST}
-  echo "#endif /* VPX_CONFIG_H */" >> ${TMP_H}
+  echo "#endif /* AOM_CONFIG_H */" >> ${TMP_H}
   mkdir -p `dirname "$1"`
   cmp "$1" ${TMP_H} >/dev/null 2>&1 || mv ${TMP_H} "$1"
 }
diff --git a/build/make/gen_msvs_vcxproj.sh b/build/make/gen_msvs_vcxproj.sh
index 9c5e859..60e1fd9 100755
--- a/build/make/gen_msvs_vcxproj.sh
+++ b/build/make/gen_msvs_vcxproj.sh
@@ -83,8 +83,8 @@
                              -e 's/^[\./]\+//g' -e 's,[:/ ],_,g')
 
                 if ([ "$pat" == "asm" ] || [ "$pat" == "s" ]) && $asm_use_custom_step; then
-                    # Avoid object file name collisions, i.e. vpx_config.c and
-                    # vpx_config.asm produce the same object file without
+                    # Avoid object file name collisions, i.e. aom_config.c and
+                    # aom_config.asm produce the same object file without
                     # this additional suffix.
                     objf=${objf%.obj}_asm.obj
                     open_tag CustomBuild \
@@ -392,7 +392,7 @@
         for config in Debug Release; do
             open_tag ItemDefinitionGroup \
                 Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'"
-            if [ "$name" == "vpx" ]; then
+            if [ "$name" == "aom" ]; then
                 hostplat=$plat
                 if [ "$hostplat" == "ARM" ]; then
                     hostplat=Win32
diff --git a/build/make/ios-Info.plist b/build/make/ios-Info.plist
index d157b11..300e3e3 100644
--- a/build/make/ios-Info.plist
+++ b/build/make/ios-Info.plist
@@ -5,13 +5,13 @@
 	<key>CFBundleDevelopmentRegion</key>
 	<string>en</string>
 	<key>CFBundleExecutable</key>
-	<string>VPX</string>
+	<string>AOM</string>
 	<key>CFBundleIdentifier</key>
-	<string>org.webmproject.VPX</string>
+	<string>org.webmproject.AOM</string>
 	<key>CFBundleInfoDictionaryVersion</key>
 	<string>6.0</string>
 	<key>CFBundleName</key>
-	<string>VPX</string>
+	<string>AOM</string>
 	<key>CFBundlePackageType</key>
 	<string>FMWK</string>
 	<key>CFBundleShortVersionString</key>
@@ -31,7 +31,7 @@
 		<integer>1</integer>
 		<integer>2</integer>
 	</array>
-	<key>VPXFullVersion</key>
+	<key>AOMFullVersion</key>
 	<string>${FULLVERSION}</string>
 </dict>
 </plist>
diff --git a/build/make/iosbuild.sh b/build/make/iosbuild.sh
index e8fa112..f4cb938 100755
--- a/build/make/iosbuild.sh
+++ b/build/make/iosbuild.sh
@@ -9,10 +9,10 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 ##
-## This script generates 'VPX.framework'. An iOS app can encode and decode VPx
-## video by including 'VPX.framework'.
+## This script generates 'AOM.framework'. An iOS app can encode and decode AVx
+## video by including 'AOM.framework'.
 ##
-## Run iosbuild.sh to create 'VPX.framework' in the current directory.
+## Run iosbuild.sh to create 'AOM.framework' in the current directory.
 ##
 set -e
 devnull='> /dev/null 2>&1'
@@ -23,9 +23,9 @@
                 --disable-libyuv
                 --disable-unit-tests"
 DIST_DIR="_dist"
-FRAMEWORK_DIR="VPX.framework"
-FRAMEWORK_LIB="VPX.framework/VPX"
-HEADER_DIR="${FRAMEWORK_DIR}/Headers/vpx"
+FRAMEWORK_DIR="AOM.framework"
+FRAMEWORK_LIB="AOM.framework/AOM"
+HEADER_DIR="${FRAMEWORK_DIR}/Headers/aom"
 SCRIPT_DIR=$(dirname "$0")
 LIBAOM_SOURCE_DIR=$(cd ${SCRIPT_DIR}/../..; pwd)
 LIPO=$(xcrun -sdk iphoneos${SDK} -find lipo)
@@ -93,15 +93,15 @@
   esac
 }
 
-# Create a vpx_config.h shim that, based on preprocessor settings for the
-# current target CPU, includes the real vpx_config.h for the current target.
+# Create a aom_config.h shim that, based on preprocessor settings for the
+# current target CPU, includes the real aom_config.h for the current target.
 # $1 is the list of targets.
-create_vpx_framework_config_shim() {
+create_aom_framework_config_shim() {
   local targets="$1"
-  local config_file="${HEADER_DIR}/vpx_config.h"
+  local config_file="${HEADER_DIR}/aom_config.h"
   local preproc_symbol=""
   local target=""
-  local include_guard="VPX_FRAMEWORK_HEADERS_VPX_VPX_CONFIG_H_"
+  local include_guard="AOM_FRAMEWORK_HEADERS_AOM_AOM_CONFIG_H_"
 
   local file_header="/*
  *  Copyright (c) $(date +%Y) The WebM project authors. All Rights Reserved.
@@ -124,11 +124,11 @@
   for target in ${targets}; do
     preproc_symbol=$(target_to_preproc_symbol "${target}")
     printf " ${preproc_symbol}\n" >> "${config_file}"
-    printf "#define VPX_FRAMEWORK_TARGET \"${target}\"\n" >> "${config_file}"
-    printf "#include \"VPX/aom/${target}/vpx_config.h\"\n" >> "${config_file}"
+    printf "#define AOM_FRAMEWORK_TARGET \"${target}\"\n" >> "${config_file}"
+    printf "#include \"AOM/aom/${target}/aom_config.h\"\n" >> "${config_file}"
     printf "#elif defined" >> "${config_file}"
     mkdir "${HEADER_DIR}/${target}"
-    cp -p "${BUILD_ROOT}/${target}/vpx_config.h" "${HEADER_DIR}/${target}"
+    cp -p "${BUILD_ROOT}/${target}/aom_config.h" "${HEADER_DIR}/${target}"
   done
 
   # Consume the last line of output from the loop: We don't want it.
@@ -177,7 +177,7 @@
 }
 
 # Configures and builds each target specified by $1, and then builds
-# VPX.framework.
+# AOM.framework.
 build_framework() {
   local lib_list=""
   local targets="$1"
@@ -211,18 +211,18 @@
   cp -p "${target_dist_dir}"/include/aom/* "${HEADER_DIR}"
 
   # Build the fat library.
-  ${LIPO} -create ${lib_list} -output ${FRAMEWORK_DIR}/VPX
+  ${LIPO} -create ${lib_list} -output ${FRAMEWORK_DIR}/AOM
 
-  # Create the vpx_config.h shim that allows usage of vpx_config.h from
-  # within VPX.framework.
-  create_vpx_framework_config_shim "${targets}"
+  # Create the aom_config.h shim that allows usage of aom_config.h from
+  # within AOM.framework.
+  create_aom_framework_config_shim "${targets}"
 
-  # Copy in vpx_version.h.
-  cp -p "${BUILD_ROOT}/${target}/vpx_version.h" "${HEADER_DIR}"
+  # Copy in aom_version.h.
+  cp -p "${BUILD_ROOT}/${target}/aom_version.h" "${HEADER_DIR}"
 
   if [ "${ENABLE_SHARED}" = "yes" ]; then
     # Adjust the dylib's name so dynamic linking in apps works as expected.
-    install_name_tool -id '@rpath/VPX.framework/VPX' ${FRAMEWORK_DIR}/VPX
+    install_name_tool -id '@rpath/AOM.framework/AOM' ${FRAMEWORK_DIR}/AOM
 
     # Copy in Info.plist.
     cat "${SCRIPT_DIR}/ios-Info.plist" \
@@ -232,7 +232,7 @@
       > "${FRAMEWORK_DIR}/Info.plist"
   fi
 
-  # Confirm VPX.framework/VPX contains the targets requested.
+  # Confirm AOM.framework/AOM contains the targets requested.
   verify_framework_targets ${targets}
 
   vlog "Created fat library ${FRAMEWORK_LIB} containing:"
diff --git a/build/make/rtcd.pl b/build/make/rtcd.pl
index 59dbe52..a286f52 100755
--- a/build/make/rtcd.pl
+++ b/build/make/rtcd.pl
@@ -59,7 +59,7 @@
 #
 # Routines for the RTCD DSL to call
 #
-sub vpx_config($) {
+sub aom_config($) {
   return (defined $config{$_[0]}) ? $config{$_[0]} : "";
 }
 
@@ -122,7 +122,7 @@
 }
 
 sub determine_indirection {
-  vpx_config("CONFIG_RUNTIME_CPU_DETECT") eq "yes" or &require(@ALL_ARCHS);
+  aom_config("CONFIG_RUNTIME_CPU_DETECT") eq "yes" or &require(@ALL_ARCHS);
   foreach my $fn (keys %ALL_FUNCS) {
     my $n = "";
     my @val = @{$ALL_FUNCS{$fn}};
@@ -282,7 +282,7 @@
 
   common_top;
   print <<EOF;
-#include "vpx_config.h"
+#include "aom_config.h"
 
 #ifdef RTCD_C
 #include "aom_ports/arm.h"
@@ -308,7 +308,7 @@
   common_top;
 
   print <<EOF;
-#include "vpx_config.h"
+#include "aom_config.h"
 
 #ifdef RTCD_C
 static void setup_rtcd_internal(void)
@@ -319,8 +319,8 @@
 
   print <<EOF;
 #if HAVE_DSPR2
-void vpx_dsputil_static_init();
-vpx_dsputil_static_init();
+void aom_dsputil_static_init();
+aom_dsputil_static_init();
 #endif
 }
 #endif
@@ -332,7 +332,7 @@
   determine_indirection "c";
   common_top;
   print <<EOF;
-#include "vpx_config.h"
+#include "aom_config.h"
 
 #ifdef RTCD_C
 static void setup_rtcd_internal(void)
diff --git a/configure b/configure
index 73a5308..3a7cca8 100755
--- a/configure
+++ b/configure
@@ -34,14 +34,14 @@
   ${toggle_codec_srcs}            in/exclude codec library source code
   ${toggle_debug_libs}            in/exclude debug version of libraries
   ${toggle_static_msvcrt}         use static MSVCRT (VS builds only)
-  ${toggle_vp9_highbitdepth}      use high bit depth (10/12) profiles
+  ${toggle_aom_highbitdepth}      use high bit depth (10/12) profiles
   ${toggle_better_hw_compatibility}
                                   enable encoder to produce streams with better
                                   hardware decoder compatibility
-  ${toggle_vp10}                  VP10 codec support
+  ${toggle_av1}                  AV1 codec support
   ${toggle_internal_stats}        output of encoder internal stats for debug, if supported (encoders)
   ${toggle_postproc}              postprocessing
-  ${toggle_vp9_postproc}          vp9 specific postprocessing
+  ${toggle_av1_postproc}          av1 specific postprocessing
   ${toggle_multithread}           multithreaded encoding and decoding
   ${toggle_spatial_resampling}    spatial sampling (scaling) support
   ${toggle_realtime_only}         enable this option while building for real-time encoding
@@ -57,8 +57,8 @@
   ${toggle_postproc_visualizer}   macro block / block level visualizers
   ${toggle_multi_res_encoding}    enable multiple-resolution encoding
   ${toggle_temporal_denoising}    enable temporal denoising and disable the spatial denoiser
-  ${toggle_vp9_temporal_denoising}
-                                  enable vp9 temporal denoising
+  ${toggle_av1_temporal_denoising}
+                                  enable av1 temporal denoising
   ${toggle_webm_io}               enable input from and output to WebM container
   ${toggle_libyuv}                enable libyuv
 
@@ -170,7 +170,7 @@
 
 if [ "`cd \"${source_path}\" && pwd`" != "`pwd`" ]; then
   # test to see if source_path already configured
-  if [ -f "${source_path}/vpx_config.h" ]; then
+  if [ -f "${source_path}/aom_config.h" ]; then
     die "source directory already configured; run 'make distclean' there first"
   fi
 fi
@@ -189,7 +189,7 @@
 fi
 
 # disable codecs when their source directory does not exist
-[ -d "${source_path}/av1" ] || disable_codec vp10
+[ -d "${source_path}/av1" ] || disable_codec av1
 
 # install everything except the sources, by default. sources will have
 # to be enabled when doing dist builds, since that's no longer a common
@@ -207,11 +207,11 @@
 enable_feature temporal_denoising
 
 CODECS="
-    vp10_encoder
-    vp10_decoder
+    av1_encoder
+    av1_decoder
 "
 CODEC_FAMILIES="
-    vp10
+    av1
 "
 
 ARCH_LIST="
@@ -245,7 +245,7 @@
 "
 HAVE_LIST="
     ${ARCH_EXT_LIST}
-    vpx_ports
+    aom_ports
     pthread_h
     unistd_h
 "
@@ -300,7 +300,7 @@
     dc_recon
     runtime_cpu_detect
     postproc
-    vp9_postproc
+    av1_postproc
     multithread
     internal_stats
     ${CODECS}
@@ -324,9 +324,9 @@
     encode_perf_tests
     multi_res_encoding
     temporal_denoising
-    vp9_temporal_denoising
+    av1_temporal_denoising
     coefficient_range_checking
-    vp9_highbitdepth
+    aom_highbitdepth
     better_hw_compatibility
     experimental
     size_limit
@@ -363,7 +363,7 @@
     dequant_tokens
     dc_recon
     postproc
-    vp9_postproc
+    av1_postproc
     multithread
     internal_stats
     ${CODECS}
@@ -384,10 +384,10 @@
     encode_perf_tests
     multi_res_encoding
     temporal_denoising
-    vp9_temporal_denoising
+    av1_temporal_denoising
     coefficient_range_checking
     better_hw_compatibility
-    vp9_highbitdepth
+    aom_highbitdepth
     experimental
     aom_qm
 "
@@ -445,12 +445,12 @@
 
 process_targets() {
     enabled child || write_common_config_banner
-    write_common_target_config_h ${BUILD_PFX}vpx_config.h
+    write_common_target_config_h ${BUILD_PFX}aom_config.h
     write_common_config_targets
 
     # Calculate the default distribution name, based on the enabled features
     cf=""
-    DIST_DIR=vpx
+    DIST_DIR=aom
     for cf in $CODEC_FAMILIES; do
         if enabled ${cf}_encoder && enabled ${cf}_decoder; then
             DIST_DIR="${DIST_DIR}-${cf}"
@@ -462,7 +462,7 @@
     done
     enabled debug_libs && DIST_DIR="${DIST_DIR}-debug"
     enabled codec_srcs && DIST_DIR="${DIST_DIR}-src"
-    ! enabled postproc && ! enabled vp9_postproc && DIST_DIR="${DIST_DIR}-nopost"
+    ! enabled postproc && ! enabled av1_postproc && DIST_DIR="${DIST_DIR}-nopost"
     ! enabled multithread && DIST_DIR="${DIST_DIR}-nomt"
     ! enabled install_docs && DIST_DIR="${DIST_DIR}-nodocs"
     DIST_DIR="${DIST_DIR}-${tgt_isa}-${tgt_os}"
@@ -510,7 +510,7 @@
 
         if enabled $tgt; then
             echo "Creating makefiles for ${toolchain} ${tgt}"
-            write_common_target_config_mk $tgt_fn ${BUILD_PFX}vpx_config.h
+            write_common_target_config_mk $tgt_fn ${BUILD_PFX}aom_config.h
             #write_${tgt}_config
         fi
     done
@@ -584,7 +584,7 @@
     check_header pthread.h
     check_header unistd.h # for sysconf(3) and friends.
 
-    check_header aom/vpx_integer.h -I${source_path} && enable_feature vpx_ports
+    check_header aom/aom_integer.h -I${source_path} && enable_feature aom_ports
 }
 
 process_toolchain() {
@@ -656,10 +656,6 @@
         enable_feature dc_recon
     fi
 
-    if enabled internal_stats; then
-        enable_feature vp9_postproc
-    fi
-
     # Enable the postbuild target if building for visual studio.
     case "$tgt_cc" in
         vs*) enable_feature msvs
@@ -746,9 +742,9 @@
 ##
 CONFIGURE_ARGS="$@"
 process "$@"
-print_webm_license ${BUILD_PFX}vpx_config.c "/*" " */"
-cat <<EOF >> ${BUILD_PFX}vpx_config.c
-#include "aom/vpx_codec.h"
+print_webm_license ${BUILD_PFX}aom_config.c "/*" " */"
+cat <<EOF >> ${BUILD_PFX}aom_config.c
+#include "aom/aom_codec.h"
 static const char* const cfg = "$CONFIGURE_ARGS";
-const char *vpx_codec_build_config(void) {return cfg;}
+const char *aom_codec_build_config(void) {return cfg;}
 EOF
diff --git a/examples.mk b/examples.mk
index 07bdb05..e55fd03 100644
--- a/examples.mk
+++ b/examples.mk
@@ -63,51 +63,51 @@
 
 # List of examples to build. UTILS are tools meant for distribution
 # while EXAMPLES demonstrate specific portions of the API.
-UTILS-$(CONFIG_DECODERS)    += vpxdec.c
-vpxdec.SRCS                 += md5_utils.c md5_utils.h
-vpxdec.SRCS                 += aom_ports/mem_ops.h
-vpxdec.SRCS                 += aom_ports/mem_ops_aligned.h
-vpxdec.SRCS                 += aom_ports/msvc.h
-vpxdec.SRCS                 += aom_ports/vpx_timer.h
-vpxdec.SRCS                 += aom/vpx_integer.h
-vpxdec.SRCS                 += args.c args.h
-vpxdec.SRCS                 += ivfdec.c ivfdec.h
-vpxdec.SRCS                 += tools_common.c tools_common.h
-vpxdec.SRCS                 += y4menc.c y4menc.h
+UTILS-$(CONFIG_DECODERS)    += aomdec.c
+aomdec.SRCS                 += md5_utils.c md5_utils.h
+aomdec.SRCS                 += aom_ports/mem_ops.h
+aomdec.SRCS                 += aom_ports/mem_ops_aligned.h
+aomdec.SRCS                 += aom_ports/msvc.h
+aomdec.SRCS                 += aom_ports/aom_timer.h
+aomdec.SRCS                 += aom/aom_integer.h
+aomdec.SRCS                 += args.c args.h
+aomdec.SRCS                 += ivfdec.c ivfdec.h
+aomdec.SRCS                 += tools_common.c tools_common.h
+aomdec.SRCS                 += y4menc.c y4menc.h
 ifeq ($(CONFIG_LIBYUV),yes)
-  vpxdec.SRCS                 += $(LIBYUV_SRCS)
+  aomdec.SRCS                 += $(LIBYUV_SRCS)
 endif
 ifeq ($(CONFIG_WEBM_IO),yes)
-  vpxdec.SRCS                 += $(LIBWEBM_COMMON_SRCS)
-  vpxdec.SRCS                 += $(LIBWEBM_MUXER_SRCS)
-  vpxdec.SRCS                 += $(LIBWEBM_PARSER_SRCS)
-  vpxdec.SRCS                 += webmdec.cc webmdec.h
+  aomdec.SRCS                 += $(LIBWEBM_COMMON_SRCS)
+  aomdec.SRCS                 += $(LIBWEBM_MUXER_SRCS)
+  aomdec.SRCS                 += $(LIBWEBM_PARSER_SRCS)
+  aomdec.SRCS                 += webmdec.cc webmdec.h
 endif
-vpxdec.GUID                  = BA5FE66F-38DD-E034-F542-B1578C5FB950
-vpxdec.DESCRIPTION           = Full featured decoder
-UTILS-$(CONFIG_ENCODERS)    += vpxenc.c
-vpxenc.SRCS                 += args.c args.h y4minput.c y4minput.h vpxenc.h
-vpxenc.SRCS                 += ivfdec.c ivfdec.h
-vpxenc.SRCS                 += ivfenc.c ivfenc.h
-vpxenc.SRCS                 += rate_hist.c rate_hist.h
-vpxenc.SRCS                 += tools_common.c tools_common.h
-vpxenc.SRCS                 += warnings.c warnings.h
-vpxenc.SRCS                 += aom_ports/mem_ops.h
-vpxenc.SRCS                 += aom_ports/mem_ops_aligned.h
-vpxenc.SRCS                 += aom_ports/msvc.h
-vpxenc.SRCS                 += aom_ports/vpx_timer.h
-vpxenc.SRCS                 += vpxstats.c vpxstats.h
+aomdec.GUID                  = BA5FE66F-38DD-E034-F542-B1578C5FB950
+aomdec.DESCRIPTION           = Full featured decoder
+UTILS-$(CONFIG_ENCODERS)    += aomenc.c
+aomenc.SRCS                 += args.c args.h y4minput.c y4minput.h aomenc.h
+aomenc.SRCS                 += ivfdec.c ivfdec.h
+aomenc.SRCS                 += ivfenc.c ivfenc.h
+aomenc.SRCS                 += rate_hist.c rate_hist.h
+aomenc.SRCS                 += tools_common.c tools_common.h
+aomenc.SRCS                 += warnings.c warnings.h
+aomenc.SRCS                 += aom_ports/mem_ops.h
+aomenc.SRCS                 += aom_ports/mem_ops_aligned.h
+aomenc.SRCS                 += aom_ports/msvc.h
+aomenc.SRCS                 += aom_ports/aom_timer.h
+aomenc.SRCS                 += aomstats.c aomstats.h
 ifeq ($(CONFIG_LIBYUV),yes)
-  vpxenc.SRCS                 += $(LIBYUV_SRCS)
+  aomenc.SRCS                 += $(LIBYUV_SRCS)
 endif
 ifeq ($(CONFIG_WEBM_IO),yes)
-  vpxenc.SRCS                 += $(LIBWEBM_COMMON_SRCS)
-  vpxenc.SRCS                 += $(LIBWEBM_MUXER_SRCS)
-  vpxenc.SRCS                 += $(LIBWEBM_PARSER_SRCS)
-  vpxenc.SRCS                 += webmenc.cc webmenc.h
+  aomenc.SRCS                 += $(LIBWEBM_COMMON_SRCS)
+  aomenc.SRCS                 += $(LIBWEBM_MUXER_SRCS)
+  aomenc.SRCS                 += $(LIBWEBM_PARSER_SRCS)
+  aomenc.SRCS                 += webmenc.cc webmenc.h
 endif
-vpxenc.GUID                  = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
-vpxenc.DESCRIPTION           = Full featured encoder
+aomenc.GUID                  = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
+aomenc.DESCRIPTION           = Full featured encoder
 
 EXAMPLES-$(CONFIG_DECODERS)        += simple_decoder.c
 simple_decoder.GUID                 = D3BBF1E9-2427-450D-BBFF-B2843C1D44CC
@@ -138,7 +138,7 @@
 simple_encoder.SRCS             += aom_ports/msvc.h
 simple_encoder.GUID              = 4607D299-8A71-4D2C-9B1D-071899B6FBFD
 simple_encoder.DESCRIPTION       = Simplified encoder loop
-EXAMPLES-$(CONFIG_VP10_ENCODER) += lossless_encoder.c
+EXAMPLES-$(CONFIG_AV1_ENCODER) += lossless_encoder.c
 lossless_encoder.SRCS           += ivfenc.h ivfenc.c
 lossless_encoder.SRCS           += tools_common.h tools_common.c
 lossless_encoder.SRCS           += video_common.h
@@ -174,15 +174,15 @@
 set_maps.DESCRIPTION                = Set active and ROI maps
 
 
-ifeq ($(CONFIG_VP10_ENCODER), yes)
+ifeq ($(CONFIG_AV1_ENCODER), yes)
 ifeq ($(CONFIG_DECODERS),yes)
-EXAMPLES-yes                       += vpxcx_set_ref.c
-vpxcx_set_ref.SRCS                 += ivfenc.h ivfenc.c
-vpxcx_set_ref.SRCS                 += tools_common.h tools_common.c
-vpxcx_set_ref.SRCS                 += video_common.h
-vpxcx_set_ref.SRCS                 += video_writer.h video_writer.c
-vpxcx_set_ref.GUID                  = 65D7F14A-2EE6-4293-B958-AB5107A03B55
-vpxcx_set_ref.DESCRIPTION           = VP10 set encoder reference frame
+EXAMPLES-yes                       += aomcx_set_ref.c
+aomcx_set_ref.SRCS                 += ivfenc.h ivfenc.c
+aomcx_set_ref.SRCS                 += tools_common.h tools_common.c
+aomcx_set_ref.SRCS                 += video_common.h
+aomcx_set_ref.SRCS                 += video_writer.h video_writer.c
+aomcx_set_ref.GUID                  = 65D7F14A-2EE6-4293-B958-AB5107A03B55
+aomcx_set_ref.DESCRIPTION           = AV1 set encoder reference frame
 endif
 endif
 
@@ -192,10 +192,10 @@
 # We should not link to math library (libm) on RVCT
 # when building for bare-metal targets
 ifeq ($(CONFIG_OS_SUPPORT), yes)
-CODEC_EXTRA_LIBS-$(CONFIG_VP10)        += m
+CODEC_EXTRA_LIBS-$(CONFIG_AV1)        += m
 else
     ifeq ($(CONFIG_GCC), yes)
-    CODEC_EXTRA_LIBS-$(CONFIG_VP10)        += m
+    CODEC_EXTRA_LIBS-$(CONFIG_AV1)        += m
     endif
 endif
 #
@@ -212,8 +212,8 @@
     INC_PATH-yes := $(SRC_PATH_BARE)/../include
 else
     LIB_PATH-yes                     += $(if $(BUILD_PFX),$(BUILD_PFX),.)
-    INC_PATH-$(CONFIG_VP10_DECODER)   += $(SRC_PATH_BARE)/vp10
-    INC_PATH-$(CONFIG_VP10_ENCODER)   += $(SRC_PATH_BARE)/vp10
+    INC_PATH-$(CONFIG_AV1_DECODER)   += $(SRC_PATH_BARE)/av1
+    INC_PATH-$(CONFIG_AV1_ENCODER)   += $(SRC_PATH_BARE)/av1
 endif
 INC_PATH-$(CONFIG_LIBYUV) += $(SRC_PATH_BARE)/third_party/libyuv/include
 LIB_PATH := $(call enabled,LIB_PATH)
@@ -277,7 +277,7 @@
 
 # Set up additional MSVS environment
 ifeq ($(CONFIG_MSVS),yes)
-CODEC_LIB=$(if $(CONFIG_SHARED),vpx,$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd))
+CODEC_LIB=$(if $(CONFIG_SHARED),aom,$(if $(CONFIG_STATIC_MSVCRT),aommt,aommd))
 # This variable uses deferred expansion intentionally, since the results of
 # $(wildcard) may change during the course of the Make.
 VS_PLATFORMS = $(foreach d,$(wildcard */Release/$(CODEC_LIB).lib),$(word 1,$(subst /, ,$(d))))
@@ -292,7 +292,7 @@
 # even though there is no real dependency there (the dependency is on
 # the makefiles). We may want to revisit this.
 define vcproj_template
-$(1): $($(1:.$(VCPROJ_SFX)=).SRCS) vpx.$(VCPROJ_SFX)
+$(1): $($(1:.$(VCPROJ_SFX)=).SRCS) aom.$(VCPROJ_SFX)
 	$(if $(quiet),@echo "    [vcproj] $$@")
 	$(qexec)$$(GEN_VCPROJ)\
             --exe\
diff --git a/examples/vpxcx_set_ref.c b/examples/aomcx_set_ref.c
similarity index 66%
rename from examples/vpxcx_set_ref.c
rename to examples/aomcx_set_ref.c
index 0d57f4d..7429970 100644
--- a/examples/vpxcx_set_ref.c
+++ b/examples/aomcx_set_ref.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-// VP10 Set Reference Frame
+// AV1 Set Reference Frame
 // ============================
 //
-// This is an example demonstrating how to overwrite the VP10 encoder's
+// This is an example demonstrating how to overwrite the AV1 encoder's
 // internal reference frame. In the sample we set the last frame to the
 // current frame. This technique could be used to bounce between two cameras.
 //
@@ -24,7 +24,7 @@
 // -----
 // This example encodes a raw video. And the last argument passed in specifies
 // the frame number to update the reference frame on. For example, run
-// examples/vpx_cx_set_ref vp10 352 288 in.yuv out.ivf 4 30
+// examples/aom_cx_set_ref av1 352 288 in.yuv out.ivf 4 30
 // The parameter is parsed as follows:
 //
 //
@@ -50,9 +50,9 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vp8cx.h"
-#include "aom/vpx_decoder.h"
-#include "aom/vpx_encoder.h"
+#include "aom/aomcx.h"
+#include "aom/aom_decoder.h"
+#include "aom/aom_encoder.h"
 
 #include "./tools_common.h"
 #include "./video_writer.h"
@@ -67,8 +67,8 @@
   exit(EXIT_FAILURE);
 }
 
-static int compare_img(const vpx_image_t *const img1,
-                       const vpx_image_t *const img2) {
+static int compare_img(const aom_image_t *const img1,
+                       const aom_image_t *const img2) {
   uint32_t l_w = img1->d_w;
   uint32_t c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
   const uint32_t c_h =
@@ -81,26 +81,26 @@
   match &= (img1->d_h == img2->d_h);
 
   for (i = 0; i < img1->d_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
-                     img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
+    match &= (memcmp(img1->planes[AOM_PLANE_Y] + i * img1->stride[AOM_PLANE_Y],
+                     img2->planes[AOM_PLANE_Y] + i * img2->stride[AOM_PLANE_Y],
                      l_w) == 0);
 
   for (i = 0; i < c_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
-                     img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
+    match &= (memcmp(img1->planes[AOM_PLANE_U] + i * img1->stride[AOM_PLANE_U],
+                     img2->planes[AOM_PLANE_U] + i * img2->stride[AOM_PLANE_U],
                      c_w) == 0);
 
   for (i = 0; i < c_h; ++i)
-    match &= (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
-                     img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
+    match &= (memcmp(img1->planes[AOM_PLANE_V] + i * img1->stride[AOM_PLANE_V],
+                     img2->planes[AOM_PLANE_V] + i * img2->stride[AOM_PLANE_V],
                      c_w) == 0);
 
   return match;
 }
 
 #define mmin(a, b) ((a) < (b) ? (a) : (b))
-static void find_mismatch(const vpx_image_t *const img1,
-                          const vpx_image_t *const img2, int yloc[4],
+static void find_mismatch(const aom_image_t *const img1,
+                          const aom_image_t *const img2, int yloc[4],
                           int uloc[4], int vloc[4]) {
   const uint32_t bsize = 64;
   const uint32_t bsizey = bsize >> img1->y_chroma_shift;
@@ -119,16 +119,16 @@
       const int sj = mmin(j + bsize, img1->d_w) - j;
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_Y] +
-                (i + k) * img1->stride[VPX_PLANE_Y] + j + l) !=
-              *(img2->planes[VPX_PLANE_Y] +
-                (i + k) * img2->stride[VPX_PLANE_Y] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_Y] +
+                (i + k) * img1->stride[AOM_PLANE_Y] + j + l) !=
+              *(img2->planes[AOM_PLANE_Y] +
+                (i + k) * img2->stride[AOM_PLANE_Y] + j + l)) {
             yloc[0] = i + k;
             yloc[1] = j + l;
-            yloc[2] = *(img1->planes[VPX_PLANE_Y] +
-                        (i + k) * img1->stride[VPX_PLANE_Y] + j + l);
-            yloc[3] = *(img2->planes[VPX_PLANE_Y] +
-                        (i + k) * img2->stride[VPX_PLANE_Y] + j + l);
+            yloc[2] = *(img1->planes[AOM_PLANE_Y] +
+                        (i + k) * img1->stride[AOM_PLANE_Y] + j + l);
+            yloc[3] = *(img2->planes[AOM_PLANE_Y] +
+                        (i + k) * img2->stride[AOM_PLANE_Y] + j + l);
             match = 0;
             break;
           }
@@ -145,16 +145,16 @@
       const int sj = mmin(j + bsizex, c_w - j);
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_U] +
-                (i + k) * img1->stride[VPX_PLANE_U] + j + l) !=
-              *(img2->planes[VPX_PLANE_U] +
-                (i + k) * img2->stride[VPX_PLANE_U] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_U] +
+                (i + k) * img1->stride[AOM_PLANE_U] + j + l) !=
+              *(img2->planes[AOM_PLANE_U] +
+                (i + k) * img2->stride[AOM_PLANE_U] + j + l)) {
             uloc[0] = i + k;
             uloc[1] = j + l;
-            uloc[2] = *(img1->planes[VPX_PLANE_U] +
-                        (i + k) * img1->stride[VPX_PLANE_U] + j + l);
-            uloc[3] = *(img2->planes[VPX_PLANE_U] +
-                        (i + k) * img2->stride[VPX_PLANE_U] + j + l);
+            uloc[2] = *(img1->planes[AOM_PLANE_U] +
+                        (i + k) * img1->stride[AOM_PLANE_U] + j + l);
+            uloc[3] = *(img2->planes[AOM_PLANE_U] +
+                        (i + k) * img2->stride[AOM_PLANE_U] + j + l);
             match = 0;
             break;
           }
@@ -170,16 +170,16 @@
       const int sj = mmin(j + bsizex, c_w - j);
       for (k = 0; match && k < si; ++k) {
         for (l = 0; match && l < sj; ++l) {
-          if (*(img1->planes[VPX_PLANE_V] +
-                (i + k) * img1->stride[VPX_PLANE_V] + j + l) !=
-              *(img2->planes[VPX_PLANE_V] +
-                (i + k) * img2->stride[VPX_PLANE_V] + j + l)) {
+          if (*(img1->planes[AOM_PLANE_V] +
+                (i + k) * img1->stride[AOM_PLANE_V] + j + l) !=
+              *(img2->planes[AOM_PLANE_V] +
+                (i + k) * img2->stride[AOM_PLANE_V] + j + l)) {
             vloc[0] = i + k;
             vloc[1] = j + l;
-            vloc[2] = *(img1->planes[VPX_PLANE_V] +
-                        (i + k) * img1->stride[VPX_PLANE_V] + j + l);
-            vloc[3] = *(img2->planes[VPX_PLANE_V] +
-                        (i + k) * img2->stride[VPX_PLANE_V] + j + l);
+            vloc[2] = *(img1->planes[AOM_PLANE_V] +
+                        (i + k) * img1->stride[AOM_PLANE_V] + j + l);
+            vloc[3] = *(img2->planes[AOM_PLANE_V] +
+                        (i + k) * img2->stride[AOM_PLANE_V] + j + l);
             match = 0;
             break;
           }
@@ -189,20 +189,20 @@
   }
 }
 
-static void testing_decode(vpx_codec_ctx_t *encoder, vpx_codec_ctx_t *decoder,
-                           vpx_codec_enc_cfg_t *cfg, unsigned int frame_out,
+static void testing_decode(aom_codec_ctx_t *encoder, aom_codec_ctx_t *decoder,
+                           aom_codec_enc_cfg_t *cfg, unsigned int frame_out,
                            int *mismatch_seen) {
-  vpx_image_t enc_img, dec_img;
-  struct vp9_ref_frame ref_enc, ref_dec;
+  aom_image_t enc_img, dec_img;
+  struct av1_ref_frame ref_enc, ref_dec;
 
   if (*mismatch_seen) return;
 
   ref_enc.idx = 0;
   ref_dec.idx = 0;
-  if (vpx_codec_control(encoder, VP9_GET_REFERENCE, &ref_enc))
+  if (aom_codec_control(encoder, AV1_GET_REFERENCE, &ref_enc))
     die_codec(encoder, "Failed to get encoder reference frame");
   enc_img = ref_enc.img;
-  if (vpx_codec_control(decoder, VP9_GET_REFERENCE, &ref_dec))
+  if (aom_codec_control(decoder, AV1_GET_REFERENCE, &ref_dec))
     die_codec(decoder, "Failed to get decoder reference frame");
   dec_img = ref_dec.img;
 
@@ -221,36 +221,36 @@
         v[2], v[3]);
   }
 
-  vpx_img_free(&enc_img);
-  vpx_img_free(&dec_img);
+  aom_img_free(&enc_img);
+  aom_img_free(&dec_img);
 }
 
-static int encode_frame(vpx_codec_ctx_t *ecodec, vpx_codec_enc_cfg_t *cfg,
-                        vpx_image_t *img, unsigned int frame_in,
-                        VpxVideoWriter *writer, int test_decode,
-                        vpx_codec_ctx_t *dcodec, unsigned int *frame_out,
+static int encode_frame(aom_codec_ctx_t *ecodec, aom_codec_enc_cfg_t *cfg,
+                        aom_image_t *img, unsigned int frame_in,
+                        AvxVideoWriter *writer, int test_decode,
+                        aom_codec_ctx_t *dcodec, unsigned int *frame_out,
                         int *mismatch_seen) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
   int got_data;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(ecodec, img, frame_in, 1, 0, VPX_DL_GOOD_QUALITY);
-  if (res != VPX_CODEC_OK) die_codec(ecodec, "Failed to encode frame");
+  const aom_codec_err_t res =
+      aom_codec_encode(ecodec, img, frame_in, 1, 0, AOM_DL_GOOD_QUALITY);
+  if (res != AOM_CODEC_OK) die_codec(ecodec, "Failed to encode frame");
 
   got_data = 0;
 
-  while ((pkt = vpx_codec_get_cx_data(ecodec, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(ecodec, &iter)) != NULL) {
     got_pkts = 1;
 
-    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
-      const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
+    if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
+      const int keyframe = (pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0;
 
-      if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+      if (!(pkt->data.frame.flags & AOM_FRAME_IS_FRAGMENT)) {
         *frame_out += 1;
       }
 
-      if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf,
+      if (!aom_video_writer_write_frame(writer, pkt->data.frame.buf,
                                         pkt->data.frame.sz,
                                         pkt->data.frame.pts)) {
         die_codec(ecodec, "Failed to write compressed frame");
@@ -261,7 +261,7 @@
 
       // Decode 1 frame.
       if (test_decode) {
-        if (vpx_codec_decode(dcodec, pkt->data.frame.buf,
+        if (aom_codec_decode(dcodec, pkt->data.frame.buf,
                              (unsigned int)pkt->data.frame.sz, NULL, 0))
           die_codec(dcodec, "Failed to decode frame.");
       }
@@ -279,19 +279,19 @@
 int main(int argc, char **argv) {
   FILE *infile = NULL;
   // Encoder
-  vpx_codec_ctx_t ecodec = { 0 };
-  vpx_codec_enc_cfg_t cfg = { 0 };
+  aom_codec_ctx_t ecodec = { 0 };
+  aom_codec_enc_cfg_t cfg = { 0 };
   unsigned int frame_in = 0;
-  vpx_image_t raw;
-  vpx_codec_err_t res;
-  VpxVideoInfo info = { 0 };
-  VpxVideoWriter *writer = NULL;
-  const VpxInterface *encoder = NULL;
+  aom_image_t raw;
+  aom_codec_err_t res;
+  AvxVideoInfo info = { 0 };
+  AvxVideoWriter *writer = NULL;
+  const AvxInterface *encoder = NULL;
 
   // Test encoder/decoder mismatch.
   int test_decode = 1;
   // Decoder
-  vpx_codec_ctx_t dcodec;
+  aom_codec_ctx_t dcodec;
   unsigned int frame_out = 0;
 
   // The frame number to set reference frame on
@@ -317,12 +317,12 @@
   infile_arg = argv[4];
   outfile_arg = argv[5];
 
-  encoder = get_vpx_encoder_by_name(codec_arg);
+  encoder = get_aom_encoder_by_name(codec_arg);
   if (!encoder) die("Unsupported codec.");
 
   update_frame_num = atoi(argv[6]);
-  // In VP10, the reference buffers (cm->buffer_pool->frame_bufs[i].buf) are
-  // allocated while calling vpx_codec_encode(), thus, setting reference for
+  // In AV1, the reference buffers (cm->buffer_pool->frame_bufs[i].buf) are
+  // allocated while calling aom_codec_encode(), thus, setting reference for
   // 1st frame isn't supported.
   if (update_frame_num <= 1) die("Couldn't parse frame number '%s'\n", argv[6]);
 
@@ -343,14 +343,14 @@
     die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
   }
 
-  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
+  if (!aom_img_alloc(&raw, AOM_IMG_FMT_I420, info.frame_width,
                      info.frame_height, 1)) {
     die("Failed to allocate image.");
   }
 
-  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(encoder->codec_interface()));
 
-  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
+  res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
   if (res) die_codec(&ecodec, "Failed to get default codec config.");
 
   cfg.g_w = info.frame_width;
@@ -360,41 +360,41 @@
   cfg.rc_target_bitrate = bitrate;
   cfg.g_lag_in_frames = 3;
 
-  writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
+  writer = aom_video_writer_open(outfile_arg, kContainerIVF, &info);
   if (!writer) die("Failed to open %s for writing.", outfile_arg);
 
   if (!(infile = fopen(infile_arg, "rb")))
     die("Failed to open %s for reading.", infile_arg);
 
-  if (vpx_codec_enc_init(&ecodec, encoder->codec_interface(), &cfg, 0))
+  if (aom_codec_enc_init(&ecodec, encoder->codec_interface(), &cfg, 0))
     die_codec(&ecodec, "Failed to initialize encoder");
 
   // Disable alt_ref.
-  if (vpx_codec_control(&ecodec, VP8E_SET_ENABLEAUTOALTREF, 0))
+  if (aom_codec_control(&ecodec, AOME_SET_ENABLEAUTOALTREF, 0))
     die_codec(&ecodec, "Failed to set enable auto alt ref");
 
   if (test_decode) {
-    const VpxInterface *decoder = get_vpx_decoder_by_name(codec_arg);
-    if (vpx_codec_dec_init(&dcodec, decoder->codec_interface(), NULL, 0))
+    const AvxInterface *decoder = get_aom_decoder_by_name(codec_arg);
+    if (aom_codec_dec_init(&dcodec, decoder->codec_interface(), NULL, 0))
       die_codec(&dcodec, "Failed to initialize decoder.");
   }
 
   // Encode frames.
-  while (vpx_img_read(&raw, infile)) {
+  while (aom_img_read(&raw, infile)) {
     if (limit && frame_in >= limit) break;
     if (update_frame_num > 1 && frame_out + 1 == update_frame_num) {
-      vpx_ref_frame_t ref;
-      ref.frame_type = VP8_LAST_FRAME;
+      aom_ref_frame_t ref;
+      ref.frame_type = AOM_LAST_FRAME;
       ref.img = raw;
       // Set reference frame in encoder.
-      if (vpx_codec_control(&ecodec, VP8_SET_REFERENCE, &ref))
+      if (aom_codec_control(&ecodec, AOM_SET_REFERENCE, &ref))
         die_codec(&ecodec, "Failed to set reference frame");
       printf(" <SET_REF>");
 
       // If set_reference in decoder is commented out, the enc/dec mismatch
       // would be seen.
       if (test_decode) {
-        if (vpx_codec_control(&dcodec, VP8_SET_REFERENCE, &ref))
+        if (aom_codec_control(&dcodec, AOM_SET_REFERENCE, &ref))
           die_codec(&dcodec, "Failed to set reference frame");
       }
     }
@@ -423,14 +423,14 @@
   }
 
   if (test_decode)
-    if (vpx_codec_destroy(&dcodec))
+    if (aom_codec_destroy(&dcodec))
       die_codec(&dcodec, "Failed to destroy decoder");
 
-  vpx_img_free(&raw);
-  if (vpx_codec_destroy(&ecodec))
+  aom_img_free(&raw);
+  if (aom_codec_destroy(&ecodec))
     die_codec(&ecodec, "Failed to destroy encoder.");
 
-  vpx_video_writer_close(writer);
+  aom_video_writer_close(writer);
 
   return EXIT_SUCCESS;
 }
diff --git a/examples/decode_to_md5.c b/examples/decode_to_md5.c
index 3cf40e6..869e989 100644
--- a/examples/decode_to_md5.c
+++ b/examples/decode_to_md5.c
@@ -33,15 +33,15 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vp8dx.h"
-#include "aom/vpx_decoder.h"
+#include "aom/aomdx.h"
+#include "aom/aom_decoder.h"
 
 #include "../md5_utils.h"
 #include "../tools_common.h"
 #include "../video_reader.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-static void get_image_md5(const vpx_image_t *img, unsigned char digest[16]) {
+static void get_image_md5(const aom_image_t *img, unsigned char digest[16]) {
   int plane, y;
   MD5Context md5;
 
@@ -78,41 +78,41 @@
 int main(int argc, char **argv) {
   int frame_cnt = 0;
   FILE *outfile = NULL;
-  vpx_codec_ctx_t codec;
-  VpxVideoReader *reader = NULL;
-  const VpxVideoInfo *info = NULL;
-  const VpxInterface *decoder = NULL;
+  aom_codec_ctx_t codec;
+  AvxVideoReader *reader = NULL;
+  const AvxVideoInfo *info = NULL;
+  const AvxInterface *decoder = NULL;
 
   exec_name = argv[0];
 
   if (argc != 3) die("Invalid number of arguments.");
 
-  reader = vpx_video_reader_open(argv[1]);
+  reader = aom_video_reader_open(argv[1]);
   if (!reader) die("Failed to open %s for reading.", argv[1]);
 
   if (!(outfile = fopen(argv[2], "wb")))
     die("Failed to open %s for writing.", argv[2]);
 
-  info = vpx_video_reader_get_info(reader);
+  info = aom_video_reader_get_info(reader);
 
-  decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
+  decoder = get_aom_decoder_by_fourcc(info->codec_fourcc);
   if (!decoder) die("Unknown input codec.");
 
-  printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(decoder->codec_interface()));
 
-  if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
+  if (aom_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
     die_codec(&codec, "Failed to initialize decoder");
 
-  while (vpx_video_reader_read_frame(reader)) {
-    vpx_codec_iter_t iter = NULL;
-    vpx_image_t *img = NULL;
+  while (aom_video_reader_read_frame(reader)) {
+    aom_codec_iter_t iter = NULL;
+    aom_image_t *img = NULL;
     size_t frame_size = 0;
     const unsigned char *frame =
-        vpx_video_reader_get_frame(reader, &frame_size);
-    if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
+        aom_video_reader_get_frame(reader, &frame_size);
+    if (aom_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
       die_codec(&codec, "Failed to decode frame");
 
-    while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
+    while ((img = aom_codec_get_frame(&codec, &iter)) != NULL) {
       unsigned char digest[16];
 
       get_image_md5(img, digest);
@@ -123,9 +123,9 @@
   }
 
   printf("Processed %d frames.\n", frame_cnt);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
-  vpx_video_reader_close(reader);
+  aom_video_reader_close(reader);
 
   fclose(outfile);
   return EXIT_SUCCESS;
diff --git a/examples/decode_with_drops.c b/examples/decode_with_drops.c
index 17757f6..506aff1 100644
--- a/examples/decode_with_drops.c
+++ b/examples/decode_with_drops.c
@@ -56,12 +56,12 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vp8dx.h"
-#include "aom/vpx_decoder.h"
+#include "aom/aomdx.h"
+#include "aom/aom_decoder.h"
 
 #include "../tools_common.h"
 #include "../video_reader.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 static const char *exec_name;
 
@@ -73,10 +73,10 @@
 int main(int argc, char **argv) {
   int frame_cnt = 0;
   FILE *outfile = NULL;
-  vpx_codec_ctx_t codec;
-  const VpxInterface *decoder = NULL;
-  VpxVideoReader *reader = NULL;
-  const VpxVideoInfo *info = NULL;
+  aom_codec_ctx_t codec;
+  const AvxInterface *decoder = NULL;
+  AvxVideoReader *reader = NULL;
+  const AvxVideoInfo *info = NULL;
   int n = 0;
   int m = 0;
   int is_range = 0;
@@ -86,7 +86,7 @@
 
   if (argc != 4) die("Invalid number of arguments.");
 
-  reader = vpx_video_reader_open(argv[1]);
+  reader = aom_video_reader_open(argv[1]);
   if (!reader) die("Failed to open %s for reading.", argv[1]);
 
   if (!(outfile = fopen(argv[2], "wb")))
@@ -98,24 +98,24 @@
   if (!n || !m || (*nptr != '-' && *nptr != '/'))
     die("Couldn't parse pattern %s.\n", argv[3]);
 
-  info = vpx_video_reader_get_info(reader);
+  info = aom_video_reader_get_info(reader);
 
-  decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
+  decoder = get_aom_decoder_by_fourcc(info->codec_fourcc);
   if (!decoder) die("Unknown input codec.");
 
-  printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(decoder->codec_interface()));
 
-  if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
+  if (aom_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
     die_codec(&codec, "Failed to initialize decoder.");
 
-  while (vpx_video_reader_read_frame(reader)) {
-    vpx_codec_iter_t iter = NULL;
-    vpx_image_t *img = NULL;
+  while (aom_video_reader_read_frame(reader)) {
+    aom_codec_iter_t iter = NULL;
+    aom_image_t *img = NULL;
     size_t frame_size = 0;
     int skip;
     const unsigned char *frame =
-        vpx_video_reader_get_frame(reader, &frame_size);
-    if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
+        aom_video_reader_get_frame(reader, &frame_size);
+    if (aom_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
       die_codec(&codec, "Failed to decode frame.");
 
     ++frame_cnt;
@@ -126,8 +126,8 @@
     if (!skip) {
       putc('.', stdout);
 
-      while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL)
-        vpx_img_write(img, outfile);
+      while ((img = aom_codec_get_frame(&codec, &iter)) != NULL)
+        aom_img_write(img, outfile);
     } else {
       putc('X', stdout);
     }
@@ -136,12 +136,12 @@
   }
 
   printf("Processed %d frames.\n", frame_cnt);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
   printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
          info->frame_width, info->frame_height, argv[2]);
 
-  vpx_video_reader_close(reader);
+  aom_video_reader_close(reader);
   fclose(outfile);
 
   return EXIT_SUCCESS;
diff --git a/examples/lossless_encoder.c b/examples/lossless_encoder.c
index 09693c8..7676226 100644
--- a/examples/lossless_encoder.c
+++ b/examples/lossless_encoder.c
@@ -12,8 +12,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_encoder.h"
-#include "aom/vp8cx.h"
+#include "aom/aom_encoder.h"
+#include "aom/aomcx.h"
 
 #include "../tools_common.h"
 #include "../video_writer.h"
@@ -28,21 +28,21 @@
   exit(EXIT_FAILURE);
 }
 
-static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img,
-                        int frame_index, int flags, VpxVideoWriter *writer) {
+static int encode_frame(aom_codec_ctx_t *codec, aom_image_t *img,
+                        int frame_index, int flags, AvxVideoWriter *writer) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(codec, img, frame_index, 1, flags, VPX_DL_GOOD_QUALITY);
-  if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame");
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
+  const aom_codec_err_t res =
+      aom_codec_encode(codec, img, frame_index, 1, flags, AOM_DL_GOOD_QUALITY);
+  if (res != AOM_CODEC_OK) die_codec(codec, "Failed to encode frame");
 
-  while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(codec, &iter)) != NULL) {
     got_pkts = 1;
 
-    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
-      const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
-      if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf,
+    if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
+      const int keyframe = (pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0;
+      if (!aom_video_writer_write_frame(writer, pkt->data.frame.buf,
                                         pkt->data.frame.sz,
                                         pkt->data.frame.pts)) {
         die_codec(codec, "Failed to write compressed frame");
@@ -57,21 +57,21 @@
 
 int main(int argc, char **argv) {
   FILE *infile = NULL;
-  vpx_codec_ctx_t codec;
-  vpx_codec_enc_cfg_t cfg;
+  aom_codec_ctx_t codec;
+  aom_codec_enc_cfg_t cfg;
   int frame_count = 0;
-  vpx_image_t raw;
-  vpx_codec_err_t res;
-  VpxVideoInfo info = { 0 };
-  VpxVideoWriter *writer = NULL;
-  const VpxInterface *encoder = NULL;
+  aom_image_t raw;
+  aom_codec_err_t res;
+  AvxVideoInfo info = { 0 };
+  AvxVideoWriter *writer = NULL;
+  const AvxInterface *encoder = NULL;
   const int fps = 30;
 
   exec_name = argv[0];
 
   if (argc < 5) die("Invalid number of arguments");
 
-  encoder = get_vpx_encoder_by_name("vp9");
+  encoder = get_aom_encoder_by_name("av1");
   if (!encoder) die("Unsupported codec.");
 
   info.codec_fourcc = encoder->fourcc;
@@ -85,14 +85,14 @@
     die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
   }
 
-  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
+  if (!aom_img_alloc(&raw, AOM_IMG_FMT_I420, info.frame_width,
                      info.frame_height, 1)) {
     die("Failed to allocate image.");
   }
 
-  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(encoder->codec_interface()));
 
-  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
+  res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
   if (res) die_codec(&codec, "Failed to get default codec config.");
 
   cfg.g_w = info.frame_width;
@@ -100,20 +100,20 @@
   cfg.g_timebase.num = info.time_base.numerator;
   cfg.g_timebase.den = info.time_base.denominator;
 
-  writer = vpx_video_writer_open(argv[4], kContainerIVF, &info);
+  writer = aom_video_writer_open(argv[4], kContainerIVF, &info);
   if (!writer) die("Failed to open %s for writing.", argv[4]);
 
   if (!(infile = fopen(argv[3], "rb")))
     die("Failed to open %s for reading.", argv[3]);
 
-  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
+  if (aom_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
     die_codec(&codec, "Failed to initialize encoder");
 
-  if (vpx_codec_control_(&codec, VP9E_SET_LOSSLESS, 1))
+  if (aom_codec_control_(&codec, AV1E_SET_LOSSLESS, 1))
     die_codec(&codec, "Failed to use lossless mode");
 
   // Encode frames.
-  while (vpx_img_read(&raw, infile)) {
+  while (aom_img_read(&raw, infile)) {
     encode_frame(&codec, &raw, frame_count++, 0, writer);
   }
 
@@ -125,10 +125,10 @@
   fclose(infile);
   printf("Processed %d frames.\n", frame_count);
 
-  vpx_img_free(&raw);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  aom_img_free(&raw);
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
-  vpx_video_writer_close(writer);
+  aom_video_writer_close(writer);
 
   return EXIT_SUCCESS;
 }
diff --git a/examples/resize_util.c b/examples/resize_util.c
index 7e529b2..4f56885 100644
--- a/examples/resize_util.c
+++ b/examples/resize_util.c
@@ -16,7 +16,7 @@
 #include <string.h>
 
 #include "../tools_common.h"
-#include "../vp9/encoder/vp9_resize.h"
+#include "../av1/encoder/av1_resize.h"
 
 static const char *exec_name = NULL;
 
@@ -107,7 +107,7 @@
   f = 0;
   while (f < frames) {
     if (fread(inbuf, width * height * 3 / 2, 1, fpin) != 1) break;
-    vp9_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2, height,
+    av1_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2, height,
                         width, outbuf, target_width, outbuf_u, outbuf_v,
                         target_width / 2, target_height, target_width);
     fwrite(outbuf, target_width * target_height * 3 / 2, 1, fpout);
diff --git a/examples/set_maps.c b/examples/set_maps.c
index ae6a113c..2f42751 100644
--- a/examples/set_maps.c
+++ b/examples/set_maps.c
@@ -46,8 +46,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vp8cx.h"
-#include "aom/vpx_encoder.h"
+#include "aom/aomcx.h"
+#include "aom/aom_encoder.h"
 
 #include "../tools_common.h"
 #include "../video_writer.h"
@@ -60,10 +60,10 @@
   exit(EXIT_FAILURE);
 }
 
-static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
-                        vpx_codec_ctx_t *codec) {
+static void set_roi_map(const aom_codec_enc_cfg_t *cfg,
+                        aom_codec_ctx_t *codec) {
   unsigned int i;
-  vpx_roi_map_t roi;
+  aom_roi_map_t roi;
   memset(&roi, 0, sizeof(roi));
 
   roi.rows = (cfg->g_h + 15) / 16;
@@ -87,16 +87,16 @@
   roi.roi_map = (uint8_t *)malloc(roi.rows * roi.cols);
   for (i = 0; i < roi.rows * roi.cols; ++i) roi.roi_map[i] = i % 4;
 
-  if (vpx_codec_control(codec, VP8E_SET_ROI_MAP, &roi))
+  if (aom_codec_control(codec, AOME_SET_ROI_MAP, &roi))
     die_codec(codec, "Failed to set ROI map");
 
   free(roi.roi_map);
 }
 
-static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
-                           vpx_codec_ctx_t *codec) {
+static void set_active_map(const aom_codec_enc_cfg_t *cfg,
+                           aom_codec_ctx_t *codec) {
   unsigned int i;
-  vpx_active_map_t map = { 0, 0, 0 };
+  aom_active_map_t map = { 0, 0, 0 };
 
   map.rows = (cfg->g_h + 15) / 16;
   map.cols = (cfg->g_w + 15) / 16;
@@ -104,39 +104,39 @@
   map.active_map = (uint8_t *)malloc(map.rows * map.cols);
   for (i = 0; i < map.rows * map.cols; ++i) map.active_map[i] = i % 2;
 
-  if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
+  if (aom_codec_control(codec, AOME_SET_ACTIVEMAP, &map))
     die_codec(codec, "Failed to set active map");
 
   free(map.active_map);
 }
 
-static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
-                             vpx_codec_ctx_t *codec) {
-  vpx_active_map_t map = { 0, 0, 0 };
+static void unset_active_map(const aom_codec_enc_cfg_t *cfg,
+                             aom_codec_ctx_t *codec) {
+  aom_active_map_t map = { 0, 0, 0 };
 
   map.rows = (cfg->g_h + 15) / 16;
   map.cols = (cfg->g_w + 15) / 16;
   map.active_map = NULL;
 
-  if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
+  if (aom_codec_control(codec, AOME_SET_ACTIVEMAP, &map))
     die_codec(codec, "Failed to set active map");
 }
 
-static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img,
-                        int frame_index, VpxVideoWriter *writer) {
+static int encode_frame(aom_codec_ctx_t *codec, aom_image_t *img,
+                        int frame_index, AvxVideoWriter *writer) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(codec, img, frame_index, 1, 0, VPX_DL_GOOD_QUALITY);
-  if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame");
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
+  const aom_codec_err_t res =
+      aom_codec_encode(codec, img, frame_index, 1, 0, AOM_DL_GOOD_QUALITY);
+  if (res != AOM_CODEC_OK) die_codec(codec, "Failed to encode frame");
 
-  while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(codec, &iter)) != NULL) {
     got_pkts = 1;
 
-    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
-      const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
-      if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf,
+    if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
+      const int keyframe = (pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0;
+      if (!aom_video_writer_write_frame(writer, pkt->data.frame.buf,
                                         pkt->data.frame.sz,
                                         pkt->data.frame.pts)) {
         die_codec(codec, "Failed to write compressed frame");
@@ -152,14 +152,14 @@
 
 int main(int argc, char **argv) {
   FILE *infile = NULL;
-  vpx_codec_ctx_t codec;
-  vpx_codec_enc_cfg_t cfg;
+  aom_codec_ctx_t codec;
+  aom_codec_enc_cfg_t cfg;
   int frame_count = 0;
-  vpx_image_t raw;
-  vpx_codec_err_t res;
-  VpxVideoInfo info;
-  VpxVideoWriter *writer = NULL;
-  const VpxInterface *encoder = NULL;
+  aom_image_t raw;
+  aom_codec_err_t res;
+  AvxVideoInfo info;
+  AvxVideoWriter *writer = NULL;
+  const AvxInterface *encoder = NULL;
   const int fps = 2;  // TODO(dkovalev) add command line argument
   const double bits_per_pixel_per_frame = 0.067;
 
@@ -168,7 +168,7 @@
 
   memset(&info, 0, sizeof(info));
 
-  encoder = get_vpx_encoder_by_name(argv[1]);
+  encoder = get_aom_encoder_by_name(argv[1]);
   if (encoder == NULL) {
     die("Unsupported codec.");
   }
@@ -184,14 +184,14 @@
     die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
   }
 
-  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
+  if (!aom_img_alloc(&raw, AOM_IMG_FMT_I420, info.frame_width,
                      info.frame_height, 1)) {
     die("Failed to allocate image.");
   }
 
-  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(encoder->codec_interface()));
 
-  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
+  res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
   if (res) die_codec(&codec, "Failed to get default codec config.");
 
   cfg.g_w = info.frame_width;
@@ -202,20 +202,20 @@
       (unsigned int)(bits_per_pixel_per_frame * cfg.g_w * cfg.g_h * fps / 1000);
   cfg.g_lag_in_frames = 0;
 
-  writer = vpx_video_writer_open(argv[5], kContainerIVF, &info);
+  writer = aom_video_writer_open(argv[5], kContainerIVF, &info);
   if (!writer) die("Failed to open %s for writing.", argv[5]);
 
   if (!(infile = fopen(argv[4], "rb")))
     die("Failed to open %s for reading.", argv[4]);
 
-  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
+  if (aom_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
     die_codec(&codec, "Failed to initialize encoder");
 
   // Encode frames.
-  while (vpx_img_read(&raw, infile)) {
+  while (aom_img_read(&raw, infile)) {
     ++frame_count;
 
-    if (frame_count == 22 && encoder->fourcc == VP8_FOURCC) {
+    if (frame_count == 22 && encoder->fourcc == AV1_FOURCC) {
       set_roi_map(&cfg, &codec);
     } else if (frame_count == 33) {
       set_active_map(&cfg, &codec);
@@ -234,10 +234,10 @@
   fclose(infile);
   printf("Processed %d frames.\n", frame_count);
 
-  vpx_img_free(&raw);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  aom_img_free(&raw);
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
-  vpx_video_writer_close(writer);
+  aom_video_writer_close(writer);
 
   return EXIT_SUCCESS;
 }
diff --git a/examples/simple_decoder.c b/examples/simple_decoder.c
index 7b21ebf..42e9cbb 100644
--- a/examples/simple_decoder.c
+++ b/examples/simple_decoder.c
@@ -26,26 +26,26 @@
 //
 // Standard Includes
 // -----------------
-// For decoders, you only have to include `vpx_decoder.h` and then any
+// For decoders, you only have to include `aom_decoder.h` and then any
 // header files for the specific codecs you use. In this case, we're using
 // vp8.
 //
 // Initializing The Codec
 // ----------------------
-// The libaom decoder is initialized by the call to vpx_codec_dec_init().
-// Determining the codec interface to use is handled by VpxVideoReader and the
-// functions prefixed with vpx_video_reader_. Discussion of those functions is
+// The libaom decoder is initialized by the call to aom_codec_dec_init().
+// Determining the codec interface to use is handled by AvxVideoReader and the
+// functions prefixed with aom_video_reader_. Discussion of those functions is
 // beyond the scope of this example, but the main gist is to open the input file
-// and parse just enough of it to determine if it's a VPx file and which VPx
+// and parse just enough of it to determine if it's an AVx file and which AVx
 // codec is contained within the file.
-// Note the NULL pointer passed to vpx_codec_dec_init(). We do that in this
+// Note the NULL pointer passed to aom_codec_dec_init(). We do that in this
 // example because we want the algorithm to determine the stream configuration
 // (width/height) and allocate memory automatically.
 //
 // Decoding A Frame
 // ----------------
 // Once the frame has been read into memory, it is decoded using the
-// `vpx_codec_decode` function. The call takes a pointer to the data
+// `aom_codec_decode` function. The call takes a pointer to the data
 // (`frame`) and the length of the data (`frame_size`). No application data
 // is associated with the frame in this example, so the `user_priv`
 // parameter is NULL. The `deadline` parameter is left at zero for this
@@ -53,10 +53,10 @@
 // processing.
 //
 // Codecs may produce a variable number of output frames for every call to
-// `vpx_codec_decode`. These frames are retrieved by the
-// `vpx_codec_get_frame` iterator function. The iterator variable `iter` is
-// initialized to NULL each time `vpx_codec_decode` is called.
-// `vpx_codec_get_frame` is called in a loop, returning a pointer to a
+// `aom_codec_decode`. These frames are retrieved by the
+// `aom_codec_get_frame` iterator function. The iterator variable `iter` is
+// initialized to NULL each time `aom_codec_decode` is called.
+// `aom_codec_get_frame` is called in a loop, returning a pointer to a
 // decoded image or NULL to indicate the end of list.
 //
 // Processing The Decoded Data
@@ -66,24 +66,24 @@
 //
 // Cleanup
 // -------
-// The `vpx_codec_destroy` call frees any memory allocated by the codec.
+// The `aom_codec_destroy` call frees any memory allocated by the codec.
 //
 // Error Handling
 // --------------
 // This example does not special case any error return codes. If there was
 // an error, a descriptive message is printed and the program exits. With
-// few exceptions, vpx_codec functions return an enumerated error status,
+// few exceptions, aom_codec functions return an enumerated error status,
 // with the value `0` indicating success.
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_decoder.h"
+#include "aom/aom_decoder.h"
 
 #include "../tools_common.h"
 #include "../video_reader.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 static const char *exec_name;
 
@@ -95,53 +95,53 @@
 int main(int argc, char **argv) {
   int frame_cnt = 0;
   FILE *outfile = NULL;
-  vpx_codec_ctx_t codec;
-  VpxVideoReader *reader = NULL;
-  const VpxInterface *decoder = NULL;
-  const VpxVideoInfo *info = NULL;
+  aom_codec_ctx_t codec;
+  AvxVideoReader *reader = NULL;
+  const AvxInterface *decoder = NULL;
+  const AvxVideoInfo *info = NULL;
 
   exec_name = argv[0];
 
   if (argc != 3) die("Invalid number of arguments.");
 
-  reader = vpx_video_reader_open(argv[1]);
+  reader = aom_video_reader_open(argv[1]);
   if (!reader) die("Failed to open %s for reading.", argv[1]);
 
   if (!(outfile = fopen(argv[2], "wb")))
     die("Failed to open %s for writing.", argv[2]);
 
-  info = vpx_video_reader_get_info(reader);
+  info = aom_video_reader_get_info(reader);
 
-  decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
+  decoder = get_aom_decoder_by_fourcc(info->codec_fourcc);
   if (!decoder) die("Unknown input codec.");
 
-  printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(decoder->codec_interface()));
 
-  if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
+  if (aom_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
     die_codec(&codec, "Failed to initialize decoder.");
 
-  while (vpx_video_reader_read_frame(reader)) {
-    vpx_codec_iter_t iter = NULL;
-    vpx_image_t *img = NULL;
+  while (aom_video_reader_read_frame(reader)) {
+    aom_codec_iter_t iter = NULL;
+    aom_image_t *img = NULL;
     size_t frame_size = 0;
     const unsigned char *frame =
-        vpx_video_reader_get_frame(reader, &frame_size);
-    if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
+        aom_video_reader_get_frame(reader, &frame_size);
+    if (aom_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
       die_codec(&codec, "Failed to decode frame.");
 
-    while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
-      vpx_img_write(img, outfile);
+    while ((img = aom_codec_get_frame(&codec, &iter)) != NULL) {
+      aom_img_write(img, outfile);
       ++frame_cnt;
     }
   }
 
   printf("Processed %d frames.\n", frame_cnt);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec");
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec");
 
   printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
          info->frame_width, info->frame_height, argv[2]);
 
-  vpx_video_reader_close(reader);
+  aom_video_reader_close(reader);
 
   fclose(outfile);
 
diff --git a/examples/simple_encoder.c b/examples/simple_encoder.c
index 2cae453..2b1bc7f 100644
--- a/examples/simple_encoder.c
+++ b/examples/simple_encoder.c
@@ -26,9 +26,9 @@
 //
 // Standard Includes
 // -----------------
-// For encoders, you only have to include `vpx_encoder.h` and then any
+// For encoders, you only have to include `aom_encoder.h` and then any
 // header files for the specific codecs you use. In this case, we're using
-// vp8.
+// aom.
 //
 // Getting The Default Configuration
 // ---------------------------------
@@ -60,32 +60,32 @@
 // is passed, indicating the End-Of-Stream condition to the encoder. The
 // `frame_cnt` is reused as the presentation time stamp (PTS) and each
 // frame is shown for one frame-time in duration. The flags parameter is
-// unused in this example. The deadline is set to VPX_DL_REALTIME to
+// unused in this example. The deadline is set to AOM_DL_REALTIME to
 // make the example run as quickly as possible.
 
 // Forced Keyframes
 // ----------------
-// Keyframes can be forced by setting the VPX_EFLAG_FORCE_KF bit of the
-// flags passed to `vpx_codec_control()`. In this example, we force a
+// Keyframes can be forced by setting the AOM_EFLAG_FORCE_KF bit of the
+// flags passed to `aom_codec_control()`. In this example, we force a
 // keyframe every <keyframe-interval> frames. Note, the output stream can
 // contain additional keyframes beyond those that have been forced using the
-// VPX_EFLAG_FORCE_KF flag because of automatic keyframe placement by the
+// AOM_EFLAG_FORCE_KF flag because of automatic keyframe placement by the
 // encoder.
 //
 // Processing The Encoded Data
 // ---------------------------
-// Each packet of type `VPX_CODEC_CX_FRAME_PKT` contains the encoded data
+// Each packet of type `AOM_CODEC_CX_FRAME_PKT` contains the encoded data
 // for this frame. We write a IVF frame header, followed by the raw data.
 //
 // Cleanup
 // -------
-// The `vpx_codec_destroy` call frees any memory allocated by the codec.
+// The `aom_codec_destroy` call frees any memory allocated by the codec.
 //
 // Error Handling
 // --------------
 // This example does not special case any error return codes. If there was
 // an error, a descriptive message is printed and the program exits. With
-// few exeptions, vpx_codec functions return an enumerated error status,
+// few exceptions, aom_codec functions return an enumerated error status,
 // with the value `0` indicating success.
 //
 // Error Resiliency Features
@@ -99,7 +99,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #include "../tools_common.h"
 #include "../video_writer.h"
@@ -115,21 +115,21 @@
   exit(EXIT_FAILURE);
 }
 
-static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img,
-                        int frame_index, int flags, VpxVideoWriter *writer) {
+static int encode_frame(aom_codec_ctx_t *codec, aom_image_t *img,
+                        int frame_index, int flags, AvxVideoWriter *writer) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(codec, img, frame_index, 1, flags, VPX_DL_GOOD_QUALITY);
-  if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame");
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
+  const aom_codec_err_t res =
+      aom_codec_encode(codec, img, frame_index, 1, flags, AOM_DL_GOOD_QUALITY);
+  if (res != AOM_CODEC_OK) die_codec(codec, "Failed to encode frame");
 
-  while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(codec, &iter)) != NULL) {
     got_pkts = 1;
 
-    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
-      const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
-      if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf,
+    if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
+      const int keyframe = (pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0;
+      if (!aom_video_writer_write_frame(writer, pkt->data.frame.buf,
                                         pkt->data.frame.sz,
                                         pkt->data.frame.pts)) {
         die_codec(codec, "Failed to write compressed frame");
@@ -145,14 +145,14 @@
 // TODO(tomfinegan): Improve command line parsing and add args for bitrate/fps.
 int main(int argc, char **argv) {
   FILE *infile = NULL;
-  vpx_codec_ctx_t codec;
-  vpx_codec_enc_cfg_t cfg;
+  aom_codec_ctx_t codec;
+  aom_codec_enc_cfg_t cfg;
   int frame_count = 0;
-  vpx_image_t raw;
-  vpx_codec_err_t res;
-  VpxVideoInfo info = { 0 };
-  VpxVideoWriter *writer = NULL;
-  const VpxInterface *encoder = NULL;
+  aom_image_t raw;
+  aom_codec_err_t res;
+  AvxVideoInfo info = { 0 };
+  AvxVideoWriter *writer = NULL;
+  const AvxInterface *encoder = NULL;
   const int fps = 30;
   const int bitrate = 200;
   int keyframe_interval = 0;
@@ -177,7 +177,7 @@
   keyframe_interval_arg = argv[6];
   max_frames = strtol(argv[8], NULL, 0);
 
-  encoder = get_vpx_encoder_by_name(codec_arg);
+  encoder = get_aom_encoder_by_name(codec_arg);
   if (!encoder) die("Unsupported codec.");
 
   info.codec_fourcc = encoder->fourcc;
@@ -191,7 +191,7 @@
     die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
   }
 
-  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
+  if (!aom_img_alloc(&raw, AOM_IMG_FMT_I420, info.frame_width,
                      info.frame_height, 1)) {
     die("Failed to allocate image.");
   }
@@ -199,9 +199,9 @@
   keyframe_interval = strtol(keyframe_interval_arg, NULL, 0);
   if (keyframe_interval < 0) die("Invalid keyframe interval value.");
 
-  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(encoder->codec_interface()));
 
-  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
+  res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
   if (res) die_codec(&codec, "Failed to get default codec config.");
 
   cfg.g_w = info.frame_width;
@@ -211,20 +211,20 @@
   cfg.rc_target_bitrate = bitrate;
   cfg.g_error_resilient = strtol(argv[7], NULL, 0);
 
-  writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
+  writer = aom_video_writer_open(outfile_arg, kContainerIVF, &info);
   if (!writer) die("Failed to open %s for writing.", outfile_arg);
 
   if (!(infile = fopen(infile_arg, "rb")))
     die("Failed to open %s for reading.", infile_arg);
 
-  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
+  if (aom_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
     die_codec(&codec, "Failed to initialize encoder");
 
   // Encode frames.
-  while (vpx_img_read(&raw, infile)) {
+  while (aom_img_read(&raw, infile)) {
     int flags = 0;
     if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
-      flags |= VPX_EFLAG_FORCE_KF;
+      flags |= AOM_EFLAG_FORCE_KF;
     encode_frame(&codec, &raw, frame_count++, flags, writer);
     frames_encoded++;
     if (max_frames > 0 && frames_encoded >= max_frames) break;
@@ -238,10 +238,10 @@
   fclose(infile);
   printf("Processed %d frames.\n", frame_count);
 
-  vpx_img_free(&raw);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  aom_img_free(&raw);
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
-  vpx_video_writer_close(writer);
+  aom_video_writer_close(writer);
 
   return EXIT_SUCCESS;
 }
diff --git a/examples/twopass_encoder.c b/examples/twopass_encoder.c
index 0ed303a..6c910ef 100644
--- a/examples/twopass_encoder.c
+++ b/examples/twopass_encoder.c
@@ -29,11 +29,11 @@
 // ----------------
 // Encoding a frame in two pass mode is identical to the simple encoder
 // example. To increase the quality while sacrificing encoding speed,
-// VPX_DL_BEST_QUALITY can be used in place of VPX_DL_GOOD_QUALITY.
+// AOM_DL_BEST_QUALITY can be used in place of AOM_DL_GOOD_QUALITY.
 //
 // Processing Statistics Packets
 // -----------------------------
-// Each packet of type `VPX_CODEC_CX_FRAME_PKT` contains the encoded data
+// Each packet of type `AOM_CODEC_CX_FRAME_PKT` contains the encoded data
 // for this frame. We write a IVF frame header, followed by the raw data.
 //
 //
@@ -51,7 +51,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #include "../tools_common.h"
 #include "../video_writer.h"
@@ -66,21 +66,21 @@
   exit(EXIT_FAILURE);
 }
 
-static int get_frame_stats(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
-                           vpx_codec_pts_t pts, unsigned int duration,
-                           vpx_enc_frame_flags_t flags, unsigned int deadline,
-                           vpx_fixed_buf_t *stats) {
+static int get_frame_stats(aom_codec_ctx_t *ctx, const aom_image_t *img,
+                           aom_codec_pts_t pts, unsigned int duration,
+                           aom_enc_frame_flags_t flags, unsigned int deadline,
+                           aom_fixed_buf_t *stats) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(ctx, img, pts, duration, flags, deadline);
-  if (res != VPX_CODEC_OK) die_codec(ctx, "Failed to get frame stats.");
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
+  const aom_codec_err_t res =
+      aom_codec_encode(ctx, img, pts, duration, flags, deadline);
+  if (res != AOM_CODEC_OK) die_codec(ctx, "Failed to get frame stats.");
 
-  while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(ctx, &iter)) != NULL) {
     got_pkts = 1;
 
-    if (pkt->kind == VPX_CODEC_STATS_PKT) {
+    if (pkt->kind == AOM_CODEC_STATS_PKT) {
       const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
       const size_t pkt_size = pkt->data.twopass_stats.sz;
       stats->buf = realloc(stats->buf, stats->sz + pkt_size);
@@ -92,23 +92,23 @@
   return got_pkts;
 }
 
-static int encode_frame(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
-                        vpx_codec_pts_t pts, unsigned int duration,
-                        vpx_enc_frame_flags_t flags, unsigned int deadline,
-                        VpxVideoWriter *writer) {
+static int encode_frame(aom_codec_ctx_t *ctx, const aom_image_t *img,
+                        aom_codec_pts_t pts, unsigned int duration,
+                        aom_enc_frame_flags_t flags, unsigned int deadline,
+                        AvxVideoWriter *writer) {
   int got_pkts = 0;
-  vpx_codec_iter_t iter = NULL;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  const vpx_codec_err_t res =
-      vpx_codec_encode(ctx, img, pts, duration, flags, deadline);
-  if (res != VPX_CODEC_OK) die_codec(ctx, "Failed to encode frame.");
+  aom_codec_iter_t iter = NULL;
+  const aom_codec_cx_pkt_t *pkt = NULL;
+  const aom_codec_err_t res =
+      aom_codec_encode(ctx, img, pts, duration, flags, deadline);
+  if (res != AOM_CODEC_OK) die_codec(ctx, "Failed to encode frame.");
 
-  while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+  while ((pkt = aom_codec_get_cx_data(ctx, &iter)) != NULL) {
     got_pkts = 1;
-    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
-      const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
+    if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
+      const int keyframe = (pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0;
 
-      if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf,
+      if (!aom_video_writer_write_frame(writer, pkt->data.frame.buf,
                                         pkt->data.frame.sz,
                                         pkt->data.frame.pts))
         die_codec(ctx, "Failed to write compressed frame.");
@@ -120,69 +120,69 @@
   return got_pkts;
 }
 
-static vpx_fixed_buf_t pass0(vpx_image_t *raw, FILE *infile,
-                             const VpxInterface *encoder,
-                             const vpx_codec_enc_cfg_t *cfg, int max_frames) {
-  vpx_codec_ctx_t codec;
+static aom_fixed_buf_t pass0(aom_image_t *raw, FILE *infile,
+                             const AvxInterface *encoder,
+                             const aom_codec_enc_cfg_t *cfg, int max_frames) {
+  aom_codec_ctx_t codec;
   int frame_count = 0;
-  vpx_fixed_buf_t stats = { NULL, 0 };
+  aom_fixed_buf_t stats = { NULL, 0 };
 
-  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+  if (aom_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
     die_codec(&codec, "Failed to initialize encoder");
 
   // Calculate frame statistics.
-  while (vpx_img_read(raw, infile)) {
+  while (aom_img_read(raw, infile)) {
     ++frame_count;
-    get_frame_stats(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY,
+    get_frame_stats(&codec, raw, frame_count, 1, 0, AOM_DL_GOOD_QUALITY,
                     &stats);
     if (max_frames > 0 && frame_count >= max_frames) break;
   }
 
   // Flush encoder.
-  while (get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_GOOD_QUALITY,
+  while (get_frame_stats(&codec, NULL, frame_count, 1, 0, AOM_DL_GOOD_QUALITY,
                          &stats)) {
   }
 
   printf("Pass 0 complete. Processed %d frames.\n", frame_count);
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
   return stats;
 }
 
-static void pass1(vpx_image_t *raw, FILE *infile, const char *outfile_name,
-                  const VpxInterface *encoder, const vpx_codec_enc_cfg_t *cfg,
+static void pass1(aom_image_t *raw, FILE *infile, const char *outfile_name,
+                  const AvxInterface *encoder, const aom_codec_enc_cfg_t *cfg,
                   int max_frames) {
-  VpxVideoInfo info = { encoder->fourcc,
+  AvxVideoInfo info = { encoder->fourcc,
                         cfg->g_w,
                         cfg->g_h,
                         { cfg->g_timebase.num, cfg->g_timebase.den } };
-  VpxVideoWriter *writer = NULL;
-  vpx_codec_ctx_t codec;
+  AvxVideoWriter *writer = NULL;
+  aom_codec_ctx_t codec;
   int frame_count = 0;
 
-  writer = vpx_video_writer_open(outfile_name, kContainerIVF, &info);
+  writer = aom_video_writer_open(outfile_name, kContainerIVF, &info);
   if (!writer) die("Failed to open %s for writing", outfile_name);
 
-  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+  if (aom_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
     die_codec(&codec, "Failed to initialize encoder");
 
   // Encode frames.
-  while (vpx_img_read(raw, infile)) {
+  while (aom_img_read(raw, infile)) {
     ++frame_count;
-    encode_frame(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, writer);
+    encode_frame(&codec, raw, frame_count, 1, 0, AOM_DL_GOOD_QUALITY, writer);
 
     if (max_frames > 0 && frame_count >= max_frames) break;
   }
 
   // Flush encoder.
-  while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) {
+  while (encode_frame(&codec, NULL, -1, 1, 0, AOM_DL_GOOD_QUALITY, writer)) {
   }
 
   printf("\n");
 
-  if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
+  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
 
-  vpx_video_writer_close(writer);
+  aom_video_writer_close(writer);
 
   printf("Pass 1 complete. Processed %d frames.\n", frame_count);
 }
@@ -190,13 +190,13 @@
 int main(int argc, char **argv) {
   FILE *infile = NULL;
   int w, h;
-  vpx_codec_ctx_t codec;
-  vpx_codec_enc_cfg_t cfg;
-  vpx_image_t raw;
-  vpx_codec_err_t res;
-  vpx_fixed_buf_t stats;
+  aom_codec_ctx_t codec;
+  aom_codec_enc_cfg_t cfg;
+  aom_image_t raw;
+  aom_codec_err_t res;
+  aom_fixed_buf_t stats;
 
-  const VpxInterface *encoder = NULL;
+  const AvxInterface *encoder = NULL;
   const int fps = 30;       // TODO(dkovalev) add command line argument
   const int bitrate = 200;  // kbit/s TODO(dkovalev) add command line argument
   const char *const codec_arg = argv[1];
@@ -211,7 +211,7 @@
 
   max_frames = strtol(argv[6], NULL, 0);
 
-  encoder = get_vpx_encoder_by_name(codec_arg);
+  encoder = get_aom_encoder_by_name(codec_arg);
   if (!encoder) die("Unsupported codec.");
 
   w = strtol(width_arg, NULL, 0);
@@ -220,13 +220,13 @@
   if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
     die("Invalid frame size: %dx%d", w, h);
 
-  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
+  if (!aom_img_alloc(&raw, AOM_IMG_FMT_I420, w, h, 1))
     die("Failed to allocate image", w, h);
 
-  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
+  printf("Using %s\n", aom_codec_iface_name(encoder->codec_interface()));
 
   // Configuration
-  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
+  res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
   if (res) die_codec(&codec, "Failed to get default codec config.");
 
   cfg.g_w = w;
@@ -239,17 +239,17 @@
     die("Failed to open %s for reading", infile_arg);
 
   // Pass 0
-  cfg.g_pass = VPX_RC_FIRST_PASS;
+  cfg.g_pass = AOM_RC_FIRST_PASS;
   stats = pass0(&raw, infile, encoder, &cfg, max_frames);
 
   // Pass 1
   rewind(infile);
-  cfg.g_pass = VPX_RC_LAST_PASS;
+  cfg.g_pass = AOM_RC_LAST_PASS;
   cfg.rc_twopass_stats_in = stats;
   pass1(&raw, infile, outfile_arg, encoder, &cfg, max_frames);
   free(stats.buf);
 
-  vpx_img_free(&raw);
+  aom_img_free(&raw);
   fclose(infile);
 
   return EXIT_SUCCESS;
diff --git a/ivfdec.c b/ivfdec.c
index 24a0d94..d11dfff 100644
--- a/ivfdec.c
+++ b/ivfdec.c
@@ -19,7 +19,7 @@
 static const char *IVF_SIGNATURE = "DKIF";
 
 static void fix_framerate(int *num, int *den) {
-  // Some versions of vpxenc used 1/(2*fps) for the timebase, so
+  // Some versions of aomenc used 1/(2*fps) for the timebase, so
   // we can guess the framerate using only the timebase in this
   // case. Other files would require reading ahead to guess the
   // timebase, like we do for webm.
@@ -37,7 +37,7 @@
   }
 }
 
-int file_is_ivf(struct VpxInputContext *input_ctx) {
+int file_is_ivf(struct AvxInputContext *input_ctx) {
   char raw_hdr[32];
   int is_ivf = 0;
 
diff --git a/ivfdec.h b/ivfdec.h
index af72557..2639ace 100644
--- a/ivfdec.h
+++ b/ivfdec.h
@@ -16,7 +16,7 @@
 extern "C" {
 #endif
 
-int file_is_ivf(struct VpxInputContext *input);
+int file_is_ivf(struct AvxInputContext *input);
 
 int ivf_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read,
                    size_t *buffer_size);
diff --git a/ivfenc.c b/ivfenc.c
index b1e0f77..9ee0ea1 100644
--- a/ivfenc.c
+++ b/ivfenc.c
@@ -10,10 +10,10 @@
 
 #include "./ivfenc.h"
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 #include "aom_ports/mem_ops.h"
 
-void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg,
+void ivf_write_file_header(FILE *outfile, const struct aom_codec_enc_cfg *cfg,
                            unsigned int fourcc, int frame_cnt) {
   char header[32];
 
diff --git a/ivfenc.h b/ivfenc.h
index ebdce47..9129057 100644
--- a/ivfenc.h
+++ b/ivfenc.h
@@ -12,14 +12,14 @@
 
 #include "./tools_common.h"
 
-struct vpx_codec_enc_cfg;
-struct vpx_codec_cx_pkt;
+struct aom_codec_enc_cfg;
+struct aom_codec_cx_pkt;
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg,
+void ivf_write_file_header(FILE *outfile, const struct aom_codec_enc_cfg *cfg,
                            uint32_t fourcc, int frame_cnt);
 
 void ivf_write_frame_header(FILE *outfile, int64_t pts, size_t frame_size);
diff --git a/libs.doxy_template b/libs.doxy_template
index 5a8f847..55b4a4a 100644
--- a/libs.doxy_template
+++ b/libs.doxy_template
@@ -36,7 +36,7 @@
 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded
 # by quotes) that should identify the project.
 
-PROJECT_NAME           = "WebM Codec SDK"
+PROJECT_NAME           = "AOMedia Codec SDK"
 
 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
 # base path where the generated documentation will be put.
diff --git a/libs.mk b/libs.mk
index 2979cec..4755c3a 100644
--- a/libs.mk
+++ b/libs.mk
@@ -34,57 +34,57 @@
 CODEC_SRCS-yes += CHANGELOG
 CODEC_SRCS-yes += libs.mk
 
-include $(SRC_PATH_BARE)/aom/vpx_codec.mk
+include $(SRC_PATH_BARE)/aom/aom_codec.mk
 CODEC_SRCS-yes += $(addprefix aom/,$(call enabled,API_SRCS))
 CODEC_DOC_SRCS += $(addprefix aom/,$(call enabled,API_DOC_SRCS))
 
-include $(SRC_PATH_BARE)/aom_mem/vpx_mem.mk
+include $(SRC_PATH_BARE)/aom_mem/aom_mem.mk
 CODEC_SRCS-yes += $(addprefix aom_mem/,$(call enabled,MEM_SRCS))
 
-include $(SRC_PATH_BARE)/aom_scale/vpx_scale.mk
+include $(SRC_PATH_BARE)/aom_scale/aom_scale.mk
 CODEC_SRCS-yes += $(addprefix aom_scale/,$(call enabled,SCALE_SRCS))
 
-include $(SRC_PATH_BARE)/aom_ports/vpx_ports.mk
+include $(SRC_PATH_BARE)/aom_ports/aom_ports.mk
 CODEC_SRCS-yes += $(addprefix aom_ports/,$(call enabled,PORTS_SRCS))
 
-include $(SRC_PATH_BARE)/aom_dsp/vpx_dsp.mk
+include $(SRC_PATH_BARE)/aom_dsp/aom_dsp.mk
 CODEC_SRCS-yes += $(addprefix aom_dsp/,$(call enabled,DSP_SRCS))
 
-include $(SRC_PATH_BARE)/aom_util/vpx_util.mk
+include $(SRC_PATH_BARE)/aom_util/aom_util.mk
 CODEC_SRCS-yes += $(addprefix aom_util/,$(call enabled,UTIL_SRCS))
 
-#  VP10 make file
-ifeq ($(CONFIG_VP10),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10_common.mk
+#  AV1 make file
+ifeq ($(CONFIG_AV1),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_common.mk
 endif
 
-ifeq ($(CONFIG_VP10_ENCODER),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10cx.mk
-  CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_CX_SRCS))
-  CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_CX_EXPORTS))
-  CODEC_SRCS-yes += $(VP10_PREFIX)vp10cx.mk aom/vp8.h aom/vp8cx.h
-  INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8cx.h
-  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
-  CODEC_DOC_SRCS += aom/vp8.h aom/vp8cx.h
-  CODEC_DOC_SECTIONS += vp9 vp9_encoder
+ifeq ($(CONFIG_AV1_ENCODER),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1cx.mk
+  CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_CX_SRCS))
+  CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_CX_EXPORTS))
+  CODEC_SRCS-yes += $(AV1_PREFIX)av1cx.mk aom/aom.h aom/aomcx.h
+  INSTALL-LIBS-yes += include/aom/aom.h include/aom/aomcx.h
+  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
+  CODEC_DOC_SRCS += aom/aom.h aom/aomcx.h
+  CODEC_DOC_SECTIONS += av1 av1_encoder
 endif
 
-ifeq ($(CONFIG_VP10_DECODER),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10dx.mk
-  CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_DX_SRCS))
-  CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_DX_EXPORTS))
-  CODEC_SRCS-yes += $(VP10_PREFIX)vp10dx.mk aom/vp8.h aom/vp8dx.h
-  INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8dx.h
-  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
-  CODEC_DOC_SRCS += aom/vp8.h aom/vp8dx.h
-  CODEC_DOC_SECTIONS += vp9 vp9_decoder
+ifeq ($(CONFIG_AV1_DECODER),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1dx.mk
+  CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_DX_SRCS))
+  CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_DX_EXPORTS))
+  CODEC_SRCS-yes += $(AV1_PREFIX)av1dx.mk aom/aom.h aom/aomdx.h
+  INSTALL-LIBS-yes += include/aom/aom.h include/aom/aomdx.h
+  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
+  CODEC_DOC_SRCS += aom/aom.h aom/aomdx.h
+  CODEC_DOC_SECTIONS += av1 av1_decoder
 endif
 
-VP10_PREFIX=av1/
-$(BUILD_PFX)$(VP10_PREFIX)%.c.o: CFLAGS += -Wextra
+AV1_PREFIX=av1/
+$(BUILD_PFX)$(AV1_PREFIX)%.c.o: CFLAGS += -Wextra
 
 ifeq ($(CONFIG_ENCODERS),yes)
   CODEC_DOC_SECTIONS += encoder
@@ -95,7 +95,7 @@
 
 
 ifeq ($(CONFIG_MSVS),yes)
-CODEC_LIB=$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd)
+CODEC_LIB=$(if $(CONFIG_STATIC_MSVCRT),aommt,aommd)
 GTEST_LIB=$(if $(CONFIG_STATIC_MSVCRT),gtestmt,gtestmd)
 # This variable uses deferred expansion intentionally, since the results of
 # $(wildcard) may change during the course of the Make.
@@ -118,9 +118,9 @@
 CODEC_SRCS-yes += aom_ports/emmintrin_compat.h
 CODEC_SRCS-yes += aom_ports/mem_ops.h
 CODEC_SRCS-yes += aom_ports/mem_ops_aligned.h
-CODEC_SRCS-yes += aom_ports/vpx_once.h
-CODEC_SRCS-yes += $(BUILD_PFX)vpx_config.c
-INSTALL-SRCS-no += $(BUILD_PFX)vpx_config.c
+CODEC_SRCS-yes += aom_ports/aom_once.h
+CODEC_SRCS-yes += $(BUILD_PFX)aom_config.c
+INSTALL-SRCS-no += $(BUILD_PFX)aom_config.c
 ifeq ($(ARCH_X86)$(ARCH_X86_64),yes)
 INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += third_party/x86inc/x86inc.asm
 endif
@@ -128,18 +128,18 @@
 CODEC_EXPORTS-$(CONFIG_ENCODERS) += aom/exports_enc
 CODEC_EXPORTS-$(CONFIG_DECODERS) += aom/exports_dec
 
-INSTALL-LIBS-yes += include/aom/vpx_codec.h
-INSTALL-LIBS-yes += include/aom/vpx_frame_buffer.h
-INSTALL-LIBS-yes += include/aom/vpx_image.h
-INSTALL-LIBS-yes += include/aom/vpx_integer.h
-INSTALL-LIBS-$(CONFIG_DECODERS) += include/aom/vpx_decoder.h
-INSTALL-LIBS-$(CONFIG_ENCODERS) += include/aom/vpx_encoder.h
+INSTALL-LIBS-yes += include/aom/aom_codec.h
+INSTALL-LIBS-yes += include/aom/aom_frame_buffer.h
+INSTALL-LIBS-yes += include/aom/aom_image.h
+INSTALL-LIBS-yes += include/aom/aom_integer.h
+INSTALL-LIBS-$(CONFIG_DECODERS) += include/aom/aom_decoder.h
+INSTALL-LIBS-$(CONFIG_ENCODERS) += include/aom/aom_encoder.h
 ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
 ifeq ($(CONFIG_MSVS),yes)
 INSTALL-LIBS-yes                  += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/$(CODEC_LIB).lib)
 INSTALL-LIBS-$(CONFIG_DEBUG_LIBS) += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/$(CODEC_LIB)d.lib)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/vpx.dll)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/vpx.exp)
+INSTALL-LIBS-$(CONFIG_SHARED) += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/aom.dll)
+INSTALL-LIBS-$(CONFIG_SHARED) += $(foreach p,$(VS_PLATFORMS),$(LIBSUBDIR)/$(p)/aom.exp)
 endif
 else
 INSTALL-LIBS-$(CONFIG_STATIC) += $(LIBSUBDIR)/libaom.a
@@ -162,39 +162,39 @@
 ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
 ifeq ($(CONFIG_MSVS),yes)
 
-vpx.def: $(call enabled,CODEC_EXPORTS)
+aom.def: $(call enabled,CODEC_EXPORTS)
 	@echo "    [CREATE] $@"
 	$(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_def.sh\
-            --name=vpx\
+            --name=aom\
             --out=$@ $^
-CLEAN-OBJS += vpx.def
+CLEAN-OBJS += aom.def
 
 # Assembly files that are included, but don't define symbols themselves.
 # Filtered out to avoid Visual Studio build warnings.
 ASM_INCLUDES := \
     third_party/x86inc/x86inc.asm \
-    vpx_config.asm \
+    aom_config.asm \
     aom_ports/x86_abi_support.asm \
 
-vpx.$(VCPROJ_SFX): $(CODEC_SRCS) vpx.def
+aom.$(VCPROJ_SFX): $(CODEC_SRCS) aom.def
 	@echo "    [CREATE] $@"
 	$(qexec)$(GEN_VCPROJ) \
             $(if $(CONFIG_SHARED),--dll,--lib) \
             --target=$(TOOLCHAIN) \
             $(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
-            --name=vpx \
+            --name=aom \
             --proj-guid=DCE19DAF-69AC-46DB-B14A-39F0FAA5DB74 \
-            --module-def=vpx.def \
+            --module-def=aom.def \
             --ver=$(CONFIG_VS_VERSION) \
             --src-path-bare="$(SRC_PATH_BARE)" \
             --out=$@ $(CFLAGS) \
             $(filter-out $(addprefix %, $(ASM_INCLUDES)), $^) \
             --src-path-bare="$(SRC_PATH_BARE)" \
 
-PROJECTS-yes += vpx.$(VCPROJ_SFX)
+PROJECTS-yes += aom.$(VCPROJ_SFX)
 
-vpx.$(VCPROJ_SFX): vpx_config.asm
-vpx.$(VCPROJ_SFX): $(RTCD)
+aom.$(VCPROJ_SFX): aom_config.asm
+aom.$(VCPROJ_SFX): $(RTCD)
 
 endif
 else
@@ -289,8 +289,8 @@
 INSTALL-LIBS-$(CONFIG_SHARED) += $(if $(LIBAOM_SO_IMPLIB),$(LIBSUBDIR)/$(LIBAOM_SO_IMPLIB))
 
 
-LIBS-yes += vpx.pc
-vpx.pc: config.mk libs.mk
+LIBS-yes += aom.pc
+aom.pc: config.mk libs.mk
 	@echo "    [CREATE] $@"
 	$(qexec)echo '# pkg-config file from libaom $(VERSION_STRING)' > $@
 	$(qexec)echo 'prefix=$(PREFIX)' >> $@
@@ -298,8 +298,8 @@
 	$(qexec)echo 'libdir=$${prefix}/$(LIBSUBDIR)' >> $@
 	$(qexec)echo 'includedir=$${prefix}/include' >> $@
 	$(qexec)echo '' >> $@
-	$(qexec)echo 'Name: vpx' >> $@
-	$(qexec)echo 'Description: WebM Project VPx codec implementation' >> $@
+	$(qexec)echo 'Name: aom' >> $@
+	$(qexec)echo 'Description: AOMedia Project AVx codec implementation' >> $@
 	$(qexec)echo 'Version: $(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)' >> $@
 	$(qexec)echo 'Requires:' >> $@
 	$(qexec)echo 'Conflicts:' >> $@
@@ -310,9 +310,9 @@
 	$(qexec)echo 'Libs.private: -lm' >> $@
 endif
 	$(qexec)echo 'Cflags: -I$${includedir}' >> $@
-INSTALL-LIBS-yes += $(LIBSUBDIR)/pkgconfig/vpx.pc
+INSTALL-LIBS-yes += $(LIBSUBDIR)/pkgconfig/aom.pc
 INSTALL_MAPS += $(LIBSUBDIR)/pkgconfig/%.pc %.pc
-CLEAN-OBJS += vpx.pc
+CLEAN-OBJS += aom.pc
 endif
 
 #
@@ -320,29 +320,29 @@
 #
 ifeq ($(ARCH_X86)$(ARCH_X86_64),yes)
 # YASM
-$(BUILD_PFX)vpx_config.asm: $(BUILD_PFX)vpx_config.h
+$(BUILD_PFX)aom_config.asm: $(BUILD_PFX)aom_config.h
 	@echo "    [CREATE] $@"
 	@egrep "#define [A-Z0-9_]+ [01]" $< \
 	    | awk '{print $$2 " equ " $$3}' > $@
 else
 ADS2GAS=$(if $(filter yes,$(CONFIG_GCC)),| $(ASM_CONVERSION))
-$(BUILD_PFX)vpx_config.asm: $(BUILD_PFX)vpx_config.h
+$(BUILD_PFX)aom_config.asm: $(BUILD_PFX)aom_config.h
 	@echo "    [CREATE] $@"
 	@egrep "#define [A-Z0-9_]+ [01]" $< \
 	    | awk '{print $$2 " EQU " $$3}' $(ADS2GAS) > $@
 	@echo "        END" $(ADS2GAS) >> $@
-CLEAN-OBJS += $(BUILD_PFX)vpx_config.asm
+CLEAN-OBJS += $(BUILD_PFX)aom_config.asm
 endif
 
 #
 # Add assembler dependencies for configuration.
 #
-$(filter %.s.o,$(OBJS-yes)):     $(BUILD_PFX)vpx_config.asm
-$(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm
+$(filter %.s.o,$(OBJS-yes)):     $(BUILD_PFX)aom_config.asm
+$(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)aom_config.asm
 
 
-$(shell $(SRC_PATH_BARE)/build/make/version.sh "$(SRC_PATH_BARE)" $(BUILD_PFX)vpx_version.h)
-CLEAN-OBJS += $(BUILD_PFX)vpx_version.h
+$(shell $(SRC_PATH_BARE)/build/make/version.sh "$(SRC_PATH_BARE)" $(BUILD_PFX)aom_version.h)
+CLEAN-OBJS += $(BUILD_PFX)aom_version.h
 
 #
 # Add include path for libwebm sources.
@@ -359,7 +359,7 @@
 
 include $(SRC_PATH_BARE)/test/test.mk
 LIBAOM_TEST_SRCS=$(addprefix test/,$(call enabled,LIBAOM_TEST_SRCS))
-LIBAOM_TEST_BIN=./test_libvpx$(EXE_SFX)
+LIBAOM_TEST_BIN=./test_libaom$(EXE_SFX)
 LIBAOM_TEST_DATA=$(addprefix $(LIBVPX_TEST_DATA_PATH)/,\
                      $(call enabled,LIBAOM_TEST_DATA))
 libaom_test_data_url=http://downloads.webmproject.org/test_data/libvpx/$(1)
@@ -412,12 +412,12 @@
 
 PROJECTS-$(CONFIG_MSVS) += gtest.$(VCPROJ_SFX)
 
-test_libvpx.$(VCPROJ_SFX): $(LIBAOM_TEST_SRCS) vpx.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
+test_libaom.$(VCPROJ_SFX): $(LIBAOM_TEST_SRCS) aom.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
 	@echo "    [CREATE] $@"
 	$(qexec)$(GEN_VCPROJ) \
             --exe \
             --target=$(TOOLCHAIN) \
-            --name=test_libvpx \
+            --name=test_libaom \
             -D_VARIADIC_MAX=10 \
             --proj-guid=CD837F5F-52D8-4314-A370-895D614166A7 \
             --ver=$(CONFIG_VS_VERSION) \
@@ -428,13 +428,13 @@
             $(if $(CONFIG_WEBM_IO),-I"$(SRC_PATH_BARE)/third_party/libwebm") \
             -L. -l$(CODEC_LIB) -l$(GTEST_LIB) $^
 
-PROJECTS-$(CONFIG_MSVS) += test_libvpx.$(VCPROJ_SFX)
+PROJECTS-$(CONFIG_MSVS) += test_libaom.$(VCPROJ_SFX)
 
 LIBAOM_TEST_BIN := $(addprefix $(TGT_OS:win64=x64)/Release/,$(notdir $(LIBAOM_TEST_BIN)))
 
 ifneq ($(strip $(TEST_INTRA_PRED_SPEED_OBJS)),)
 PROJECTS-$(CONFIG_MSVS) += test_intra_pred_speed.$(VCPROJ_SFX)
-test_intra_pred_speed.$(VCPROJ_SFX): $(TEST_INTRA_PRED_SPEED_SRCS) vpx.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
+test_intra_pred_speed.$(VCPROJ_SFX): $(TEST_INTRA_PRED_SPEED_SRCS) aom.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
 	@echo "    [CREATE] $@"
 	$(qexec)$(GEN_VCPROJ) \
             --exe \
@@ -539,7 +539,7 @@
 SRCS += $(CODEC_SRCS) $(LIBAOM_TEST_SRCS) $(GTEST_SRCS)
 
 ##
-## vpxdec/vpxenc tests.
+## aomdec/aomenc tests.
 ##
 ifeq ($(CONFIG_UNIT_TESTS),yes)
 TEST_BIN_PATH = .
@@ -552,10 +552,10 @@
 TEST_BIN_PATH := $(addsuffix /$(TGT_OS:win64=x64)/Release, $(TEST_BIN_PATH))
 endif
 utiltest utiltest-no-data-check:
-	$(qexec)$(SRC_PATH_BARE)/test/vpxdec.sh \
+	$(qexec)$(SRC_PATH_BARE)/test/aomdec.sh \
 		--test-data-path $(LIBVPX_TEST_DATA_PATH) \
 		--bin-path $(TEST_BIN_PATH)
-	$(qexec)$(SRC_PATH_BARE)/test/vpxenc.sh \
+	$(qexec)$(SRC_PATH_BARE)/test/aomenc.sh \
 		--test-data-path $(LIBVPX_TEST_DATA_PATH) \
 		--bin-path $(TEST_BIN_PATH)
 utiltest: testdata
diff --git a/mainpage.dox b/mainpage.dox
index ec202fa..9a82f43 100644
--- a/mainpage.dox
+++ b/mainpage.dox
@@ -1,4 +1,4 @@
-/*!\mainpage WebM Codec SDK
+/*!\mainpage AOMedia Codec SDK
 
   \section main_contents Page Contents
   - \ref main_intro
@@ -6,17 +6,16 @@
   - \ref main_support
 
   \section main_intro Introduction
-  Welcome to the WebM Codec SDK. This SDK allows you to integrate your
-  applications with the VP8 and VP9 video codecs, high quality, royalty free,
-  open source codecs deployed on billions of computers and devices worldwide.
+  Welcome to the AOMedia Codec SDK. This SDK allows you to integrate your
+  applications with the AOM and AV1 video codecs.
 
-  This distribution of the WebM Codec SDK includes the following support:
+  This distribution of the AOMedia Codec SDK includes the following support:
 
-  \if vp8_encoder
-  - \ref vp8_encoder
+  \if aom_encoder
+  - \ref aom_encoder
   \endif
-  \if vp8_decoder
-  - \ref vp8_decoder
+  \if aom_decoder
+  - \ref aom_decoder
   \endif
 
 
@@ -36,7 +35,7 @@
   \endif
 
   \section main_support Support Options & FAQ
-  The WebM project is an open source project supported by its community. For
+  The AOMedia project is an open source project supported by its community. For
   questions about this SDK, please mail the apps-devel@webmproject.org list.
   To contribute, see http://www.webmproject.org/code/contribute and mail
   codec-devel@webmproject.org.
diff --git a/md5_utils.c b/md5_utils.c
index 093798b..34012b2 100644
--- a/md5_utils.c
+++ b/md5_utils.c
@@ -147,13 +147,13 @@
 
 #if defined(__clang__) && defined(__has_attribute)
 #if __has_attribute(no_sanitize)
-#define VPX_NO_UNSIGNED_OVERFLOW_CHECK \
+#define AOM_NO_UNSIGNED_OVERFLOW_CHECK \
   __attribute__((no_sanitize("unsigned-integer-overflow")))
 #endif
 #endif
 
-#ifndef VPX_NO_UNSIGNED_OVERFLOW_CHECK
-#define VPX_NO_UNSIGNED_OVERFLOW_CHECK
+#ifndef AOM_NO_UNSIGNED_OVERFLOW_CHECK
+#define AOM_NO_UNSIGNED_OVERFLOW_CHECK
 #endif
 
 /*
@@ -161,7 +161,7 @@
  * reflect the addition of 16 longwords of new data.  MD5Update blocks
  * the data and converts bytes into longwords for this routine.
  */
-VPX_NO_UNSIGNED_OVERFLOW_CHECK void MD5Transform(UWORD32 buf[4],
+AOM_NO_UNSIGNED_OVERFLOW_CHECK void MD5Transform(UWORD32 buf[4],
                                                  UWORD32 const in[16]) {
   register UWORD32 a, b, c, d;
 
@@ -244,6 +244,6 @@
   buf[3] += d;
 }
 
-#undef VPX_NO_UNSIGNED_OVERFLOW_CHECK
+#undef AOM_NO_UNSIGNED_OVERFLOW_CHECK
 
 #endif
diff --git a/rate_hist.c b/rate_hist.c
index 872a10b..187e3dd 100644
--- a/rate_hist.c
+++ b/rate_hist.c
@@ -34,8 +34,8 @@
   int total;
 };
 
-struct rate_hist *init_rate_histogram(const vpx_codec_enc_cfg_t *cfg,
-                                      const vpx_rational_t *fps) {
+struct rate_hist *init_rate_histogram(const aom_codec_enc_cfg_t *cfg,
+                                      const aom_rational_t *fps) {
   int i;
   struct rate_hist *hist = malloc(sizeof(*hist));
 
@@ -70,8 +70,8 @@
 }
 
 void update_rate_histogram(struct rate_hist *hist,
-                           const vpx_codec_enc_cfg_t *cfg,
-                           const vpx_codec_cx_pkt_t *pkt) {
+                           const aom_codec_enc_cfg_t *cfg,
+                           const aom_codec_cx_pkt_t *pkt) {
   int i;
   int64_t then = 0;
   int64_t avg_bitrate = 0;
@@ -254,7 +254,7 @@
   show_histogram(bucket, buckets, total, scale);
 }
 
-void show_rate_histogram(struct rate_hist *hist, const vpx_codec_enc_cfg_t *cfg,
+void show_rate_histogram(struct rate_hist *hist, const aom_codec_enc_cfg_t *cfg,
                          int max_buckets) {
   int i, scale;
   int buckets = 0;
diff --git a/rate_hist.h b/rate_hist.h
index df49411..f62f5de 100644
--- a/rate_hist.h
+++ b/rate_hist.h
@@ -11,7 +11,7 @@
 #ifndef RATE_HIST_H_
 #define RATE_HIST_H_
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -19,18 +19,18 @@
 
 struct rate_hist;
 
-struct rate_hist *init_rate_histogram(const vpx_codec_enc_cfg_t *cfg,
-                                      const vpx_rational_t *fps);
+struct rate_hist *init_rate_histogram(const aom_codec_enc_cfg_t *cfg,
+                                      const aom_rational_t *fps);
 
 void destroy_rate_histogram(struct rate_hist *hist);
 
 void update_rate_histogram(struct rate_hist *hist,
-                           const vpx_codec_enc_cfg_t *cfg,
-                           const vpx_codec_cx_pkt_t *pkt);
+                           const aom_codec_enc_cfg_t *cfg,
+                           const aom_codec_cx_pkt_t *pkt);
 
 void show_q_histogram(const int counts[64], int max_buckets);
 
-void show_rate_histogram(struct rate_hist *hist, const vpx_codec_enc_cfg_t *cfg,
+void show_rate_histogram(struct rate_hist *hist, const aom_codec_enc_cfg_t *cfg,
                          int max_buckets);
 
 #ifdef __cplusplus
diff --git a/solution.mk b/solution.mk
index 8d3531e..829f42a 100644
--- a/solution.mk
+++ b/solution.mk
@@ -8,23 +8,24 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-# libaom reverse dependencies (targets that depend on libaom)
-VPX_NONDEPS=$(addsuffix .$(VCPROJ_SFX),vpx gtest)
-VPX_RDEPS=$(foreach vcp,\
-              $(filter-out $(VPX_NONDEPS),$^), --dep=$(vcp:.$(VCPROJ_SFX)=):vpx)
 
-vpx.sln: $(wildcard *.$(VCPROJ_SFX))
+# libaom reverse dependencies (targets that depend on libaom)
+AOM_NONDEPS=$(addsuffix .$(VCPROJ_SFX),aom gtest)
+AOM_RDEPS=$(foreach vcp,\
+              $(filter-out $(AOM_NONDEPS),$^), --dep=$(vcp:.$(VCPROJ_SFX)=):aom)
+
+aom.sln: $(wildcard *.$(VCPROJ_SFX))
 	@echo "    [CREATE] $@"
 	$(SRC_PATH_BARE)/build/make/gen_msvs_sln.sh \
-            $(if $(filter vpx.$(VCPROJ_SFX),$^),$(VPX_RDEPS)) \
-            --dep=test_libvpx:gtest \
+            $(if $(filter aom.$(VCPROJ_SFX),$^),$(AOM_RDEPS)) \
+            --dep=test_libaom:gtest \
             --ver=$(CONFIG_VS_VERSION)\
             --out=$@ $^
-vpx.sln.mk: vpx.sln
+aom.sln.mk: aom.sln
 	@true
 
-PROJECTS-yes += vpx.sln vpx.sln.mk
--include vpx.sln.mk
+PROJECTS-yes += aom.sln aom.sln.mk
+-include aom.sln.mk
 
 # Always install this file, as it is an unconditional post-build rule.
 INSTALL_MAPS += src/%     $(SRC_PATH_BARE)/%
diff --git a/test/acm_random.h b/test/acm_random.h
index aec4c6b..d57a9df 100644
--- a/test/acm_random.h
+++ b/test/acm_random.h
@@ -13,7 +13,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 namespace libaom_test {
 
diff --git a/test/active_map_refresh_test.cc b/test/active_map_refresh_test.cc
index 994d474..a04bd57 100644
--- a/test/active_map_refresh_test.cc
+++ b/test/active_map_refresh_test.cc
@@ -17,7 +17,7 @@
 namespace {
 
 // Check if any pixel in a 16x16 macroblock varies between frames.
-int CheckMb(const vpx_image_t &current, const vpx_image_t &previous, int mb_r,
+int CheckMb(const aom_image_t &current, const aom_image_t &previous, int mb_r,
             int mb_c) {
   for (int plane = 0; plane < 3; plane++) {
     int r = 16 * mb_r;
@@ -45,8 +45,8 @@
   return 0;
 }
 
-void GenerateMap(int mb_rows, int mb_cols, const vpx_image_t &current,
-                 const vpx_image_t &previous, uint8_t *map) {
+void GenerateMap(int mb_rows, int mb_cols, const aom_image_t &current,
+                 const aom_image_t &previous, uint8_t *map) {
   for (int mb_r = 0; mb_r < mb_rows; ++mb_r) {
     for (int mb_c = 0; mb_c < mb_cols; ++mb_c) {
       map[mb_r * mb_cols + mb_c] = CheckMb(current, previous, mb_r, mb_c);
@@ -74,13 +74,13 @@
     ::libaom_test::Y4mVideoSource *y4m_video =
         static_cast<libaom_test::Y4mVideoSource *>(video);
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
-      encoder->Control(VP9E_SET_AQ_MODE, kAqModeCyclicRefresh);
+      encoder->Control(AOME_SET_CPUUSED, cpu_used_);
+      encoder->Control(AV1E_SET_AQ_MODE, kAqModeCyclicRefresh);
     } else if (video->frame() >= 2 && video->img()) {
-      vpx_image_t *current = video->img();
-      vpx_image_t *previous = y4m_holder_->img();
+      aom_image_t *current = video->img();
+      aom_image_t *previous = y4m_holder_->img();
       ASSERT_TRUE(previous != NULL);
-      vpx_active_map_t map = vpx_active_map_t();
+      aom_active_map_t map = aom_active_map_t();
       const int width = static_cast<int>(current->d_w);
       const int height = static_cast<int>(current->d_h);
       const int mb_width = (width + 15) / 16;
@@ -90,7 +90,7 @@
       map.cols = mb_width;
       map.rows = mb_height;
       map.active_map = active_map;
-      encoder->Control(VP8E_SET_ACTIVEMAP, &map);
+      encoder->Control(AOME_SET_ACTIVEMAP, &map);
       delete[] active_map;
     }
     if (video->img()) {
@@ -109,15 +109,15 @@
   cfg_.rc_resize_allowed = 0;
   cfg_.rc_min_quantizer = 8;
   cfg_.rc_max_quantizer = 30;
-  cfg_.g_pass = VPX_RC_ONE_PASS;
-  cfg_.rc_end_usage = VPX_CBR;
+  cfg_.g_pass = AOM_RC_ONE_PASS;
+  cfg_.rc_end_usage = AOM_CBR;
   cfg_.kf_max_dist = 90000;
 
-#if CONFIG_VP10
-  const int nframes = codec_ == &libaom_test::kVP10 ? 10 : 30;
+#if CONFIG_AV1
+  const int nframes = codec_ == &libaom_test::kAV1 ? 10 : 30;
 #else
   const int nframes = 30;
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AV1
   ::libaom_test::Y4mVideoSource video("desktop_credits.y4m", 0, nframes);
   ::libaom_test::Y4mVideoSource video_holder("desktop_credits.y4m", 0, nframes);
   video_holder.Begin();
@@ -126,9 +126,9 @@
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-#if CONFIG_VP10
-VP10_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
-                           ::testing::Values(::libaom_test::kRealTime),
-                           ::testing::Range(5, 6));
-#endif  // CONFIG_VP10
+#if CONFIG_AV1
+AV1_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
+                          ::testing::Values(::libaom_test::kRealTime),
+                          ::testing::Range(5, 6));
+#endif  // CONFIG_AV1
 }  // namespace
diff --git a/test/active_map_test.cc b/test/active_map_test.cc
index 35da706..717ed20 100644
--- a/test/active_map_test.cc
+++ b/test/active_map_test.cc
@@ -36,9 +36,9 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
+      encoder->Control(AOME_SET_CPUUSED, cpu_used_);
     } else if (video->frame() == 3) {
-      vpx_active_map_t map = vpx_active_map_t();
+      aom_active_map_t map = aom_active_map_t();
       /* clang-format off */
       uint8_t active_map[9 * 13] = {
         1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
@@ -57,13 +57,13 @@
       ASSERT_EQ(map.cols, 13u);
       ASSERT_EQ(map.rows, 9u);
       map.active_map = active_map;
-      encoder->Control(VP8E_SET_ACTIVEMAP, &map);
+      encoder->Control(AOME_SET_ACTIVEMAP, &map);
     } else if (video->frame() == 15) {
-      vpx_active_map_t map = vpx_active_map_t();
+      aom_active_map_t map = aom_active_map_t();
       map.cols = (kWidth + 15) / 16;
       map.rows = (kHeight + 15) / 16;
       map.active_map = NULL;
-      encoder->Control(VP8E_SET_ACTIVEMAP, &map);
+      encoder->Control(AOME_SET_ACTIVEMAP, &map);
     }
   }
 
@@ -72,8 +72,8 @@
     cfg_.g_lag_in_frames = 0;
     cfg_.rc_target_bitrate = 400;
     cfg_.rc_resize_allowed = 0;
-    cfg_.g_pass = VPX_RC_ONE_PASS;
-    cfg_.rc_end_usage = VPX_CBR;
+    cfg_.g_pass = AOM_RC_ONE_PASS;
+    cfg_.rc_end_usage = AOM_CBR;
     cfg_.kf_max_dist = 90000;
     ::libaom_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30,
                                          1, 0, 20);
@@ -90,12 +90,12 @@
 
 TEST_P(ActiveMapTestLarge, Test) { DoTest(); }
 
-VP10_INSTANTIATE_TEST_CASE(ActiveMapTestLarge,
-                           ::testing::Values(::libaom_test::kRealTime),
-                           ::testing::Range(0, 5));
+AV1_INSTANTIATE_TEST_CASE(ActiveMapTestLarge,
+                          ::testing::Values(::libaom_test::kRealTime),
+                          ::testing::Range(0, 5));
 
-VP10_INSTANTIATE_TEST_CASE(ActiveMapTest,
-                           ::testing::Values(::libaom_test::kRealTime),
-                           ::testing::Range(5, 9));
+AV1_INSTANTIATE_TEST_CASE(ActiveMapTest,
+                          ::testing::Values(::libaom_test::kRealTime),
+                          ::testing::Range(5, 9));
 
 }  // namespace
diff --git a/test/add_noise_test.cc b/test/add_noise_test.cc
index 62f14b4..768ac36 100644
--- a/test/add_noise_test.cc
+++ b/test/add_noise_test.cc
@@ -11,10 +11,10 @@
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/postproc.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 namespace {
 
@@ -46,7 +46,7 @@
   const int height = 64;
   const int image_size = width * height;
   char noise[3072];
-  const int clamp = vpx_setup_noise(4.4, sizeof(noise), noise);
+  const int clamp = aom_setup_noise(4.4, sizeof(noise), noise);
 
   for (int i = 0; i < 16; i++) {
     blackclamp[i] = clamp;
@@ -54,7 +54,7 @@
     bothclamp[i] = 2 * clamp;
   }
 
-  uint8_t *const s = reinterpret_cast<uint8_t *>(vpx_calloc(image_size, 1));
+  uint8_t *const s = reinterpret_cast<uint8_t *>(aom_calloc(image_size, 1));
   memset(s, 99, image_size);
 
   ASM_REGISTER_STATE_CHECK(GetParam()(s, noise, blackclamp, whiteclamp,
@@ -95,7 +95,7 @@
     EXPECT_LT(static_cast<int>(s[i]), 255 - clamp) << "i = " << i;
   }
 
-  vpx_free(s);
+  aom_free(s);
 }
 
 TEST_P(AddNoiseTest, CheckCvsAssembly) {
@@ -107,7 +107,7 @@
   const int image_size = width * height;
   char noise[3072];
 
-  const int clamp = vpx_setup_noise(4.4, sizeof(noise), noise);
+  const int clamp = aom_setup_noise(4.4, sizeof(noise), noise);
 
   for (int i = 0; i < 16; i++) {
     blackclamp[i] = clamp;
@@ -115,8 +115,8 @@
     bothclamp[i] = 2 * clamp;
   }
 
-  uint8_t *const s = reinterpret_cast<uint8_t *>(vpx_calloc(image_size, 1));
-  uint8_t *const d = reinterpret_cast<uint8_t *>(vpx_calloc(image_size, 1));
+  uint8_t *const s = reinterpret_cast<uint8_t *>(aom_calloc(image_size, 1));
+  uint8_t *const d = reinterpret_cast<uint8_t *>(aom_calloc(image_size, 1));
 
   memset(s, 99, image_size);
   memset(d, 99, image_size);
@@ -125,27 +125,27 @@
   ASM_REGISTER_STATE_CHECK(GetParam()(s, noise, blackclamp, whiteclamp,
                                       bothclamp, width, height, width));
   srand(0);
-  ASM_REGISTER_STATE_CHECK(vpx_plane_add_noise_c(
+  ASM_REGISTER_STATE_CHECK(aom_plane_add_noise_c(
       d, noise, blackclamp, whiteclamp, bothclamp, width, height, width));
 
   for (int i = 0; i < image_size; ++i) {
     EXPECT_EQ(static_cast<int>(s[i]), static_cast<int>(d[i])) << "i = " << i;
   }
 
-  vpx_free(d);
-  vpx_free(s);
+  aom_free(d);
+  aom_free(s);
 }
 
 INSTANTIATE_TEST_CASE_P(C, AddNoiseTest,
-                        ::testing::Values(vpx_plane_add_noise_c));
+                        ::testing::Values(aom_plane_add_noise_c));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, AddNoiseTest,
-                        ::testing::Values(vpx_plane_add_noise_sse2));
+                        ::testing::Values(aom_plane_add_noise_sse2));
 #endif
 
 #if HAVE_MSA
 INSTANTIATE_TEST_CASE_P(MSA, AddNoiseTest,
-                        ::testing::Values(vpx_plane_add_noise_msa));
+                        ::testing::Values(aom_plane_add_noise_msa));
 #endif
 }  // namespace
diff --git a/test/altref_test.cc b/test/altref_test.cc
index 2592914..29d6a63 100644
--- a/test/altref_test.cc
+++ b/test/altref_test.cc
@@ -26,29 +26,29 @@
   virtual void SetUp() {
     InitializeConfig();
     SetMode(encoding_mode_);
-    cfg_.rc_end_usage = VPX_VBR;
+    cfg_.rc_end_usage = AOM_VBR;
     cfg_.g_threads = 0;
   }
 
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
-      encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-#if CONFIG_VP10_ENCODER
+      encoder->Control(AOME_SET_CPUUSED, cpu_used_);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+#if CONFIG_AV1_ENCODER
       // override test default for tile columns if necessary.
-      if (GET_PARAM(0) == &libaom_test::kVP10) {
-        encoder->Control(VP9E_SET_TILE_COLUMNS, 6);
+      if (GET_PARAM(0) == &libaom_test::kAV1) {
+        encoder->Control(AV1E_SET_TILE_COLUMNS, 6);
       }
 #endif
     }
     frame_flags_ =
-        (video->frame() == forced_kf_frame_num_) ? VPX_EFLAG_FORCE_KF : 0;
+        (video->frame() == forced_kf_frame_num_) ? AOM_EFLAG_FORCE_KF : 0;
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     if (frame_num_ == forced_kf_frame_num_) {
-      ASSERT_TRUE(!!(pkt->data.frame.flags & VPX_FRAME_IS_KEY))
+      ASSERT_TRUE(!!(pkt->data.frame.flags & AOM_FRAME_IS_KEY))
           << "Frame #" << frame_num_ << " isn't a keyframe!";
     }
     ++frame_num_;
@@ -61,7 +61,7 @@
 };
 
 TEST_P(AltRefForcedKeyTestLarge, Frame1IsKey) {
-  const vpx_rational timebase = { 1, 30 };
+  const aom_rational timebase = { 1, 30 };
   const int lag_values[] = { 3, 15, 25, -1 };
 
   forced_kf_frame_num_ = 1;
@@ -75,7 +75,7 @@
 }
 
 TEST_P(AltRefForcedKeyTestLarge, ForcedFrameIsKey) {
-  const vpx_rational timebase = { 1, 30 };
+  const aom_rational timebase = { 1, 30 };
   const int lag_values[] = { 3, 15, 25, -1 };
 
   for (int i = 0; lag_values[i] != -1; ++i) {
@@ -88,8 +88,8 @@
   }
 }
 
-VP10_INSTANTIATE_TEST_CASE(AltRefForcedKeyTestLarge,
-                           ::testing::Values(::libaom_test::kOnePassGood),
-                           ::testing::Range(0, 9));
+AV1_INSTANTIATE_TEST_CASE(AltRefForcedKeyTestLarge,
+                          ::testing::Values(::libaom_test::kOnePassGood),
+                          ::testing::Range(0, 9));
 
 }  // namespace
diff --git a/test/android/Android.mk b/test/android/Android.mk
index 4f0565a..d337fa5 100644
--- a/test/android/Android.mk
+++ b/test/android/Android.mk
@@ -6,7 +6,7 @@
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 #
-# This make file builds vpx_test app for android.
+# This make file builds aom_test app for android.
 # The test app itself runs on the command line through adb shell
 # The paths are really messed up as the libaom make file
 # expects to be made from a parent directory.
@@ -42,9 +42,9 @@
 LOCAL_STATIC_LIBRARIES := gtest libwebm
 
 ifeq ($(ENABLE_SHARED),1)
-  LOCAL_SHARED_LIBRARIES := vpx
+  LOCAL_SHARED_LIBRARIES := aom
 else
-  LOCAL_STATIC_LIBRARIES += vpx
+  LOCAL_STATIC_LIBRARIES += aom
 endif
 
 include $(LOCAL_PATH)/test/test.mk
diff --git a/test/android/README b/test/android/README
index e5b25d5..35c8297 100644
--- a/test/android/README
+++ b/test/android/README
@@ -1,4 +1,4 @@
-Android.mk will build vpx unittests on android.
+Android.mk will build aom unittests on android.
 1) Configure libaom from the parent directory:
 ./libaom/configure --target=armv7-android-gcc --enable-external-build \
   --enable-postproc --disable-install-srcs --enable-multi-res-encoding \
@@ -29,4 +29,4 @@
 adb shell
 (on device)
 cd /data/local/tmp
-LD_LIBRARY_PATH=. ./vpx_test
+LD_LIBRARY_PATH=. ./aom_test
diff --git a/test/aomdec.sh b/test/aomdec.sh
new file mode 100755
index 0000000..c471f10
--- /dev/null
+++ b/test/aomdec.sh
@@ -0,0 +1,116 @@
+#!/bin/sh
+##
+##  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+##  This file tests aomdec. To add new tests to this file, do the following:
+##    1. Write a shell function (this is your test).
+##    2. Add the function to aomdec_tests (on a new line).
+##
+. $(dirname $0)/tools_common.sh
+
+# Environment check: Make sure input is available.
+aomdec_verify_environment() {
+  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${AV1_WEBM_FILE}" ] || \
+    [ ! -e "${AV1_FPM_WEBM_FILE}" ] || \
+    [ ! -e "${AV1_LT_50_FRAMES_WEBM_FILE}" ] ; then
+    elog "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
+    return 1
+  fi
+  if [ -z "$(aom_tool_path aomdec)" ]; then
+    elog "aomdec not found. It must exist in LIBAOM_BIN_PATH or its parent."
+    return 1
+  fi
+}
+
+# Wrapper function for running aomdec with pipe input. Requires that
+# LIBAOM_BIN_PATH points to the directory containing aomdec. $1 is used as the
+# input file path and shifted away. All remaining parameters are passed through
+# to aomdec.
+aomdec_pipe() {
+  local readonly decoder="$(aom_tool_path aomdec)"
+  local readonly input="$1"
+  shift
+  cat "${input}" | eval "${AOM_TEST_PREFIX}" "${decoder}" - "$@" ${devnull}
+}
+
+# Wrapper function for running aomdec. Requires that LIBAOM_BIN_PATH points to
+# the directory containing aomdec. $1 one is used as the input file path and
+# shifted away. All remaining parameters are passed through to aomdec.
+aomdec() {
+  local readonly decoder="$(aom_tool_path aomdec)"
+  local readonly input="$1"
+  shift
+  eval "${AOM_TEST_PREFIX}" "${decoder}" "$input" "$@" ${devnull}
+}
+
+aomdec_can_decode_vp8() {
+  if [ "$(vp8_decode_available)" = "yes" ]; then
+    echo yes
+  fi
+}
+
+aomdec_can_decode_av1() {
+  if [ "$(av1_decode_available)" = "yes" ]; then
+    echo yes
+  fi
+}
+
+aomdec_vp8_ivf() {
+  if [ "$(aomdec_can_decode_vp8)" = "yes" ]; then
+    aomdec "${VP8_IVF_FILE}" --summary --noblit
+  fi
+}
+
+aomdec_vp8_ivf_pipe_input() {
+  if [ "$(aomdec_can_decode_vp8)" = "yes" ]; then
+    aomdec_pipe "${VP8_IVF_FILE}" --summary --noblit
+  fi
+}
+
+aomdec_av1_webm() {
+  if [ "$(aomdec_can_decode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    aomdec "${AV1_WEBM_FILE}" --summary --noblit
+  fi
+}
+
+aomdec_av1_webm_frame_parallel() {
+  if [ "$(aomdec_can_decode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    for threads in 2 3 4 5 6 7 8; do
+      aomdec "${AV1_FPM_WEBM_FILE}" --summary --noblit --threads=$threads \
+        --frame-parallel
+    done
+  fi
+}
+
+aomdec_av1_webm_less_than_50_frames() {
+  # ensure that reaching eof in webm_guess_framerate doesn't result in invalid
+  # frames in actual webm_read_frame calls.
+  if [ "$(aomdec_can_decode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly decoder="$(aom_tool_path aomdec)"
+    local readonly expected=10
+    local readonly num_frames=$(${AOM_TEST_PREFIX} "${decoder}" \
+      "${AV1_LT_50_FRAMES_WEBM_FILE}" --summary --noblit 2>&1 \
+      | awk '/^[0-9]+ decoded frames/ { print $1 }')
+    if [ "$num_frames" -ne "$expected" ]; then
+      elog "Output frames ($num_frames) != expected ($expected)"
+      return 1
+    fi
+  fi
+}
+
+aomdec_tests="aomdec_vp8_ivf
+              aomdec_vp8_ivf_pipe_input
+              aomdec_av1_webm
+              aomdec_av1_webm_frame_parallel
+              aomdec_av1_webm_less_than_50_frames"
+
+run_tests aomdec_verify_environment "${aomdec_tests}"
diff --git a/test/aomenc.sh b/test/aomenc.sh
new file mode 100755
index 0000000..5edc1de
--- /dev/null
+++ b/test/aomenc.sh
@@ -0,0 +1,429 @@
+#!/bin/sh
+##
+##  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+##  This file tests aomenc using hantro_collage_w352h288.yuv as input. To add
+##  new tests to this file, do the following:
+##    1. Write a shell function (this is your test).
+##    2. Add the function to aomenc_tests (on a new line).
+##
+. $(dirname $0)/tools_common.sh
+
+readonly TEST_FRAMES=10
+
+# Environment check: Make sure input is available.
+aomenc_verify_environment() {
+  if [ ! -e "${YUV_RAW_INPUT}" ]; then
+    elog "The file ${YUV_RAW_INPUT##*/} must exist in LIBVPX_TEST_DATA_PATH."
+    return 1
+  fi
+  if [ "$(aomenc_can_encode_av1)" = "yes" ]; then
+    if [ ! -e "${Y4M_NOSQ_PAR_INPUT}" ]; then
+      elog "The file ${Y4M_NOSQ_PAR_INPUT##*/} must exist in"
+      elog "LIBVPX_TEST_DATA_PATH."
+      return 1
+    fi
+  fi
+  if [ -z "$(aom_tool_path aomenc)" ]; then
+    elog "aomenc not found. It must exist in LIBAOM_BIN_PATH or its parent."
+    return 1
+  fi
+}
+
+aomenc_can_encode_vp8() {
+  if [ "$(vp8_encode_available)" = "yes" ]; then
+    echo yes
+  fi
+}
+
+aomenc_can_encode_av1() {
+  if [ "$(av1_encode_available)" = "yes" ]; then
+    echo yes
+  fi
+}
+
+# Echo aomenc command line parameters allowing use of
+# hantro_collage_w352h288.yuv as input.
+yuv_input_hantro_collage() {
+  echo ""${YUV_RAW_INPUT}"
+       --width="${YUV_RAW_INPUT_WIDTH}"
+       --height="${YUV_RAW_INPUT_HEIGHT}""
+}
+
+y4m_input_non_square_par() {
+  echo ""${Y4M_NOSQ_PAR_INPUT}""
+}
+
+y4m_input_720p() {
+  echo ""${Y4M_720P_INPUT}""
+}
+
+# Echo default aomenc real time encoding params. $1 is the codec, which defaults
+# to vp8 if unspecified.
+aomenc_rt_params() {
+  local readonly codec="${1:-vp8}"
+  echo "--codec=${codec}
+    --buf-initial-sz=500
+    --buf-optimal-sz=600
+    --buf-sz=1000
+    --cpu-used=-6
+    --end-usage=cbr
+    --error-resilient=1
+    --kf-max-dist=90000
+    --lag-in-frames=0
+    --max-intra-rate=300
+    --max-q=56
+    --min-q=2
+    --noise-sensitivity=0
+    --overshoot-pct=50
+    --passes=1
+    --profile=0
+    --resize-allowed=0
+    --rt
+    --static-thresh=0
+    --undershoot-pct=50"
+}
+
+# Wrapper function for running aomenc with pipe input. Requires that
+# LIBAOM_BIN_PATH points to the directory containing aomenc. $1 is used as the
+# input file path and shifted away. All remaining parameters are passed through
+# to aomenc.
+aomenc_pipe() {
+  local readonly encoder="$(aom_tool_path aomenc)"
+  local readonly input="$1"
+  shift
+  cat "${input}" | eval "${AOM_TEST_PREFIX}" "${encoder}" - \
+    --test-decode=fatal \
+    "$@" ${devnull}
+}
+
+# Wrapper function for running aomenc. Requires that LIBAOM_BIN_PATH points to
+# the directory containing aomenc. $1 one is used as the input file path and
+# shifted away. All remaining parameters are passed through to aomenc.
+aomenc() {
+  local readonly encoder="$(aom_tool_path aomenc)"
+  local readonly input="$1"
+  shift
+  eval "${AOM_TEST_PREFIX}" "${encoder}" "${input}" \
+    --test-decode=fatal \
+    "$@" ${devnull}
+}
+
+aomenc_vp8_ivf() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8.ivf"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=vp8 \
+      --limit="${TEST_FRAMES}" \
+      --ivf \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_vp8_webm() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=vp8 \
+      --limit="${TEST_FRAMES}" \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_vp8_webm_rt() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8_rt.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      $(aomenc_rt_params vp8) \
+      --output="${output}"
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_vp8_webm_2pass() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=vp8 \
+      --limit="${TEST_FRAMES}" \
+      --output="${output}" \
+      --passes=2
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_vp8_webm_lag10_frames20() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly lag_total_frames=20
+    local readonly lag_frames=10
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8_lag10_frames20.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=vp8 \
+      --limit="${lag_total_frames}" \
+      --lag-in-frames="${lag_frames}" \
+      --output="${output}" \
+      --auto-alt-ref=1 \
+      --passes=2
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_vp8_ivf_piped_input() {
+  if [ "$(aomenc_can_encode_vp8)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/vp8_piped_input.ivf"
+    aomenc_pipe $(yuv_input_hantro_collage) \
+      --codec=vp8 \
+      --limit="${TEST_FRAMES}" \
+      --ivf \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_ivf() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1.ivf"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --ivf \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_webm() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_webm_rt() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_rt.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      $(aomenc_rt_params av1) \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_webm_rt_multithread_tiled() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_rt_multithread_tiled.webm"
+    local readonly tilethread_min=2
+    local readonly tilethread_max=4
+    local readonly num_threads="$(seq ${tilethread_min} ${tilethread_max})"
+    local readonly num_tile_cols="$(seq ${tilethread_min} ${tilethread_max})"
+
+    for threads in ${num_threads}; do
+      for tile_cols in ${num_tile_cols}; do
+        aomenc $(y4m_input_720p) \
+          $(aomenc_rt_params av1) \
+          --threads=${threads} \
+          --tile-columns=${tile_cols} \
+          --output="${output}"
+      done
+    done
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+
+    rm "${output}"
+  fi
+}
+
+aomenc_av1_webm_rt_multithread_tiled_frameparallel() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_rt_mt_t_fp.webm"
+    local readonly tilethread_min=2
+    local readonly tilethread_max=4
+    local readonly num_threads="$(seq ${tilethread_min} ${tilethread_max})"
+    local readonly num_tile_cols="$(seq ${tilethread_min} ${tilethread_max})"
+
+    for threads in ${num_threads}; do
+      for tile_cols in ${num_tile_cols}; do
+        aomenc $(y4m_input_720p) \
+          $(aomenc_rt_params av1) \
+          --threads=${threads} \
+          --tile-columns=${tile_cols} \
+          --frame-parallel=1 \
+          --output="${output}"
+      done
+    done
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+
+    rm "${output}"
+  fi
+}
+
+aomenc_av1_webm_2pass() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --output="${output}" \
+      --passes=2
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_ivf_lossless() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_lossless.ivf"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --ivf \
+      --output="${output}" \
+      --lossless=1
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_ivf_minq0_maxq0() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_lossless_minq0_maxq0.ivf"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --ivf \
+      --output="${output}" \
+      --min-q=0 \
+      --max-q=0
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_av1_webm_lag10_frames20() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly lag_total_frames=20
+    local readonly lag_frames=10
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_lag10_frames20.webm"
+    aomenc $(yuv_input_hantro_collage) \
+      --codec=av1 \
+      --limit="${lag_total_frames}" \
+      --lag-in-frames="${lag_frames}" \
+      --output="${output}" \
+      --passes=2 \
+      --auto-alt-ref=1
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+# TODO(fgalligan): Test that DisplayWidth is different than video width.
+aomenc_av1_webm_non_square_par() {
+  if [ "$(aomenc_can_encode_av1)" = "yes" ] && \
+     [ "$(webm_io_available)" = "yes" ]; then
+    local readonly output="${AOM_TEST_OUTPUT_DIR}/av1_non_square_par.webm"
+    aomenc $(y4m_input_non_square_par) \
+      --codec=av1 \
+      --limit="${TEST_FRAMES}" \
+      --output="${output}"
+
+    if [ ! -e "${output}" ]; then
+      elog "Output file does not exist."
+      return 1
+    fi
+  fi
+}
+
+aomenc_tests="aomenc_vp8_ivf
+              aomenc_vp8_webm
+              aomenc_vp8_webm_rt
+              aomenc_vp8_webm_2pass
+              aomenc_vp8_webm_lag10_frames20
+              aomenc_vp8_ivf_piped_input
+              aomenc_av1_ivf
+              aomenc_av1_webm
+              aomenc_av1_webm_rt
+              aomenc_av1_webm_rt_multithread_tiled
+              aomenc_av1_webm_rt_multithread_tiled_frameparallel
+              aomenc_av1_webm_2pass
+              aomenc_av1_ivf_lossless
+              aomenc_av1_ivf_minq0_maxq0
+              aomenc_av1_webm_lag10_frames20
+              aomenc_av1_webm_non_square_par"
+
+run_tests aomenc_verify_environment "${aomenc_tests}"
diff --git a/test/aq_segment_test.cc b/test/aq_segment_test.cc
index bd4e51e..17e0555 100644
--- a/test/aq_segment_test.cc
+++ b/test/aq_segment_test.cc
@@ -32,9 +32,9 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
-      encoder->Control(VP9E_SET_AQ_MODE, aq_mode_);
-      encoder->Control(VP8E_SET_MAX_INTRA_BITRATE_PCT, 100);
+      encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
+      encoder->Control(AV1E_SET_AQ_MODE, aq_mode_);
+      encoder->Control(AOME_SET_MAX_INTRA_BITRATE_PCT, 100);
     }
   }
 
@@ -43,7 +43,7 @@
     cfg_.kf_max_dist = 12;
     cfg_.rc_min_quantizer = 8;
     cfg_.rc_max_quantizer = 56;
-    cfg_.rc_end_usage = VPX_CBR;
+    cfg_.rc_end_usage = AOM_CBR;
     cfg_.g_lag_in_frames = 6;
     cfg_.rc_buf_initial_sz = 500;
     cfg_.rc_buf_optimal_sz = 500;
@@ -78,12 +78,12 @@
 
 TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ3) { DoTest(3); }
 
-VP10_INSTANTIATE_TEST_CASE(AqSegmentTest,
-                           ::testing::Values(::libaom_test::kRealTime,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(5, 9));
-VP10_INSTANTIATE_TEST_CASE(AqSegmentTestLarge,
-                           ::testing::Values(::libaom_test::kRealTime,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(3, 5));
+AV1_INSTANTIATE_TEST_CASE(AqSegmentTest,
+                          ::testing::Values(::libaom_test::kRealTime,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(5, 9));
+AV1_INSTANTIATE_TEST_CASE(AqSegmentTestLarge,
+                          ::testing::Values(::libaom_test::kRealTime,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(3, 5));
 }  // namespace
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 83a0337..d319cbe 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -32,8 +32,8 @@
   unsigned int framerate_num;
   unsigned int framerate_den;
   unsigned int input_bit_depth;
-  vpx_img_fmt fmt;
-  vpx_bit_depth_t bit_depth;
+  aom_img_fmt fmt;
+  aom_bit_depth_t bit_depth;
   unsigned int profile;
 } TestVideoParam;
 
@@ -44,14 +44,14 @@
 
 const TestVideoParam kTestVectors[] = {
   // artificially increase framerate to trigger default check
-  { "hantro_collage_w352h288.yuv", 352, 288, 5000, 1, 8, VPX_IMG_FMT_I420,
-    VPX_BITS_8, 0 },
-  { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420,
-    VPX_BITS_8, 0 },
-  { "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
-#if CONFIG_VP9_HIGHBITDEPTH
+  { "hantro_collage_w352h288.yuv", 352, 288, 5000, 1, 8, AOM_IMG_FMT_I420,
+    AOM_BITS_8, 0 },
+  { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, AOM_IMG_FMT_I420,
+    AOM_BITS_8, 0 },
+  { "rush_hour_444.y4m", 352, 288, 30, 1, 8, AOM_IMG_FMT_I444, AOM_BITS_8, 1 },
+#if CONFIG_AOM_HIGHBITDEPTH
 // Add list of profile 2/3 test videos here ...
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 
 const TestEncodeParam kEncodeVectors[] = {
@@ -62,7 +62,7 @@
 
 const int kMinArfVectors[] = {
   // NOTE: 0 refers to the default built-in logic in:
-  //       vp9_rc_get_default_min_gf_interval(...)
+  //       av1_rc_get_default_min_gf_interval(...)
   0, 4, 8, 12, 15
 };
 
@@ -90,10 +90,10 @@
     SetMode(test_encode_param_.mode);
     if (test_encode_param_.mode != ::libaom_test::kRealTime) {
       cfg_.g_lag_in_frames = 25;
-      cfg_.rc_end_usage = VPX_VBR;
+      cfg_.rc_end_usage = AOM_VBR;
     } else {
       cfg_.g_lag_in_frames = 0;
-      cfg_.rc_end_usage = VPX_CBR;
+      cfg_.rc_end_usage = AOM_CBR;
       cfg_.rc_buf_sz = 1000;
       cfg_.rc_buf_initial_sz = 500;
       cfg_.rc_buf_optimal_sz = 600;
@@ -106,7 +106,7 @@
     run_of_visible_frames_ = 0;
   }
 
-  int GetNumFramesInPkt(const vpx_codec_cx_pkt_t *pkt) {
+  int GetNumFramesInPkt(const aom_codec_cx_pkt_t *pkt) {
     const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
     const uint8_t marker = buffer[pkt->data.frame.sz - 1];
     const int mag = ((marker >> 3) & 3) + 1;
@@ -123,8 +123,8 @@
     return frames;
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
+    if (pkt->kind != AOM_CODEC_CX_FRAME_PKT) return;
     const int frames = GetNumFramesInPkt(pkt);
     if (frames == 1) {
       run_of_visible_frames_++;
@@ -145,15 +145,15 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, 4);
-      encoder->Control(VP8E_SET_CPUUSED, test_encode_param_.cpu_used);
-      encoder->Control(VP9E_SET_MIN_GF_INTERVAL, min_arf_requested_);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING, 1);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, 4);
+      encoder->Control(AOME_SET_CPUUSED, test_encode_param_.cpu_used);
+      encoder->Control(AV1E_SET_MIN_GF_INTERVAL, min_arf_requested_);
       if (test_encode_param_.mode != ::libaom_test::kRealTime) {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-        encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-        encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-        encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+        encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+        encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+        encoder->Control(AOME_SET_ARNR_TYPE, 3);
       }
     }
   }
@@ -164,7 +164,7 @@
     if (min_arf_requested_)
       return min_arf_requested_;
     else
-      return vp10_rc_get_default_min_gf_interval(
+      return av1_rc_get_default_min_gf_interval(
           test_video_param_.width, test_video_param_.height,
           (double)test_video_param_.framerate_num /
               test_video_param_.framerate_den);
@@ -185,8 +185,8 @@
   cfg_.g_profile = test_video_param_.profile;
   cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
   cfg_.g_bit_depth = test_video_param_.bit_depth;
-  init_flags_ = VPX_CODEC_USE_PSNR;
-  if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+  init_flags_ = AOM_CODEC_USE_PSNR;
+  if (cfg_.g_bit_depth > 8) init_flags_ |= AOM_CODEC_USE_HIGHBITDEPTH;
 
   testing::internal::scoped_ptr<libaom_test::VideoSource> video;
   if (is_extension_y4m(test_video_param_.filename)) {
@@ -209,24 +209,24 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH || CONFIG_EXT_REFS
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AOM_HIGHBITDEPTH || CONFIG_EXT_REFS
+#if CONFIG_AV1_ENCODER
 // TODO(angiebird): 25-29 fail in high bitdepth mode.
 // TODO(zoeliu): This ArfFreqTest does not work with BWDREF_FRAME, as
 // BWDREF_FRAME is also a non-show frame, and the minimum run between two
 // consecutive BWDREF_FRAME's may vary between 1 and any arbitrary positive
 // number as long as it does not exceed the gf_group interval.
 INSTANTIATE_TEST_CASE_P(
-    DISABLED_VP10, ArfFreqTestLarge,
+    DISABLED_AV1, ArfFreqTestLarge,
     ::testing::Combine(
-        ::testing::Values(static_cast<const libaom_test::CodecFactory *>(
-            &libaom_test::kVP10)),
+        ::testing::Values(
+            static_cast<const libaom_test::CodecFactory *>(&libaom_test::kAV1)),
         ::testing::ValuesIn(kTestVectors), ::testing::ValuesIn(kEncodeVectors),
         ::testing::ValuesIn(kMinArfVectors)));
-#endif  // CONFIG_VP10_ENCODER
+#endif  // CONFIG_AV1_ENCODER
 #else
-VP10_INSTANTIATE_TEST_CASE(ArfFreqTestLarge, ::testing::ValuesIn(kTestVectors),
-                           ::testing::ValuesIn(kEncodeVectors),
-                           ::testing::ValuesIn(kMinArfVectors));
-#endif  // CONFIG_VP9_HIGHBITDEPTH || CONFIG_EXT_REFS
+AV1_INSTANTIATE_TEST_CASE(ArfFreqTestLarge, ::testing::ValuesIn(kTestVectors),
+                          ::testing::ValuesIn(kEncodeVectors),
+                          ::testing::ValuesIn(kMinArfVectors));
+#endif  // CONFIG_AOM_HIGHBITDEPTH || CONFIG_EXT_REFS
 }  // namespace
diff --git a/test/vp10_ans_test.cc b/test/av1_ans_test.cc
similarity index 84%
rename from test/vp10_ans_test.cc
rename to test/av1_ans_test.cc
index ddedbea..20fc223 100644
--- a/test/vp10_ans_test.cc
+++ b/test/av1_ans_test.cc
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#define VP10_FORCE_VPXBOOL_TREEWRITER
+#define AV1_FORCE_AOMBOOL_TREEWRITER
 
 #include <assert.h>
 #include <math.h>
@@ -124,25 +124,25 @@
   return ans_read_end(&d);
 }
 
-bool check_vpxbool(const PvVec &pv_vec, uint8_t *buf) {
-  vpx_writer w;
-  vpx_reader r;
-  vpx_start_encode(&w, buf);
+bool check_aombool(const PvVec &pv_vec, uint8_t *buf) {
+  aom_writer w;
+  aom_reader r;
+  aom_start_encode(&w, buf);
 
   std::clock_t start = std::clock();
   for (PvVec::const_iterator it = pv_vec.begin(); it != pv_vec.end(); ++it) {
-    vpx_write(&w, it->second, 256 - it->first);
+    aom_write(&w, it->second, 256 - it->first);
   }
   std::clock_t enc_time = std::clock() - start;
-  vpx_stop_encode(&w);
+  aom_stop_encode(&w);
   bool okay = true;
-  vpx_reader_init(&r, buf, w.pos, NULL, NULL);
+  aom_reader_init(&r, buf, w.pos, NULL, NULL);
   start = std::clock();
   for (PvVec::const_iterator it = pv_vec.begin(); it != pv_vec.end(); ++it) {
-    okay &= vpx_read(&r, 256 - it->first) == it->second;
+    okay &= aom_read(&r, 256 - it->first) == it->second;
   }
   std::clock_t dec_time = std::clock() - start;
-  printf("VPX size %d enc_time %f dec_time %f\n", w.pos,
+  printf("AOM size %d enc_time %f dec_time %f\n", w.pos,
          static_cast<float>(enc_time) / CLOCKS_PER_SEC,
          static_cast<float>(dec_time) / CLOCKS_PER_SEC);
   return okay;
@@ -212,8 +212,8 @@
   return ans_read_end(&d);
 }
 
-void build_tree(vpx_tree_index *tree, int num_syms) {
-  vpx_tree_index i;
+void build_tree(aom_tree_index *tree, int num_syms) {
+  aom_tree_index i;
   int sym = 0;
   for (i = 0; i < num_syms - 1; ++i) {
     tree[2 * i] = sym--;
@@ -232,7 +232,7 @@
  *             / \
  *        -sym2  -sym3
  */
-void tab2tree(const rans_sym *tab, int tab_size, vpx_prob *treep) {
+void tab2tree(const rans_sym *tab, int tab_size, aom_prob *treep) {
   const unsigned basep = rans_precision;
   unsigned pleft = basep;
   for (int i = 0; i < tab_size - 1; ++i) {
@@ -263,8 +263,8 @@
   tab[i].len = len;
 }
 
-void build_tpb(vpx_prob probs[/*num_syms*/],
-               vpx_tree_index tree[/*2*num_syms*/],
+void build_tpb(aom_prob probs[/*num_syms*/],
+               aom_tree_index tree[/*2*num_syms*/],
                sym_bools bit_len[/*num_syms*/],
                const rans_sym sym_tab[/*num_syms*/], int num_syms) {
   tab2tree(sym_tab, num_syms, probs);
@@ -272,38 +272,38 @@
   make_tree_bits_tab(bit_len, num_syms);
 }
 
-bool check_vpxtree(const std::vector<int> &sym_vec, const rans_sym *sym_tab,
+bool check_aomtree(const std::vector<int> &sym_vec, const rans_sym *sym_tab,
                    uint8_t *buf) {
-  vpx_writer w;
-  vpx_reader r;
-  vpx_start_encode(&w, buf);
+  aom_writer w;
+  aom_reader r;
+  aom_start_encode(&w, buf);
 
-  vpx_prob probs[kDistinctSyms];
-  vpx_tree_index tree[2 * kDistinctSyms];
+  aom_prob probs[kDistinctSyms];
+  aom_tree_index tree[2 * kDistinctSyms];
   sym_bools bit_len[kDistinctSyms];
   build_tpb(probs, tree, bit_len, sym_tab, kDistinctSyms);
 
   std::clock_t start = std::clock();
   for (std::vector<int>::const_iterator it = sym_vec.begin();
        it != sym_vec.end(); ++it) {
-    vp10_write_tree(&w, tree, probs, bit_len[*it].bits, bit_len[*it].len, 0);
+    av1_write_tree(&w, tree, probs, bit_len[*it].bits, bit_len[*it].len, 0);
   }
   std::clock_t enc_time = std::clock() - start;
-  vpx_stop_encode(&w);
-  vpx_reader_init(&r, buf, w.pos, NULL, NULL);
+  aom_stop_encode(&w);
+  aom_reader_init(&r, buf, w.pos, NULL, NULL);
   start = std::clock();
   for (std::vector<int>::const_iterator it = sym_vec.begin();
        it != sym_vec.end(); ++it) {
-    if (vpx_read_tree(&r, tree, probs) != *it) return false;
+    if (aom_read_tree(&r, tree, probs) != *it) return false;
   }
   std::clock_t dec_time = std::clock() - start;
-  printf("VPXtree size %u enc_time %f dec_time %f\n", w.pos,
+  printf("AOMtree size %u enc_time %f dec_time %f\n", w.pos,
          static_cast<float>(enc_time) / CLOCKS_PER_SEC,
          static_cast<float>(dec_time) / CLOCKS_PER_SEC);
   return true;
 }
 
-class Vp10AbsTest : public ::testing::Test {
+class Av1AbsTest : public ::testing::Test {
  protected:
   static void SetUpTestCase() { pv_vec_ = abs_encode_build_vals(kNumBools); }
   virtual void SetUp() { buf_ = new uint8_t[kNumBools / 8]; }
@@ -312,9 +312,9 @@
   static PvVec pv_vec_;
   uint8_t *buf_;
 };
-PvVec Vp10AbsTest::pv_vec_;
+PvVec Av1AbsTest::pv_vec_;
 
-class Vp10AnsTest : public ::testing::Test {
+class Av1AnsTest : public ::testing::Test {
  protected:
   static void SetUpTestCase() {
     sym_vec_ = ans_encode_build_vals(rans_sym_tab, kNumSyms);
@@ -325,17 +325,17 @@
   static std::vector<int> sym_vec_;
   uint8_t *buf_;
 };
-std::vector<int> Vp10AnsTest::sym_vec_;
+std::vector<int> Av1AnsTest::sym_vec_;
 
-TEST_F(Vp10AbsTest, Vpxbool) { EXPECT_TRUE(check_vpxbool(pv_vec_, buf_)); }
-TEST_F(Vp10AbsTest, Rabs) { EXPECT_TRUE(check_rabs(pv_vec_, buf_)); }
-TEST_F(Vp10AbsTest, RabsAsc) { EXPECT_TRUE(check_rabs_asc(pv_vec_, buf_)); }
-TEST_F(Vp10AbsTest, Uabs) { EXPECT_TRUE(check_uabs(pv_vec_, buf_)); }
+TEST_F(Av1AbsTest, Avxbool) { EXPECT_TRUE(check_aombool(pv_vec_, buf_)); }
+TEST_F(Av1AbsTest, Rabs) { EXPECT_TRUE(check_rabs(pv_vec_, buf_)); }
+TEST_F(Av1AbsTest, RabsAsc) { EXPECT_TRUE(check_rabs_asc(pv_vec_, buf_)); }
+TEST_F(Av1AbsTest, Uabs) { EXPECT_TRUE(check_uabs(pv_vec_, buf_)); }
 
-TEST_F(Vp10AnsTest, Rans) {
+TEST_F(Av1AnsTest, Rans) {
   EXPECT_TRUE(check_rans(sym_vec_, rans_sym_tab, buf_));
 }
-TEST_F(Vp10AnsTest, Vpxtree) {
-  EXPECT_TRUE(check_vpxtree(sym_vec_, rans_sym_tab, buf_));
+TEST_F(Av1AnsTest, Avxtree) {
+  EXPECT_TRUE(check_aomtree(sym_vec_, rans_sym_tab, buf_));
 }
 }  // namespace
diff --git a/test/vp10_convolve_optimz_test.cc b/test/av1_convolve_optimz_test.cc
similarity index 79%
rename from test/vp10_convolve_optimz_test.cc
rename to test/av1_convolve_optimz_test.cc
index cb8b8fb..6bbda33 100644
--- a/test/vp10_convolve_optimz_test.cc
+++ b/test/av1_convolve_optimz_test.cc
@@ -10,7 +10,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
@@ -23,7 +23,7 @@
 
 typedef void (*conv_filter_t)(const uint8_t *, int, uint8_t *, int, int, int,
                               const InterpFilterParams, const int, int, int);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*hbd_conv_filter_t)(const uint16_t *, int, uint16_t *, int, int,
                                   int, const InterpFilterParams, const int, int,
                                   int, int);
@@ -35,7 +35,7 @@
 typedef tuple<int, int> BlockDimension;
 typedef tuple<conv_filter_t, conv_filter_t, BlockDimension, INTERP_FILTER, int,
               int> ConvParams;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Test parameter list:
 //  <convolve_horiz_func, convolve_vert_func,
 //  <width, height>, filter_params, subpel_x_q4, avg, bit_dpeth>
@@ -55,9 +55,9 @@
 const int stride = 128;
 const int x_step_q4 = 16;
 
-class VP10ConvolveOptimzTest : public ::testing::TestWithParam<ConvParams> {
+class AV1ConvolveOptimzTest : public ::testing::TestWithParam<ConvParams> {
  public:
-  virtual ~VP10ConvolveOptimzTest() {}
+  virtual ~AV1ConvolveOptimzTest() {}
   virtual void SetUp() {
     conv_horiz_ = GET_PARAM(0);
     conv_vert_ = GET_PARAM(1);
@@ -103,7 +103,7 @@
   int avg_;
 };
 
-void VP10ConvolveOptimzTest::PrepFilterBuffer(int w, int h) {
+void AV1ConvolveOptimzTest::PrepFilterBuffer(int w, int h) {
   int r, c;
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 
@@ -128,7 +128,7 @@
   }
 }
 
-void VP10ConvolveOptimzTest::DiffFilterBuffer() {
+void AV1ConvolveOptimzTest::DiffFilterBuffer() {
   int r, c;
   const uint8_t *dst_ptr = dst_;
   const uint8_t *dst_ref_ptr = dst_ref_;
@@ -146,13 +146,13 @@
   }
 }
 
-void VP10ConvolveOptimzTest::RunHorizFilterBitExactCheck() {
+void AV1ConvolveOptimzTest::RunHorizFilterBitExactCheck() {
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  InterpFilterParams filter_params = vp10_get_interp_filter_params(filter_);
+  InterpFilterParams filter_params = av1_get_interp_filter_params(filter_);
 
-  vp10_convolve_horiz_c(src_ref_, stride, dst_ref_, stride, width_, height_,
-                        filter_params, subpel_, x_step_q4, avg_);
+  av1_convolve_horiz_c(src_ref_, stride, dst_ref_, stride, width_, height_,
+                       filter_params, subpel_, x_step_q4, avg_);
 
   conv_horiz_(src_, stride, dst_, stride, width_, height_, filter_params,
               subpel_, x_step_q4, avg_);
@@ -166,9 +166,9 @@
       (((height_ - 1) * 16 + subpel_) >> SUBPEL_BITS) + filter_params.taps;
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  vp10_convolve_horiz_c(src_ref_, stride, dst_ref_, stride, width_,
-                        intermediate_height, filter_params, subpel_, x_step_q4,
-                        avg_);
+  av1_convolve_horiz_c(src_ref_, stride, dst_ref_, stride, width_,
+                       intermediate_height, filter_params, subpel_, x_step_q4,
+                       avg_);
 
   conv_horiz_(src_, stride, dst_, stride, width_, intermediate_height,
               filter_params, subpel_, x_step_q4, avg_);
@@ -176,13 +176,13 @@
   DiffFilterBuffer();
 }
 
-void VP10ConvolveOptimzTest::RunVertFilterBitExactCheck() {
+void AV1ConvolveOptimzTest::RunVertFilterBitExactCheck() {
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  InterpFilterParams filter_params = vp10_get_interp_filter_params(filter_);
+  InterpFilterParams filter_params = av1_get_interp_filter_params(filter_);
 
-  vp10_convolve_vert_c(src_ref_, stride, dst_ref_, stride, width_, height_,
-                       filter_params, subpel_, x_step_q4, avg_);
+  av1_convolve_vert_c(src_ref_, stride, dst_ref_, stride, width_, height_,
+                      filter_params, subpel_, x_step_q4, avg_);
 
   conv_vert_(src_, stride, dst_, stride, width_, height_, filter_params,
              subpel_, x_step_q4, avg_);
@@ -190,10 +190,10 @@
   DiffFilterBuffer();
 }
 
-TEST_P(VP10ConvolveOptimzTest, HorizBitExactCheck) {
+TEST_P(AV1ConvolveOptimzTest, HorizBitExactCheck) {
   RunHorizFilterBitExactCheck();
 }
-TEST_P(VP10ConvolveOptimzTest, VerticalBitExactCheck) {
+TEST_P(AV1ConvolveOptimzTest, VerticalBitExactCheck) {
   RunVertFilterBitExactCheck();
 }
 
@@ -219,20 +219,20 @@
 
 #if HAVE_SSSE3 && CONFIG_EXT_INTERP
 INSTANTIATE_TEST_CASE_P(
-    SSSE3, VP10ConvolveOptimzTest,
-    ::testing::Combine(::testing::Values(vp10_convolve_horiz_ssse3),
-                       ::testing::Values(vp10_convolve_vert_ssse3),
+    SSSE3, AV1ConvolveOptimzTest,
+    ::testing::Combine(::testing::Values(av1_convolve_horiz_ssse3),
+                       ::testing::Values(av1_convolve_vert_ssse3),
                        ::testing::ValuesIn(kBlockDim),
                        ::testing::ValuesIn(kFilter),
                        ::testing::ValuesIn(kSubpelQ4),
                        ::testing::ValuesIn(kAvg)));
 #endif  // HAVE_SSSE3 && CONFIG_EXT_INTERP
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef ::testing::TestWithParam<HbdConvParams> TestWithHbdConvParams;
-class VP10HbdConvolveOptimzTest : public TestWithHbdConvParams {
+class AV1HbdConvolveOptimzTest : public TestWithHbdConvParams {
  public:
-  virtual ~VP10HbdConvolveOptimzTest() {}
+  virtual ~AV1HbdConvolveOptimzTest() {}
   virtual void SetUp() {
     conv_horiz_ = GET_PARAM(0);
     conv_vert_ = GET_PARAM(1);
@@ -280,7 +280,7 @@
   int bit_depth_;
 };
 
-void VP10HbdConvolveOptimzTest::PrepFilterBuffer(int w, int h) {
+void AV1HbdConvolveOptimzTest::PrepFilterBuffer(int w, int h) {
   int r, c;
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 
@@ -303,7 +303,7 @@
   }
 }
 
-void VP10HbdConvolveOptimzTest::DiffFilterBuffer() {
+void AV1HbdConvolveOptimzTest::DiffFilterBuffer() {
   int r, c;
   const uint16_t *dst_ptr = dst_;
   const uint16_t *dst_ref_ptr = dst_ref_;
@@ -322,14 +322,14 @@
   }
 }
 
-void VP10HbdConvolveOptimzTest::RunHorizFilterBitExactCheck() {
+void AV1HbdConvolveOptimzTest::RunHorizFilterBitExactCheck() {
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  InterpFilterParams filter_params = vp10_get_interp_filter_params(filter_);
+  InterpFilterParams filter_params = av1_get_interp_filter_params(filter_);
 
-  vp10_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_, height_,
-                               filter_params, subpel_, x_step_q4, avg_,
-                               bit_depth_);
+  av1_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_, height_,
+                              filter_params, subpel_, x_step_q4, avg_,
+                              bit_depth_);
 
   conv_horiz_(src_, stride, dst_, stride, width_, height_, filter_params,
               subpel_, x_step_q4, avg_, bit_depth_);
@@ -343,9 +343,9 @@
       (((height_ - 1) * 16 + subpel_) >> SUBPEL_BITS) + filter_params.taps;
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  vp10_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_,
-                               intermediate_height, filter_params, subpel_,
-                               x_step_q4, avg_, bit_depth_);
+  av1_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_,
+                              intermediate_height, filter_params, subpel_,
+                              x_step_q4, avg_, bit_depth_);
 
   conv_horiz_(src_, stride, dst_, stride, width_, intermediate_height,
               filter_params, subpel_, x_step_q4, avg_, bit_depth_);
@@ -353,14 +353,14 @@
   DiffFilterBuffer();
 }
 
-void VP10HbdConvolveOptimzTest::RunVertFilterBitExactCheck() {
+void AV1HbdConvolveOptimzTest::RunVertFilterBitExactCheck() {
   PrepFilterBuffer(testMaxBlk, testMaxBlk);
 
-  InterpFilterParams filter_params = vp10_get_interp_filter_params(filter_);
+  InterpFilterParams filter_params = av1_get_interp_filter_params(filter_);
 
-  vp10_highbd_convolve_vert_c(src_, stride, dst_ref_, stride, width_, height_,
-                              filter_params, subpel_, x_step_q4, avg_,
-                              bit_depth_);
+  av1_highbd_convolve_vert_c(src_, stride, dst_ref_, stride, width_, height_,
+                             filter_params, subpel_, x_step_q4, avg_,
+                             bit_depth_);
 
   conv_vert_(src_, stride, dst_, stride, width_, height_, filter_params,
              subpel_, x_step_q4, avg_, bit_depth_);
@@ -368,10 +368,10 @@
   DiffFilterBuffer();
 }
 
-TEST_P(VP10HbdConvolveOptimzTest, HorizBitExactCheck) {
+TEST_P(AV1HbdConvolveOptimzTest, HorizBitExactCheck) {
   RunHorizFilterBitExactCheck();
 }
-TEST_P(VP10HbdConvolveOptimzTest, VertBitExactCheck) {
+TEST_P(AV1HbdConvolveOptimzTest, VertBitExactCheck) {
   RunVertFilterBitExactCheck();
 }
 
@@ -380,14 +380,14 @@
 const int kBitdepth[] = { 10, 12 };
 
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VP10HbdConvolveOptimzTest,
-    ::testing::Combine(::testing::Values(vp10_highbd_convolve_horiz_sse4_1),
-                       ::testing::Values(vp10_highbd_convolve_vert_sse4_1),
+    SSE4_1, AV1HbdConvolveOptimzTest,
+    ::testing::Combine(::testing::Values(av1_highbd_convolve_horiz_sse4_1),
+                       ::testing::Values(av1_highbd_convolve_vert_sse4_1),
                        ::testing::ValuesIn(kBlockDim),
                        ::testing::ValuesIn(kFilter),
                        ::testing::ValuesIn(kSubpelQ4),
                        ::testing::ValuesIn(kAvg),
                        ::testing::ValuesIn(kBitdepth)));
 #endif  // HAVE_SSE4_1 && CONFIG_EXT_INTERP
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/vp10_convolve_test.cc b/test/av1_convolve_test.cc
similarity index 72%
rename from test/vp10_convolve_test.cc
rename to test/av1_convolve_test.cc
index 2ec6ae7..69273de 100644
--- a/test/vp10_convolve_test.cc
+++ b/test/av1_convolve_test.cc
@@ -1,11 +1,11 @@
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "av1/common/filter.h"
-#include "av1/common/vp10_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "av1/common/av1_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 using libaom_test::ACMRandom;
@@ -13,22 +13,22 @@
 namespace {
 void setup_convolve() {
 #if HAVE_SSSE3 && CONFIG_RUNTIME_CPU_DETECT
-  vp10_convolve_horiz = vp10_convolve_horiz_c;
-  vp10_convolve_vert = vp10_convolve_vert_c;
+  av1_convolve_horiz = av1_convolve_horiz_c;
+  av1_convolve_vert = av1_convolve_vert_c;
 #endif
 }
 
-TEST(VP10ConvolveTest, vp10_convolve8) {
+TEST(AV1ConvolveTest, av1_convolve8) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
                                      EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter[0]);
+      av1_get_interp_filter_params(interp_filter[0]);
 #else
   INTERP_FILTER interp_filter = EIGHTTAP_REGULAR;
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
   int filter_size = filter_params.taps;
   int filter_center = filter_size / 2 - 1;
@@ -52,30 +52,30 @@
     src[i] = rnd.Rand16() % (1 << 8);
   }
 
-  vp10_convolve(src + src_stride * filter_center + filter_center, src_stride,
-                dst, dst_stride, w, h, interp_filter, subpel_x_q4, x_step_q4,
-                subpel_y_q4, y_step_q4, avg);
+  av1_convolve(src + src_stride * filter_center + filter_center, src_stride,
+               dst, dst_stride, w, h, interp_filter, subpel_x_q4, x_step_q4,
+               subpel_y_q4, y_step_q4, avg);
 
   const int16_t *x_filter =
-      vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
+      av1_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
   const int16_t *y_filter =
-      vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
+      av1_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
 
-  vpx_convolve8_c(src + src_stride * filter_center + filter_center, src_stride,
+  aom_convolve8_c(src + src_stride * filter_center + filter_center, src_stride,
                   dst1, dst_stride, x_filter, 16, y_filter, 16, w, h);
   EXPECT_EQ(dst[0], dst1[0]);
 }
-TEST(VP10ConvolveTest, vp10_convolve) {
+TEST(AV1ConvolveTest, av1_convolve) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
                                      EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter[0]);
+      av1_get_interp_filter_params(interp_filter[0]);
 #else
   INTERP_FILTER interp_filter = EIGHTTAP_REGULAR;
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
   int filter_size = filter_params.taps;
   int filter_center = filter_size / 2 - 1;
@@ -100,14 +100,14 @@
 
   for (subpel_x_q4 = 0; subpel_x_q4 < 16; subpel_x_q4++) {
     for (subpel_y_q4 = 0; subpel_y_q4 < 16; subpel_y_q4++) {
-      vp10_convolve(src + src_stride * filter_center + filter_center,
-                    src_stride, dst, dst_stride, w, h, interp_filter,
-                    subpel_x_q4, x_step_q4, subpel_y_q4, y_step_q4, avg);
+      av1_convolve(src + src_stride * filter_center + filter_center, src_stride,
+                   dst, dst_stride, w, h, interp_filter, subpel_x_q4, x_step_q4,
+                   subpel_y_q4, y_step_q4, avg);
 
       const int16_t *x_filter =
-          vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
+          av1_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
       const int16_t *y_filter =
-          vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
+          av1_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
 
       int temp[12];
       int dst_ref = 0;
@@ -125,17 +125,17 @@
   }
 }
 
-TEST(VP10ConvolveTest, vp10_convolve_avg) {
+TEST(AV1ConvolveTest, av1_convolve_avg) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
                                      EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter[0]);
+      av1_get_interp_filter_params(interp_filter[0]);
 #else
   INTERP_FILTER interp_filter = EIGHTTAP_REGULAR;
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
   int filter_size = filter_params.taps;
   int filter_center = filter_size / 2 - 1;
@@ -168,40 +168,40 @@
   for (subpel_x_q4 = 0; subpel_x_q4 < 16; subpel_x_q4++) {
     for (subpel_y_q4 = 0; subpel_y_q4 < 16; subpel_y_q4++) {
       avg = 0;
-      vp10_convolve(src0 + offset, src_stride, dst0, dst_stride, w, h,
-                    interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                    y_step_q4, avg);
+      av1_convolve(src0 + offset, src_stride, dst0, dst_stride, w, h,
+                   interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                   y_step_q4, avg);
       avg = 0;
-      vp10_convolve(src1 + offset, src_stride, dst1, dst_stride, w, h,
-                    interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                    y_step_q4, avg);
+      av1_convolve(src1 + offset, src_stride, dst1, dst_stride, w, h,
+                   interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                   y_step_q4, avg);
 
       avg = 0;
-      vp10_convolve(src0 + offset, src_stride, dst, dst_stride, w, h,
-                    interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                    y_step_q4, avg);
+      av1_convolve(src0 + offset, src_stride, dst, dst_stride, w, h,
+                   interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                   y_step_q4, avg);
       avg = 1;
-      vp10_convolve(src1 + offset, src_stride, dst, dst_stride, w, h,
-                    interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                    y_step_q4, avg);
+      av1_convolve(src1 + offset, src_stride, dst, dst_stride, w, h,
+                   interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                   y_step_q4, avg);
 
       EXPECT_EQ(dst[0], ROUND_POWER_OF_TWO(dst0[0] + dst1[0], 1));
     }
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-TEST(VP10ConvolveTest, vp10_highbd_convolve) {
+#if CONFIG_AOM_HIGHBITDEPTH
+TEST(AV1ConvolveTest, av1_highbd_convolve) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
                                      EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter[0]);
+      av1_get_interp_filter_params(interp_filter[0]);
 #else
   INTERP_FILTER interp_filter = EIGHTTAP_REGULAR;
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
   int filter_size = filter_params.taps;
   int filter_center = filter_size / 2 - 1;
@@ -225,15 +225,15 @@
 
   for (subpel_x_q4 = 0; subpel_x_q4 < 16; subpel_x_q4++) {
     for (subpel_y_q4 = 0; subpel_y_q4 < 16; subpel_y_q4++) {
-      vp10_highbd_convolve(
+      av1_highbd_convolve(
           CONVERT_TO_BYTEPTR(src + src_stride * filter_center + filter_center),
           src_stride, CONVERT_TO_BYTEPTR(dst), dst_stride, w, h, interp_filter,
           subpel_x_q4, x_step_q4, subpel_y_q4, y_step_q4, avg, bd);
 
       const int16_t *x_filter =
-          vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
+          av1_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
       const int16_t *y_filter =
-          vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
+          av1_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
 
       int temp[12];
       int dst_ref = 0;
@@ -252,17 +252,17 @@
   }
 }
 
-TEST(VP10ConvolveTest, vp10_highbd_convolve_avg) {
+TEST(AV1ConvolveTest, av1_highbd_convolve_avg) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 #if CONFIG_DUAL_FILTER
   INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
                                      EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter[0]);
+      av1_get_interp_filter_params(interp_filter[0]);
 #else
   INTERP_FILTER interp_filter = EIGHTTAP_REGULAR;
   InterpFilterParams filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
   int filter_size = filter_params.taps;
   int filter_center = filter_size / 2 - 1;
@@ -294,41 +294,41 @@
       int offset = filter_size * filter_center + filter_center;
 
       avg = 0;
-      vp10_highbd_convolve(CONVERT_TO_BYTEPTR(src0 + offset), src_stride,
-                           CONVERT_TO_BYTEPTR(dst0), dst_stride, w, h,
-                           interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                           y_step_q4, avg, bd);
+      av1_highbd_convolve(CONVERT_TO_BYTEPTR(src0 + offset), src_stride,
+                          CONVERT_TO_BYTEPTR(dst0), dst_stride, w, h,
+                          interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                          y_step_q4, avg, bd);
       avg = 0;
-      vp10_highbd_convolve(CONVERT_TO_BYTEPTR(src1 + offset), src_stride,
-                           CONVERT_TO_BYTEPTR(dst1), dst_stride, w, h,
-                           interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                           y_step_q4, avg, bd);
+      av1_highbd_convolve(CONVERT_TO_BYTEPTR(src1 + offset), src_stride,
+                          CONVERT_TO_BYTEPTR(dst1), dst_stride, w, h,
+                          interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                          y_step_q4, avg, bd);
 
       avg = 0;
-      vp10_highbd_convolve(CONVERT_TO_BYTEPTR(src0 + offset), src_stride,
-                           CONVERT_TO_BYTEPTR(dst), dst_stride, w, h,
-                           interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                           y_step_q4, avg, bd);
+      av1_highbd_convolve(CONVERT_TO_BYTEPTR(src0 + offset), src_stride,
+                          CONVERT_TO_BYTEPTR(dst), dst_stride, w, h,
+                          interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                          y_step_q4, avg, bd);
       avg = 1;
-      vp10_highbd_convolve(CONVERT_TO_BYTEPTR(src1 + offset), src_stride,
-                           CONVERT_TO_BYTEPTR(dst), dst_stride, w, h,
-                           interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
-                           y_step_q4, avg, bd);
+      av1_highbd_convolve(CONVERT_TO_BYTEPTR(src1 + offset), src_stride,
+                          CONVERT_TO_BYTEPTR(dst), dst_stride, w, h,
+                          interp_filter, subpel_x_q4, x_step_q4, subpel_y_q4,
+                          y_step_q4, avg, bd);
 
       EXPECT_EQ(dst[0], ROUND_POWER_OF_TWO(dst0[0] + dst1[0], 1));
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define CONVOLVE_SPEED_TEST 0
 #if CONVOLVE_SPEED_TEST
 #define highbd_convolve_speed(func, block_size, frame_size)                  \
-  TEST(VP10ConvolveTest, func##_speed_##block_size##_##frame_size) {         \
+  TEST(AV1ConvolveTest, func##_speed_##block_size##_##frame_size) {          \
     ACMRandom rnd(ACMRandom::DeterministicSeed());                           \
     INTERP_FILTER interp_filter = EIGHTTAP;                                  \
     InterpFilterParams filter_params =                                       \
-        vp10_get_interp_filter_params(interp_filter);                        \
+        av1_get_interp_filter_params(interp_filter);                         \
     int filter_size = filter_params.tap;                                     \
     int filter_center = filter_size / 2 - 1;                                 \
     DECLARE_ALIGNED(16, uint16_t,                                            \
@@ -346,9 +346,9 @@
     int h = block_size;                                                      \
                                                                              \
     const int16_t *filter_x =                                                \
-        vp10_get_interp_filter_kernel(filter_params, subpel_x_q4);           \
+        av1_get_interp_filter_kernel(filter_params, subpel_x_q4);            \
     const int16_t *filter_y =                                                \
-        vp10_get_interp_filter_kernel(filter_params, subpel_y_q4);           \
+        av1_get_interp_filter_kernel(filter_params, subpel_y_q4);            \
                                                                              \
     for (int i = 0; i < src_stride * src_stride; i++) {                      \
       src[i] = rnd.Rand16() % (1 << bd);                                     \
@@ -376,11 +376,11 @@
   }
 
 #define lowbd_convolve_speed(func, block_size, frame_size)                  \
-  TEST(VP10ConvolveTest, func##_speed_l_##block_size##_##frame_size) {      \
+  TEST(AV1ConvolveTest, func##_speed_l_##block_size##_##frame_size) {       \
     ACMRandom rnd(ACMRandom::DeterministicSeed());                          \
     INTERP_FILTER interp_filter = EIGHTTAP;                                 \
     InterpFilterParams filter_params =                                      \
-        vp10_get_interp_filter_params(interp_filter);                       \
+        av1_get_interp_filter_params(interp_filter);                        \
     int filter_size = filter_params.tap;                                    \
     int filter_center = filter_size / 2 - 1;                                \
     DECLARE_ALIGNED(16, uint8_t, src[(frame_size + 7) * (frame_size + 7)]); \
@@ -397,9 +397,9 @@
     int h = block_size;                                                     \
                                                                             \
     const int16_t *filter_x =                                               \
-        vp10_get_interp_filter_kernel(filter_params, subpel_x_q4);          \
+        av1_get_interp_filter_kernel(filter_params, subpel_x_q4);           \
     const int16_t *filter_y =                                               \
-        vp10_get_interp_filter_kernel(filter_params, subpel_y_q4);          \
+        av1_get_interp_filter_kernel(filter_params, subpel_y_q4);           \
                                                                             \
     for (int i = 0; i < src_stride * src_stride; i++) {                     \
       src[i] = rnd.Rand16() % (1 << bd);                                    \
@@ -424,28 +424,28 @@
   }
 
 // This experiment shows that when frame size is 64x64
-// vpx_highbd_convolve8_sse2 and vpx_convolve8_sse2's speed are similar.
+// aom_highbd_convolve8_sse2 and aom_convolve8_sse2's speed are similar.
 // However when frame size becomes 1024x1024
-// vpx_highbd_convolve8_sse2 is around 50% slower than vpx_convolve8_sse2
+// aom_highbd_convolve8_sse2 is around 50% slower than aom_convolve8_sse2
 // we think the bottleneck is from memory IO
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 8, 64);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 16, 64);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 32, 64);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 64, 64);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 8, 64);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 16, 64);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 32, 64);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 64, 64);
 
-lowbd_convolve_speed(vpx_convolve8_sse2, 8, 64);
-lowbd_convolve_speed(vpx_convolve8_sse2, 16, 64);
-lowbd_convolve_speed(vpx_convolve8_sse2, 32, 64);
-lowbd_convolve_speed(vpx_convolve8_sse2, 64, 64);
+lowbd_convolve_speed(aom_convolve8_sse2, 8, 64);
+lowbd_convolve_speed(aom_convolve8_sse2, 16, 64);
+lowbd_convolve_speed(aom_convolve8_sse2, 32, 64);
+lowbd_convolve_speed(aom_convolve8_sse2, 64, 64);
 
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 8, 1024);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 16, 1024);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 32, 1024);
-highbd_convolve_speed(vpx_highbd_convolve8_sse2, 64, 1024);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 8, 1024);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 16, 1024);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 32, 1024);
+highbd_convolve_speed(aom_highbd_convolve8_sse2, 64, 1024);
 
-lowbd_convolve_speed(vpx_convolve8_sse2, 8, 1024);
-lowbd_convolve_speed(vpx_convolve8_sse2, 16, 1024);
-lowbd_convolve_speed(vpx_convolve8_sse2, 32, 1024);
-lowbd_convolve_speed(vpx_convolve8_sse2, 64, 1024);
+lowbd_convolve_speed(aom_convolve8_sse2, 8, 1024);
+lowbd_convolve_speed(aom_convolve8_sse2, 16, 1024);
+lowbd_convolve_speed(aom_convolve8_sse2, 32, 1024);
+lowbd_convolve_speed(aom_convolve8_sse2, 64, 1024);
 #endif  // CONVOLVE_SPEED_TEST
 }  // namespace
diff --git a/test/vp10_dct_test.cc b/test/av1_dct_test.cc
similarity index 92%
rename from test/vp10_dct_test.cc
rename to test/av1_dct_test.cc
index 5d31adf..82b0199 100644
--- a/test/vp10_dct_test.cc
+++ b/test/av1_dct_test.cc
@@ -15,7 +15,7 @@
 #include "third_party/googletest/src/include/gtest/gtest.h"
 #include "test/acm_random.h"
 #include "test/util.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/msvc.h"
 
 #undef CONFIG_COEFFICIENT_RANGE_CHECKING
@@ -83,8 +83,8 @@
 };
 
 typedef std::tr1::tuple<FdctFunc, FdctFuncRef, int, int> FdctParam;
-class Vp10FwdTxfm : public TransTestBase,
-                    public ::testing::TestWithParam<FdctParam> {
+class AV1FwdTxfm : public TransTestBase,
+                   public ::testing::TestWithParam<FdctParam> {
  public:
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
@@ -95,10 +95,10 @@
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
+TEST_P(AV1FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10FwdTxfm,
+    C, AV1FwdTxfm,
     ::testing::Values(FdctParam(&fdct4, &reference_dct_1d, 4, 1),
                       FdctParam(&fdct8, &reference_dct_1d, 8, 1),
                       FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
diff --git a/test/vp10_ext_tile_test.cc b/test/av1_ext_tile_test.cc
similarity index 70%
rename from test/vp10_ext_tile_test.cc
rename to test/av1_ext_tile_test.cc
index 6ca7ec8..9563fd5 100644
--- a/test/vp10_ext_tile_test.cc
+++ b/test/av1_ext_tile_test.cc
@@ -29,31 +29,31 @@
 const int kImgWidth = 704;
 const int kImgHeight = 576;
 
-class VP10ExtTileTest
+class AV1ExtTileTest
     : public ::libaom_test::EncoderTest,
       public ::libaom_test::CodecTestWith2Params<libaom_test::TestMode, int> {
  protected:
-  VP10ExtTileTest()
+  AV1ExtTileTest()
       : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
         set_cpu_used_(GET_PARAM(2)) {
-    init_flags_ = VPX_CODEC_USE_PSNR;
-    vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+    init_flags_ = AOM_CODEC_USE_PSNR;
+    aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
     cfg.w = kImgWidth;
     cfg.h = kImgHeight;
 
     decoder_ = codec_->CreateDecoder(cfg, 0);
-    decoder_->Control(VP10_SET_DECODE_TILE_ROW, -1);
-    decoder_->Control(VP10_SET_DECODE_TILE_COL, -1);
+    decoder_->Control(AV1_SET_DECODE_TILE_ROW, -1);
+    decoder_->Control(AV1_SET_DECODE_TILE_COL, -1);
 
     // Allocate buffer to store tile image.
-    vpx_img_alloc(&tile_img_, VPX_IMG_FMT_I420, kImgWidth, kImgHeight, 32);
+    aom_img_alloc(&tile_img_, AOM_IMG_FMT_I420, kImgWidth, kImgHeight, 32);
 
     md5_.clear();
     tile_md5_.clear();
   }
 
-  virtual ~VP10ExtTileTest() {
-    vpx_img_free(&tile_img_);
+  virtual ~AV1ExtTileTest() {
+    aom_img_free(&tile_img_);
     delete decoder_;
   }
 
@@ -62,7 +62,7 @@
     SetMode(encoding_mode_);
 
     cfg_.g_lag_in_frames = 0;
-    cfg_.rc_end_usage = VPX_VBR;
+    cfg_.rc_end_usage = AOM_VBR;
     cfg_.g_error_resilient = 1;
 
     cfg_.rc_max_quantizer = 56;
@@ -73,30 +73,30 @@
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
       // Encode setting
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 0);
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
+      encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 0);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING, 1);
 
       // The tile size is 64x64.
-      encoder->Control(VP9E_SET_TILE_COLUMNS, kTileSize);
-      encoder->Control(VP9E_SET_TILE_ROWS, kTileSize);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, kTileSize);
+      encoder->Control(AV1E_SET_TILE_ROWS, kTileSize);
 #if CONFIG_EXT_PARTITION
       // Always use 64x64 max partition.
-      encoder->Control(VP10E_SET_SUPERBLOCK_SIZE, VPX_SUPERBLOCK_SIZE_64X64);
+      encoder->Control(AV1E_SET_SUPERBLOCK_SIZE, AOM_SUPERBLOCK_SIZE_64X64);
 #endif
     }
 
     if (video->frame() == 1) {
       frame_flags_ =
-          VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+          AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF | AOM_EFLAG_NO_UPD_ARF;
     }
   }
 
-  virtual void DecompressedFrameHook(const vpx_image_t &img,
-                                     vpx_codec_pts_t pts) {
+  virtual void DecompressedFrameHook(const aom_image_t &img,
+                                     aom_codec_pts_t pts) {
     // Skip 1 already decoded frame to be consistent with the decoder in this
     // test.
-    if (pts == (vpx_codec_pts_t)kSkip) return;
+    if (pts == (aom_codec_pts_t)kSkip) return;
 
     // Calculate MD5 as the reference.
     ::libaom_test::MD5 md5_res;
@@ -104,32 +104,32 @@
     md5_.push_back(md5_res.Get());
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     // Skip decoding 1 frame.
-    if (pkt->data.frame.pts == (vpx_codec_pts_t)kSkip) return;
+    if (pkt->data.frame.pts == (aom_codec_pts_t)kSkip) return;
 
-    bool IsLastFrame = (pkt->data.frame.pts == (vpx_codec_pts_t)(kLimit - 1));
+    bool IsLastFrame = (pkt->data.frame.pts == (aom_codec_pts_t)(kLimit - 1));
 
     // Decode the first (kLimit - 1) frames as whole frame, and decode the last
     // frame in single tiles.
     for (int r = 0; r < kImgHeight / kTIleSizeInPixels; ++r) {
       for (int c = 0; c < kImgWidth / kTIleSizeInPixels; ++c) {
         if (!IsLastFrame) {
-          decoder_->Control(VP10_SET_DECODE_TILE_ROW, -1);
-          decoder_->Control(VP10_SET_DECODE_TILE_COL, -1);
+          decoder_->Control(AV1_SET_DECODE_TILE_ROW, -1);
+          decoder_->Control(AV1_SET_DECODE_TILE_COL, -1);
         } else {
-          decoder_->Control(VP10_SET_DECODE_TILE_ROW, r);
-          decoder_->Control(VP10_SET_DECODE_TILE_COL, c);
+          decoder_->Control(AV1_SET_DECODE_TILE_ROW, r);
+          decoder_->Control(AV1_SET_DECODE_TILE_COL, c);
         }
 
-        const vpx_codec_err_t res = decoder_->DecodeFrame(
+        const aom_codec_err_t res = decoder_->DecodeFrame(
             reinterpret_cast<uint8_t *>(pkt->data.frame.buf),
             pkt->data.frame.sz);
-        if (res != VPX_CODEC_OK) {
+        if (res != AOM_CODEC_OK) {
           abort_ = true;
-          ASSERT_EQ(VPX_CODEC_OK, res);
+          ASSERT_EQ(AOM_CODEC_OK, res);
         }
-        const vpx_image_t *img = decoder_->GetDxData().Next();
+        const aom_image_t *img = decoder_->GetDxData().Next();
 
         if (!IsLastFrame) {
           if (img) {
@@ -168,29 +168,29 @@
   ::libaom_test::TestMode encoding_mode_;
   int set_cpu_used_;
   ::libaom_test::Decoder *decoder_;
-  vpx_image_t tile_img_;
+  aom_image_t tile_img_;
   std::vector<std::string> md5_;
   std::vector<std::string> tile_md5_;
 };
 
-TEST_P(VP10ExtTileTest, DecoderResultTest) {
+TEST_P(AV1ExtTileTest, DecoderResultTest) {
   ::libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", kImgWidth,
                                        kImgHeight, 30, 1, 0, kLimit);
   cfg_.rc_target_bitrate = 500;
-  cfg_.g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT;
+  cfg_.g_error_resilient = AOM_ERROR_RESILIENT_DEFAULT;
   cfg_.g_lag_in_frames = 0;
   cfg_.g_threads = 1;
 
   // Tile encoding
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 
   // Compare to check if two vectors are equal.
   ASSERT_EQ(md5_, tile_md5_);
 }
 
-VP10_INSTANTIATE_TEST_CASE(
+AV1_INSTANTIATE_TEST_CASE(
     // Now only test 2-pass mode.
-    VP10ExtTileTest, ::testing::Values(::libaom_test::kTwoPassGood),
+    AV1ExtTileTest, ::testing::Values(::libaom_test::kTwoPassGood),
     ::testing::Range(0, 4));
 }  // namespace
diff --git a/test/av1_fht16x16_test.cc b/test/av1_fht16x16_test.cc
new file mode 100644
index 0000000..a9b2549
--- /dev/null
+++ b/test/av1_fht16x16_test.cc
@@ -0,0 +1,220 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+#include "aom_ports/mem.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht16x16Param;
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht16x16_c(in, out, stride, tx_type);
+}
+
+#if CONFIG_AOM_HIGHBITDEPTH
+typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                           int tx_type, int bd);
+typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd);
+
+// Target optimized function, tx_type, bit depth
+typedef tuple<HbdHtFunc, int, int> HighbdHt16x16Param;
+
+void highbd_fht16x16_ref(const int16_t *in, int32_t *out, int stride,
+                         int tx_type, int bd) {
+  av1_fwd_txfm2d_16x16_c(in, out, stride, tx_type, bd);
+}
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+class AV1Trans16x16HT : public libaom_test::TransformTestBase,
+                        public ::testing::TestWithParam<Ht16x16Param> {
+ public:
+  virtual ~AV1Trans16x16HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 16;
+    fwd_txfm_ref = fht16x16_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans16x16HT, CoeffCheck) { RunCoeffCheck(); }
+
+#if CONFIG_AOM_HIGHBITDEPTH
+class AV1HighbdTrans16x16HT
+    : public ::testing::TestWithParam<HighbdHt16x16Param> {
+ public:
+  virtual ~AV1HighbdTrans16x16HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    fwd_txfm_ref_ = highbd_fht16x16_ref;
+    tx_type_ = GET_PARAM(1);
+    bit_depth_ = GET_PARAM(2);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = 256;
+
+    input_ = reinterpret_cast<int16_t *>(
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
+    output_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+    output_ref_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+  }
+
+  virtual void TearDown() {
+    aom_free(input_);
+    aom_free(output_);
+    aom_free(output_ref_);
+    libaom_test::ClearSystemState();
+  }
+
+ protected:
+  void RunBitexactCheck();
+
+ private:
+  HbdHtFunc fwd_txfm_;
+  HbdHtFunc fwd_txfm_ref_;
+  int tx_type_;
+  int bit_depth_;
+  int mask_;
+  int num_coeffs_;
+  int16_t *input_;
+  int32_t *output_;
+  int32_t *output_ref_;
+};
+
+void AV1HighbdTrans16x16HT::RunBitexactCheck() {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  int i, j;
+  const int stride = 16;
+  const int num_tests = 1000;
+
+  for (i = 0; i < num_tests; ++i) {
+    for (j = 0; j < num_coeffs_; ++j) {
+      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
+    }
+
+    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(
+        fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
+
+    for (j = 0; j < num_coeffs_; ++j) {
+      EXPECT_EQ(output_ref_[j], output_[j])
+          << "Not bit-exact result at index: " << j << " at test block: " << i;
+    }
+  }
+}
+
+TEST_P(AV1HighbdTrans16x16HT, HighbdCoeffCheck) { RunBitexactCheck(); }
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht16x16Param kArrayHt16x16Param_sse2[] = {
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 0, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 1, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 2, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 3, AOM_BITS_8,
+             256),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 4, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 5, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 6, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 7, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 8, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 10, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 11, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 12, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 13, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 14, AOM_BITS_8,
+             256),
+  make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 15, AOM_BITS_8,
+             256)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans16x16HT,
+                        ::testing::ValuesIn(kArrayHt16x16Param_sse2));
+#endif  // HAVE_SSE2
+
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+const HighbdHt16x16Param kArrayHBDHt16x16Param_sse4_1[] = {
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 0, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 0, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 1, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 1, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 2, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 2, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 3, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 3, 12),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 4, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 4, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 5, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 5, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 6, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 6, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 7, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 7, 12),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 8, 10),
+  make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 8, 12),
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdTrans16x16HT,
+                        ::testing::ValuesIn(kArrayHBDHt16x16Param_sse4_1));
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+
+}  // namespace
diff --git a/test/av1_fht4x4_test.cc b/test/av1_fht4x4_test.cc
new file mode 100644
index 0000000..104b8652
--- /dev/null
+++ b/test/av1_fht4x4_test.cc
@@ -0,0 +1,207 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+#include "aom_ports/mem.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x4Param;
+
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht4x4_c(in, out, stride, tx_type);
+}
+
+#if CONFIG_AOM_HIGHBITDEPTH
+typedef void (*IhighbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                              int tx_type, int bd);
+typedef void (*HBDFhtFunc)(const int16_t *input, int32_t *output, int stride,
+                           int tx_type, int bd);
+
+// HighbdHt4x4Param argument list:
+// <Target optimized function, tx_type, bit depth>
+typedef tuple<HBDFhtFunc, int, int> HighbdHt4x4Param;
+
+void highbd_fht4x4_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
+                       int bd) {
+  av1_fwd_txfm2d_4x4_c(in, out, stride, tx_type, bd);
+}
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+class AV1Trans4x4HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht4x4Param> {
+ public:
+  virtual ~AV1Trans4x4HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
+    fwd_txfm_ref = fht4x4_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans4x4HT, CoeffCheck) { RunCoeffCheck(); }
+
+#if CONFIG_AOM_HIGHBITDEPTH
+class AV1HighbdTrans4x4HT : public ::testing::TestWithParam<HighbdHt4x4Param> {
+ public:
+  virtual ~AV1HighbdTrans4x4HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    fwd_txfm_ref_ = highbd_fht4x4_ref;
+    tx_type_ = GET_PARAM(1);
+    bit_depth_ = GET_PARAM(2);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = 16;
+
+    input_ = reinterpret_cast<int16_t *>(
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
+    output_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+    output_ref_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+  }
+
+  virtual void TearDown() {
+    aom_free(input_);
+    aom_free(output_);
+    aom_free(output_ref_);
+    libaom_test::ClearSystemState();
+  }
+
+ protected:
+  void RunBitexactCheck();
+
+ private:
+  HBDFhtFunc fwd_txfm_;
+  HBDFhtFunc fwd_txfm_ref_;
+  int tx_type_;
+  int bit_depth_;
+  int mask_;
+  int num_coeffs_;
+  int16_t *input_;
+  int32_t *output_;
+  int32_t *output_ref_;
+};
+
+void AV1HighbdTrans4x4HT::RunBitexactCheck() {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  int i, j;
+  const int stride = 4;
+  const int num_tests = 1000;
+  const int num_coeffs = 16;
+
+  for (i = 0; i < num_tests; ++i) {
+    for (j = 0; j < num_coeffs; ++j) {
+      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
+    }
+
+    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
+    fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_);
+
+    for (j = 0; j < num_coeffs; ++j) {
+      EXPECT_EQ(output_[j], output_ref_[j])
+          << "Not bit-exact result at index: " << j << " at test block: " << i;
+    }
+  }
+}
+
+TEST_P(AV1HighbdTrans4x4HT, HighbdCoeffCheck) { RunBitexactCheck(); }
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht4x4Param kArrayHt4x4Param_sse2[] = {
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3, AOM_BITS_8, 16),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 4, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 5, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 6, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 7, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 8, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 10, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 11, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 12, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 13, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 14, AOM_BITS_8, 16),
+  make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 15, AOM_BITS_8, 16)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans4x4HT,
+                        ::testing::ValuesIn(kArrayHt4x4Param_sse2));
+#endif  // HAVE_SSE2
+
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+const HighbdHt4x4Param kArrayHighbdHt4x4Param[] = {
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 0, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 0, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 1, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 1, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 2, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 2, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 3, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 3, 12),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 4, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 4, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 5, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 5, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 6, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 6, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 7, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 7, 12),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 8, 10),
+  make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 8, 12),
+#endif  // CONFIG_EXT_TX
+};
+
+INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdTrans4x4HT,
+                        ::testing::ValuesIn(kArrayHighbdHt4x4Param));
+
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+
+}  // namespace
diff --git a/test/av1_fht8x8_test.cc b/test/av1_fht8x8_test.cc
new file mode 100644
index 0000000..3e8a4c8
--- /dev/null
+++ b/test/av1_fht8x8_test.cc
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+#include "aom_ports/mem.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+
+using libaom_test::FhtFunc;
+using std::tr1::tuple;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x8Param;
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht8x8_c(in, out, stride, tx_type);
+}
+
+#if CONFIG_AOM_HIGHBITDEPTH
+typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                           int tx_type, int bd);
+typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd);
+// Target optimized function, tx_type, bit depth
+typedef tuple<HbdHtFunc, int, int> HighbdHt8x8Param;
+
+void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
+                       int bd) {
+  av1_fwd_txfm2d_8x8_c(in, out, stride, tx_type, bd);
+}
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+class AV1Trans8x8HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht8x8Param> {
+ public:
+  virtual ~AV1Trans8x8HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 8;
+    fwd_txfm_ref = fht8x8_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans8x8HT, CoeffCheck) { RunCoeffCheck(); }
+
+#if CONFIG_AOM_HIGHBITDEPTH
+class AV1HighbdTrans8x8HT : public ::testing::TestWithParam<HighbdHt8x8Param> {
+ public:
+  virtual ~AV1HighbdTrans8x8HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    fwd_txfm_ref_ = highbd_fht8x8_ref;
+    tx_type_ = GET_PARAM(1);
+    bit_depth_ = GET_PARAM(2);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = 64;
+
+    input_ = reinterpret_cast<int16_t *>(
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
+    output_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+    output_ref_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * num_coeffs_));
+  }
+
+  virtual void TearDown() {
+    aom_free(input_);
+    aom_free(output_);
+    aom_free(output_ref_);
+    libaom_test::ClearSystemState();
+  }
+
+ protected:
+  void RunBitexactCheck();
+
+ private:
+  HbdHtFunc fwd_txfm_;
+  HbdHtFunc fwd_txfm_ref_;
+  int tx_type_;
+  int bit_depth_;
+  int mask_;
+  int num_coeffs_;
+  int16_t *input_;
+  int32_t *output_;
+  int32_t *output_ref_;
+};
+
+void AV1HighbdTrans8x8HT::RunBitexactCheck() {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  int i, j;
+  const int stride = 8;
+  const int num_tests = 1000;
+  const int num_coeffs = 64;
+
+  for (i = 0; i < num_tests; ++i) {
+    for (j = 0; j < num_coeffs; ++j) {
+      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
+    }
+
+    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(
+        fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
+
+    for (j = 0; j < num_coeffs; ++j) {
+      EXPECT_EQ(output_ref_[j], output_[j])
+          << "Not bit-exact result at index: " << j << " at test block: " << i;
+    }
+  }
+}
+
+TEST_P(AV1HighbdTrans8x8HT, HighbdCoeffCheck) { RunBitexactCheck(); }
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht8x8Param kArrayHt8x8Param_sse2[] = {
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3, AOM_BITS_8, 64),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 4, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 5, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 6, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 7, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 8, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 10, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 11, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 12, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 13, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 14, AOM_BITS_8, 64),
+  make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 15, AOM_BITS_8, 64)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x8HT,
+                        ::testing::ValuesIn(kArrayHt8x8Param_sse2));
+#endif  // HAVE_SSE2
+
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+const HighbdHt8x8Param kArrayHBDHt8x8Param_sse4_1[] = {
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 0, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 0, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 1, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 1, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 2, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 2, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 3, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 3, 12),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 4, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 4, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 5, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 5, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 6, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 6, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 7, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 7, 12),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 8, 10),
+  make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 8, 12),
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdTrans8x8HT,
+                        ::testing::ValuesIn(kArrayHBDHt8x8Param_sse4_1));
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+
+}  // namespace
diff --git a/test/vp10_fwd_txfm1d_test.cc b/test/av1_fwd_txfm1d_test.cc
similarity index 89%
rename from test/vp10_fwd_txfm1d_test.cc
rename to test/av1_fwd_txfm1d_test.cc
index 2b9cfe5..03bed19 100644
--- a/test/vp10_fwd_txfm1d_test.cc
+++ b/test/av1_fwd_txfm1d_test.cc
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "av1/common/vp10_fwd_txfm1d.h"
-#include "test/vp10_txfm_test.h"
+#include "av1/common/av1_fwd_txfm1d.h"
+#include "test/av1_txfm_test.h"
 
 using libaom_test::ACMRandom;
 using libaom_test::input_base;
@@ -26,15 +26,15 @@
 const int txfm_size_ls[5] = { 4, 8, 16, 32 };
 
 const TxfmFunc fwd_txfm_func_ls[2][5] = {
-  { vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new, NULL },
-  { vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, NULL }
+  { av1_fdct4_new, av1_fdct8_new, av1_fdct16_new, av1_fdct32_new, NULL },
+  { av1_fadst4_new, av1_fadst8_new, av1_fadst16_new, av1_fadst32_new, NULL }
 };
 
 // the maximum stage number of fwd/inv 1d dct/adst txfm is 12
 const int8_t cos_bit[12] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 };
 const int8_t range_bit[12] = { 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32 };
 
-TEST(vp10_fwd_txfm1d, round_shift) {
+TEST(av1_fwd_txfm1d, round_shift) {
   EXPECT_EQ(round_shift(7, 1), 4);
   EXPECT_EQ(round_shift(-7, 1), -3);
 
@@ -45,12 +45,12 @@
   EXPECT_EQ(round_shift(-8, 2), -2);
 }
 
-TEST(vp10_fwd_txfm1d, get_max_bit) {
+TEST(av1_fwd_txfm1d, get_max_bit) {
   int max_bit = get_max_bit(8);
   EXPECT_EQ(max_bit, 3);
 }
 
-TEST(vp10_fwd_txfm1d, cospi_arr) {
+TEST(av1_fwd_txfm1d, cospi_arr) {
   for (int i = 0; i < 7; i++) {
     for (int j = 0; j < 64; j++) {
       EXPECT_EQ(cospi_arr[i][j],
@@ -59,7 +59,7 @@
   }
 }
 
-TEST(vp10_fwd_txfm1d, clamp_block) {
+TEST(av1_fwd_txfm1d, clamp_block) {
   int16_t block[5][5] = { { 7, -5, 6, -3, 9 },
                           { 7, -5, 6, -3, 9 },
                           { 7, -5, 6, -3, 9 },
@@ -84,7 +84,7 @@
   }
 }
 
-TEST(vp10_fwd_txfm1d, accuracy) {
+TEST(av1_fwd_txfm1d, accuracy) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int si = 0; si < txfm_size_num; ++si) {
     int txfm_size = txfm_size_ls[si];
diff --git a/test/av1_fwd_txfm2d_test.cc b/test/av1_fwd_txfm2d_test.cc
new file mode 100644
index 0000000..675edb0
--- /dev/null
+++ b/test/av1_fwd_txfm2d_test.cc
@@ -0,0 +1,178 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "test/acm_random.h"
+#include "test/util.h"
+#include "test/av1_txfm_test.h"
+#include "av1/common/av1_txfm.h"
+#include "./av1_rtcd.h"
+
+using libaom_test::ACMRandom;
+using libaom_test::input_base;
+using libaom_test::bd;
+using libaom_test::compute_avg_abs_error;
+using libaom_test::Fwd_Txfm2d_Func;
+using libaom_test::TYPE_TXFM;
+
+namespace {
+#if CONFIG_AOM_HIGHBITDEPTH
+// tx_type_, tx_size_, max_error_, max_avg_error_
+typedef std::tr1::tuple<TX_TYPE, TX_SIZE, double, double> AV1FwdTxfm2dParam;
+
+class AV1FwdTxfm2d : public ::testing::TestWithParam<AV1FwdTxfm2dParam> {
+ public:
+  virtual void SetUp() {
+    tx_type_ = GET_PARAM(0);
+    tx_size_ = GET_PARAM(1);
+    max_error_ = GET_PARAM(2);
+    max_avg_error_ = GET_PARAM(3);
+    count_ = 500;
+    TXFM_2D_FLIP_CFG fwd_txfm_flip_cfg =
+        av1_get_fwd_txfm_cfg(tx_type_, tx_size_);
+    const TXFM_2D_CFG *fwd_txfm_cfg = fwd_txfm_flip_cfg.cfg;
+    int amplify_bit = fwd_txfm_cfg->shift[0] + fwd_txfm_cfg->shift[1] +
+                      fwd_txfm_cfg->shift[2];
+    ud_flip_ = fwd_txfm_flip_cfg.ud_flip;
+    lr_flip_ = fwd_txfm_flip_cfg.lr_flip;
+    amplify_factor_ =
+        amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit));
+
+    fwd_txfm_ = libaom_test::fwd_txfm_func_ls[tx_size_];
+    txfm1d_size_ = libaom_test::get_txfm1d_size(tx_size_);
+    txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
+    get_txfm1d_type(tx_type_, &type0_, &type1_);
+    input_ = reinterpret_cast<int16_t *>(
+        aom_memalign(16, sizeof(input_[0]) * txfm2d_size_));
+    output_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(output_[0]) * txfm2d_size_));
+    ref_input_ = reinterpret_cast<double *>(
+        aom_memalign(16, sizeof(ref_input_[0]) * txfm2d_size_));
+    ref_output_ = reinterpret_cast<double *>(
+        aom_memalign(16, sizeof(ref_output_[0]) * txfm2d_size_));
+  }
+
+  void RunFwdAccuracyCheck() {
+    ACMRandom rnd(ACMRandom::DeterministicSeed());
+    double avg_abs_error = 0;
+    for (int ci = 0; ci < count_; ci++) {
+      for (int ni = 0; ni < txfm2d_size_; ++ni) {
+        input_[ni] = rnd.Rand16() % input_base;
+        ref_input_[ni] = static_cast<double>(input_[ni]);
+        output_[ni] = 0;
+        ref_output_[ni] = 0;
+      }
+
+      fwd_txfm_(input_, output_, txfm1d_size_, tx_type_, bd);
+
+      if (lr_flip_ && ud_flip_)
+        libaom_test::fliplrud(ref_input_, txfm1d_size_, txfm1d_size_);
+      else if (lr_flip_)
+        libaom_test::fliplr(ref_input_, txfm1d_size_, txfm1d_size_);
+      else if (ud_flip_)
+        libaom_test::flipud(ref_input_, txfm1d_size_, txfm1d_size_);
+
+      reference_hybrid_2d(ref_input_, ref_output_, txfm1d_size_, type0_,
+                          type1_);
+
+      for (int ni = 0; ni < txfm2d_size_; ++ni) {
+        ref_output_[ni] = round(ref_output_[ni] * amplify_factor_);
+        EXPECT_GE(max_error_,
+                  fabs(output_[ni] - ref_output_[ni]) / amplify_factor_);
+      }
+      avg_abs_error += compute_avg_abs_error<int32_t, double>(
+          output_, ref_output_, txfm2d_size_);
+    }
+
+    avg_abs_error /= amplify_factor_;
+    avg_abs_error /= count_;
+    // max_abs_avg_error comes from upper bound of avg_abs_error
+    // printf("type0: %d type1: %d txfm_size: %d accuracy_avg_abs_error:
+    // %f\n", type0_, type1_, txfm1d_size_, avg_abs_error);
+    EXPECT_GE(max_avg_error_, avg_abs_error);
+  }
+
+  virtual void TearDown() {
+    aom_free(input_);
+    aom_free(output_);
+    aom_free(ref_input_);
+    aom_free(ref_output_);
+  }
+
+ private:
+  double max_error_;
+  double max_avg_error_;
+  int count_;
+  double amplify_factor_;
+  TX_TYPE tx_type_;
+  TX_SIZE tx_size_;
+  int txfm1d_size_;
+  int txfm2d_size_;
+  Fwd_Txfm2d_Func fwd_txfm_;
+  TYPE_TXFM type0_;
+  TYPE_TXFM type1_;
+  int16_t *input_;
+  int32_t *output_;
+  double *ref_input_;
+  double *ref_output_;
+  int ud_flip_;  // flip upside down
+  int lr_flip_;  // flip left to right
+};
+
+TEST_P(AV1FwdTxfm2d, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
+const AV1FwdTxfm2dParam av1_fwd_txfm2d_param_c[] = {
+#if CONFIG_EXT_TX
+  AV1FwdTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(FLIPADST_DCT, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(DCT_FLIPADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(ADST_FLIPADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(FLIPADST_ADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(FLIPADST_DCT, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(DCT_FLIPADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(ADST_FLIPADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(FLIPADST_ADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(FLIPADST_DCT, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(DCT_FLIPADST, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(ADST_FLIPADST, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(FLIPADST_ADST, TX_32X32, 70, 7),
+#endif
+  AV1FwdTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(ADST_ADST, TX_4X4, 2, 0.2),
+  AV1FwdTxfm2dParam(DCT_DCT, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(ADST_DCT, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(DCT_ADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(ADST_ADST, TX_8X8, 5, 0.6),
+  AV1FwdTxfm2dParam(DCT_DCT, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(ADST_DCT, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(DCT_ADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(ADST_ADST, TX_16X16, 11, 1.5),
+  AV1FwdTxfm2dParam(DCT_DCT, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(ADST_DCT, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(DCT_ADST, TX_32X32, 70, 7),
+  AV1FwdTxfm2dParam(ADST_ADST, TX_32X32, 70, 7)
+};
+
+INSTANTIATE_TEST_CASE_P(C, AV1FwdTxfm2d,
+                        ::testing::ValuesIn(av1_fwd_txfm2d_param_c));
+
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+}  // namespace
diff --git a/test/vp10_highbd_iht_test.cc b/test/av1_highbd_iht_test.cc
similarity index 82%
rename from test/vp10_highbd_iht_test.cc
rename to test/av1_highbd_iht_test.cc
index 6190971..648e744 100644
--- a/test/vp10_highbd_iht_test.cc
+++ b/test/av1_highbd_iht_test.cc
@@ -10,13 +10,13 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/enums.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 namespace {
@@ -39,9 +39,9 @@
 //    bit_depth>
 typedef tuple<HbdHtFunc, IHbdHtFunc, IHbdHtFunc, int, int, int> IHbdHtParam;
 
-class VP10HighbdInvHTNxN : public ::testing::TestWithParam<IHbdHtParam> {
+class AV1HighbdInvHTNxN : public ::testing::TestWithParam<IHbdHtParam> {
  public:
-  virtual ~VP10HighbdInvHTNxN() {}
+  virtual ~AV1HighbdInvHTNxN() {}
 
   virtual void SetUp() {
     txfm_ref_ = GET_PARAM(0);
@@ -52,25 +52,25 @@
     bit_depth_ = GET_PARAM(5);
 
     input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(input_[0]) * num_coeffs_));
+        aom_memalign(16, sizeof(input_[0]) * num_coeffs_));
 
     // Note:
     // Inverse transform input buffer is 32-byte aligned
     // Refer to <root>/av1/encoder/context_tree.c, function,
     // void alloc_mode_context().
     coeffs_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(32, sizeof(coeffs_[0]) * num_coeffs_));
+        aom_memalign(32, sizeof(coeffs_[0]) * num_coeffs_));
     output_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(32, sizeof(output_[0]) * num_coeffs_));
+        aom_memalign(32, sizeof(output_[0]) * num_coeffs_));
     output_ref_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(32, sizeof(output_ref_[0]) * num_coeffs_));
+        aom_memalign(32, sizeof(output_ref_[0]) * num_coeffs_));
   }
 
   virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(coeffs_);
-    vpx_free(output_);
-    vpx_free(output_ref_);
+    aom_free(input_);
+    aom_free(coeffs_);
+    aom_free(output_);
+    aom_free(output_ref_);
     libaom_test::ClearSystemState();
   }
 
@@ -103,7 +103,7 @@
   uint16_t *output_ref_;
 };
 
-void VP10HighbdInvHTNxN::RunBitexactCheck() {
+void AV1HighbdInvHTNxN::RunBitexactCheck() {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int stride = GetStride();
   const int num_tests = 20000;
@@ -128,22 +128,22 @@
   }
 }
 
-TEST_P(VP10HighbdInvHTNxN, InvTransResultCheck) { RunBitexactCheck(); }
+TEST_P(AV1HighbdInvHTNxN, InvTransResultCheck) { RunBitexactCheck(); }
 
 using std::tr1::make_tuple;
 
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-#define PARAM_LIST_4X4                                     \
-  &vp10_fwd_txfm2d_4x4_c, &vp10_inv_txfm2d_add_4x4_sse4_1, \
-      &vp10_inv_txfm2d_add_4x4_c, 16
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
+#define PARAM_LIST_4X4                                   \
+  &av1_fwd_txfm2d_4x4_c, &av1_inv_txfm2d_add_4x4_sse4_1, \
+      &av1_inv_txfm2d_add_4x4_c, 16
 
-#define PARAM_LIST_8X8                                     \
-  &vp10_fwd_txfm2d_8x8_c, &vp10_inv_txfm2d_add_8x8_sse4_1, \
-      &vp10_inv_txfm2d_add_8x8_c, 64
+#define PARAM_LIST_8X8                                   \
+  &av1_fwd_txfm2d_8x8_c, &av1_inv_txfm2d_add_8x8_sse4_1, \
+      &av1_inv_txfm2d_add_8x8_c, 64
 
-#define PARAM_LIST_16X16                                       \
-  &vp10_fwd_txfm2d_16x16_c, &vp10_inv_txfm2d_add_16x16_sse4_1, \
-      &vp10_inv_txfm2d_add_16x16_c, 256
+#define PARAM_LIST_16X16                                     \
+  &av1_fwd_txfm2d_16x16_c, &av1_inv_txfm2d_add_16x16_sse4_1, \
+      &av1_inv_txfm2d_add_16x16_c, 256
 
 const IHbdHtParam kArrayIhtParam[] = {
   // 16x16
@@ -211,8 +211,8 @@
 #endif
 };
 
-INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdInvHTNxN,
+INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdInvHTNxN,
                         ::testing::ValuesIn(kArrayIhtParam));
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
 
 }  // namespace
diff --git a/test/vp10_inv_txfm1d_test.cc b/test/av1_inv_txfm1d_test.cc
similarity index 82%
rename from test/vp10_inv_txfm1d_test.cc
rename to test/av1_inv_txfm1d_test.cc
index 744aae8..110d4c3 100644
--- a/test/vp10_inv_txfm1d_test.cc
+++ b/test/av1_inv_txfm1d_test.cc
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "test/vp10_txfm_test.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
-#include "av1/common/vp10_inv_txfm1d.h"
+#include "test/av1_txfm_test.h"
+#include "av1/common/av1_fwd_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d.h"
 
 using libaom_test::ACMRandom;
 using libaom_test::input_base;
@@ -21,20 +21,20 @@
 const int txfm_size_ls[5] = { 4, 8, 16, 32 };
 
 const TxfmFunc fwd_txfm_func_ls[2][5] = {
-  { vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new, NULL },
-  { vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, NULL }
+  { av1_fdct4_new, av1_fdct8_new, av1_fdct16_new, av1_fdct32_new, NULL },
+  { av1_fadst4_new, av1_fadst8_new, av1_fadst16_new, av1_fadst32_new, NULL }
 };
 
 const TxfmFunc inv_txfm_func_ls[2][5] = {
-  { vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new, NULL },
-  { vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new, NULL }
+  { av1_idct4_new, av1_idct8_new, av1_idct16_new, av1_idct32_new, NULL },
+  { av1_iadst4_new, av1_iadst8_new, av1_iadst16_new, av1_iadst32_new, NULL }
 };
 
 // the maximum stage number of fwd/inv 1d dct/adst txfm is 12
 const int8_t cos_bit[12] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 };
 const int8_t range_bit[12] = { 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32 };
 
-TEST(vp10_inv_txfm1d, round_trip) {
+TEST(av1_inv_txfm1d, round_trip) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int si = 0; si < txfm_size_num; ++si) {
     int txfm_size = txfm_size_ls[si];
diff --git a/test/av1_inv_txfm2d_test.cc b/test/av1_inv_txfm2d_test.cc
new file mode 100644
index 0000000..55a745f
--- /dev/null
+++ b/test/av1_inv_txfm2d_test.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "./av1_rtcd.h"
+#include "test/acm_random.h"
+#include "test/util.h"
+#include "test/av1_txfm_test.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
+
+using libaom_test::ACMRandom;
+using libaom_test::input_base;
+using libaom_test::bd;
+using libaom_test::compute_avg_abs_error;
+using libaom_test::Fwd_Txfm2d_Func;
+using libaom_test::Inv_Txfm2d_Func;
+
+namespace {
+
+#if CONFIG_AOM_HIGHBITDEPTH
+// AV1InvTxfm2dParam argument list:
+// tx_type_, tx_size_, max_error_, max_avg_error_
+typedef std::tr1::tuple<TX_TYPE, TX_SIZE, int, double> AV1InvTxfm2dParam;
+
+class AV1InvTxfm2d : public ::testing::TestWithParam<AV1InvTxfm2dParam> {
+ public:
+  virtual void SetUp() {
+    tx_type_ = GET_PARAM(0);
+    tx_size_ = GET_PARAM(1);
+    max_error_ = GET_PARAM(2);
+    max_avg_error_ = GET_PARAM(3);
+    txfm1d_size_ = libaom_test::get_txfm1d_size(tx_size_);
+    txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
+    count_ = 500;
+
+    input_ = reinterpret_cast<int16_t *>(
+        aom_memalign(16, sizeof(int16_t) * txfm2d_size_));
+    ref_input_ = reinterpret_cast<uint16_t *>(
+        aom_memalign(16, sizeof(uint16_t) * txfm2d_size_));
+    output_ = reinterpret_cast<int32_t *>(
+        aom_memalign(16, sizeof(int32_t) * txfm2d_size_));
+  }
+
+  void RunRoundtripCheck() {
+    const Fwd_Txfm2d_Func fwd_txfm_func =
+        libaom_test::fwd_txfm_func_ls[tx_size_];
+    const Inv_Txfm2d_Func inv_txfm_func =
+        libaom_test::inv_txfm_func_ls[tx_size_];
+    double avg_abs_error = 0;
+    ACMRandom rnd(ACMRandom::DeterministicSeed());
+    for (int ci = 0; ci < count_; ci++) {
+      for (int ni = 0; ni < txfm2d_size_; ++ni) {
+        if (ci == 0) {
+          int extreme_input = input_base - 1;
+          input_[ni] = extreme_input;  // extreme case
+          ref_input_[ni] = 0;
+        } else {
+          input_[ni] = rnd.Rand16() % input_base;
+          ref_input_[ni] = 0;
+        }
+      }
+
+      fwd_txfm_func(input_, output_, txfm1d_size_, tx_type_, bd);
+      inv_txfm_func(output_, ref_input_, txfm1d_size_, tx_type_, bd);
+
+      for (int ni = 0; ni < txfm2d_size_; ++ni) {
+        EXPECT_GE(max_error_, abs(input_[ni] - ref_input_[ni]));
+      }
+      avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>(
+          input_, ref_input_, txfm2d_size_);
+    }
+
+    avg_abs_error /= count_;
+    // max_abs_avg_error comes from upper bound of
+    // printf("txfm1d_size: %d accuracy_avg_abs_error: %f\n",
+    // txfm1d_size_, avg_abs_error);
+    EXPECT_GE(max_avg_error_, avg_abs_error);
+  }
+
+  virtual void TearDown() {
+    aom_free(input_);
+    aom_free(output_);
+    aom_free(ref_input_);
+  }
+
+ private:
+  int count_;
+  int max_error_;
+  double max_avg_error_;
+  TX_TYPE tx_type_;
+  TX_SIZE tx_size_;
+  int txfm1d_size_;
+  int txfm2d_size_;
+  int16_t *input_;
+  uint16_t *ref_input_;
+  int32_t *output_;
+};
+
+TEST_P(AV1InvTxfm2d, RunRoundtripCheck) { RunRoundtripCheck(); }
+
+const AV1InvTxfm2dParam av1_inv_txfm2d_param[] = {
+#if CONFIG_EXT_TX
+  AV1InvTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(FLIPADST_DCT, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(DCT_FLIPADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(ADST_FLIPADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(FLIPADST_ADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(FLIPADST_DCT, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(DCT_FLIPADST, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 0.04),
+  AV1InvTxfm2dParam(ADST_FLIPADST, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(FLIPADST_ADST, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(FLIPADST_DCT, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(DCT_FLIPADST, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(ADST_FLIPADST, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(FLIPADST_ADST, TX_32X32, 4, 0.4),
+#endif
+  AV1InvTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(ADST_ADST, TX_4X4, 2, 0.002),
+  AV1InvTxfm2dParam(DCT_DCT, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(ADST_DCT, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(DCT_ADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(ADST_ADST, TX_8X8, 2, 0.02),
+  AV1InvTxfm2dParam(DCT_DCT, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(ADST_DCT, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(DCT_ADST, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(ADST_ADST, TX_16X16, 2, 0.04),
+  AV1InvTxfm2dParam(DCT_DCT, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(ADST_DCT, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(DCT_ADST, TX_32X32, 4, 0.4),
+  AV1InvTxfm2dParam(ADST_ADST, TX_32X32, 4, 0.4)
+};
+
+INSTANTIATE_TEST_CASE_P(C, AV1InvTxfm2d,
+                        ::testing::ValuesIn(av1_inv_txfm2d_param));
+
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+}  // namespace
diff --git a/test/vp10_inv_txfm_test.cc b/test/av1_inv_txfm_test.cc
similarity index 81%
rename from test/vp10_inv_txfm_test.cc
rename to test/av1_inv_txfm_test.cc
index df8787d..c3e2eea 100644
--- a/test/vp10_inv_txfm_test.cc
+++ b/test/av1_inv_txfm_test.cc
@@ -14,16 +14,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/blockd.h"
 #include "av1/common/scan.h"
-#include "aom/vpx_integer.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "aom/aom_integer.h"
+#include "av1/common/av1_inv_txfm.h"
 
 using libaom_test::ACMRandom;
 
@@ -87,8 +87,8 @@
 };
 
 typedef std::tr1::tuple<IdctFunc, IdctFuncRef, int, int> IdctParam;
-class Vp10InvTxfm : public TransTestBase,
-                    public ::testing::TestWithParam<IdctParam> {
+class AV1InvTxfm : public TransTestBase,
+                   public ::testing::TestWithParam<IdctParam> {
  public:
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
@@ -99,24 +99,24 @@
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
+TEST_P(AV1InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10InvTxfm,
-    ::testing::Values(IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
-                      IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
-                      IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
-                      IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)));
+    C, AV1InvTxfm,
+    ::testing::Values(IdctParam(&av1_idct4_c, &reference_idct_1d, 4, 1),
+                      IdctParam(&av1_idct8_c, &reference_idct_1d, 8, 2),
+                      IdctParam(&av1_idct16_c, &reference_idct_1d, 16, 4),
+                      IdctParam(&av1_idct32_c, &reference_idct_1d, 32, 6)));
 
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
 typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
     PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
-class Vp10PartialIDctTest
+class AV1PartialIDctTest
     : public ::testing::TestWithParam<PartialInvTxfmParam> {
  public:
-  virtual ~Vp10PartialIDctTest() {}
+  virtual ~AV1PartialIDctTest() {}
   virtual void SetUp() {
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
@@ -135,7 +135,7 @@
   InvTxfmFunc partial_itxfm_;
 };
 
-TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
+TEST_P(AV1PartialIDctTest, RunQuantCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
@@ -201,7 +201,7 @@
       << "Error: partial inverse transform produces different results";
 }
 
-TEST_P(Vp10PartialIDctTest, ResultsMatch) {
+TEST_P(AV1PartialIDctTest, ResultsMatch) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
@@ -256,19 +256,19 @@
 using std::tr1::make_tuple;
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10PartialIDctTest,
-    ::testing::Values(make_tuple(&vp10_fdct32x32_c, &vp10_idct32x32_1024_add_c,
-                                 &vp10_idct32x32_34_add_c, TX_32X32, 34),
-                      make_tuple(&vp10_fdct32x32_c, &vp10_idct32x32_1024_add_c,
-                                 &vp10_idct32x32_1_add_c, TX_32X32, 1),
-                      make_tuple(&vp10_fdct16x16_c, &vp10_idct16x16_256_add_c,
-                                 &vp10_idct16x16_10_add_c, TX_16X16, 10),
-                      make_tuple(&vp10_fdct16x16_c, &vp10_idct16x16_256_add_c,
-                                 &vp10_idct16x16_1_add_c, TX_16X16, 1),
-                      make_tuple(&vp10_fdct8x8_c, &vp10_idct8x8_64_add_c,
-                                 &vp10_idct8x8_12_add_c, TX_8X8, 12),
-                      make_tuple(&vp10_fdct8x8_c, &vp10_idct8x8_64_add_c,
-                                 &vp10_idct8x8_1_add_c, TX_8X8, 1),
-                      make_tuple(&vp10_fdct4x4_c, &vp10_idct4x4_16_add_c,
-                                 &vp10_idct4x4_1_add_c, TX_4X4, 1)));
+    C, AV1PartialIDctTest,
+    ::testing::Values(make_tuple(&av1_fdct32x32_c, &av1_idct32x32_1024_add_c,
+                                 &av1_idct32x32_34_add_c, TX_32X32, 34),
+                      make_tuple(&av1_fdct32x32_c, &av1_idct32x32_1024_add_c,
+                                 &av1_idct32x32_1_add_c, TX_32X32, 1),
+                      make_tuple(&av1_fdct16x16_c, &av1_idct16x16_256_add_c,
+                                 &av1_idct16x16_10_add_c, TX_16X16, 10),
+                      make_tuple(&av1_fdct16x16_c, &av1_idct16x16_256_add_c,
+                                 &av1_idct16x16_1_add_c, TX_16X16, 1),
+                      make_tuple(&av1_fdct8x8_c, &av1_idct8x8_64_add_c,
+                                 &av1_idct8x8_12_add_c, TX_8X8, 12),
+                      make_tuple(&av1_fdct8x8_c, &av1_idct8x8_64_add_c,
+                                 &av1_idct8x8_1_add_c, TX_8X8, 1),
+                      make_tuple(&av1_fdct4x4_c, &av1_idct4x4_16_add_c,
+                                 &av1_idct4x4_1_add_c, TX_4X4, 1)));
 }  // namespace
diff --git a/test/vp10_quantize_test.cc b/test/av1_quantize_test.cc
similarity index 87%
rename from test/vp10_quantize_test.cc
rename to test/av1_quantize_test.cc
index f3990ae..88cddb3 100644
--- a/test/vp10_quantize_test.cc
+++ b/test/av1_quantize_test.cc
@@ -12,8 +12,8 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
@@ -45,7 +45,7 @@
 const int dequantRange = 32768;
 const int coeffRange = (1 << 20) - 1;
 
-class VP10QuantizeTest : public ::testing::TestWithParam<QuantizeFuncParams> {
+class AV1QuantizeTest : public ::testing::TestWithParam<QuantizeFuncParams> {
  public:
   void RunQuantizeTest() {
     ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -70,7 +70,7 @@
     QuantizeFpFunc quanFunc = params_.qFunc;
     QuantizeFpFunc quanFuncRef = params_.qFuncRef;
 
-    const scan_order scanOrder = vp10_default_scan_orders[txSize];
+    const scan_order scanOrder = av1_default_scan_orders[txSize];
     for (int i = 0; i < numTests; i++) {
       int err_count = 0;
       ref_eob = eob = -1;
@@ -137,7 +137,7 @@
     int log_scale = (txSize == TX_32X32);
     QuantizeFpFunc quanFunc = params_.qFunc;
     QuantizeFpFunc quanFuncRef = params_.qFuncRef;
-    const scan_order scanOrder = vp10_default_scan_orders[txSize];
+    const scan_order scanOrder = av1_default_scan_orders[txSize];
 
     for (int i = 0; i < numTests; i++) {
       ref_eob = eob = -1;
@@ -175,7 +175,7 @@
 
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 
-  virtual ~VP10QuantizeTest() {}
+  virtual ~AV1QuantizeTest() {}
 
  private:
   TX_SIZE getTxSize(int count) {
@@ -195,19 +195,19 @@
   QuantizeFuncParams params_;
 };
 
-TEST_P(VP10QuantizeTest, BitExactCheck) { RunQuantizeTest(); }
-TEST_P(VP10QuantizeTest, EobVerify) { RunEobTest(); }
+TEST_P(AV1QuantizeTest, BitExactCheck) { RunQuantizeTest(); }
+TEST_P(AV1QuantizeTest, EobVerify) { RunEobTest(); }
 
 #if HAVE_SSE4_1
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VP10QuantizeTest,
-    ::testing::Values(QuantizeFuncParams(&vp10_highbd_quantize_fp_sse4_1,
-                                         &vp10_highbd_quantize_fp_c, 16),
-                      QuantizeFuncParams(&vp10_highbd_quantize_fp_sse4_1,
-                                         &vp10_highbd_quantize_fp_c, 64),
-                      QuantizeFuncParams(&vp10_highbd_quantize_fp_sse4_1,
-                                         &vp10_highbd_quantize_fp_c, 256),
-                      QuantizeFuncParams(&vp10_highbd_quantize_fp_sse4_1,
-                                         &vp10_highbd_quantize_fp_c, 1024)));
+    SSE4_1, AV1QuantizeTest,
+    ::testing::Values(QuantizeFuncParams(&av1_highbd_quantize_fp_sse4_1,
+                                         &av1_highbd_quantize_fp_c, 16),
+                      QuantizeFuncParams(&av1_highbd_quantize_fp_sse4_1,
+                                         &av1_highbd_quantize_fp_c, 64),
+                      QuantizeFuncParams(&av1_highbd_quantize_fp_sse4_1,
+                                         &av1_highbd_quantize_fp_c, 256),
+                      QuantizeFuncParams(&av1_highbd_quantize_fp_sse4_1,
+                                         &av1_highbd_quantize_fp_c, 1024)));
 #endif  // HAVE_SSE4_1
 }  // namespace
diff --git a/test/vp10_txfm_test.cc b/test/av1_txfm_test.cc
similarity index 99%
rename from test/vp10_txfm_test.cc
rename to test/av1_txfm_test.cc
index 718d71b..8dc6321 100644
--- a/test/vp10_txfm_test.cc
+++ b/test/av1_txfm_test.cc
@@ -9,7 +9,7 @@
  */
 
 #include <stdio.h>
-#include "test/vp10_txfm_test.h"
+#include "test/av1_txfm_test.h"
 
 namespace libaom_test {
 
diff --git a/test/vp10_txfm_test.h b/test/av1_txfm_test.h
similarity index 84%
rename from test/vp10_txfm_test.h
rename to test/av1_txfm_test.h
index bfae073..8f0022d 100644
--- a/test/vp10_txfm_test.h
+++ b/test/av1_txfm_test.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_TXFM_TEST_H_
-#define VP10_TXFM_TEST_H_
+#ifndef AV1_TXFM_TEST_H_
+#define AV1_TXFM_TEST_H_
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -22,8 +22,8 @@
 
 #include "test/acm_random.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "./vp10_rtcd.h"
+#include "av1/common/av1_txfm.h"
+#include "./av1_rtcd.h"
 
 namespace libaom_test {
 typedef enum {
@@ -75,17 +75,17 @@
 static const int bd = 10;
 static const int input_base = (1 << bd);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const Fwd_Txfm2d_Func fwd_txfm_func_ls[TX_SIZES] = {
-  vp10_fwd_txfm2d_4x4_c, vp10_fwd_txfm2d_8x8_c, vp10_fwd_txfm2d_16x16_c,
-  vp10_fwd_txfm2d_32x32_c
+  av1_fwd_txfm2d_4x4_c, av1_fwd_txfm2d_8x8_c, av1_fwd_txfm2d_16x16_c,
+  av1_fwd_txfm2d_32x32_c
 };
 
 static const Inv_Txfm2d_Func inv_txfm_func_ls[TX_SIZES] = {
-  vp10_inv_txfm2d_add_4x4_c, vp10_inv_txfm2d_add_8x8_c,
-  vp10_inv_txfm2d_add_16x16_c, vp10_inv_txfm2d_add_32x32_c
+  av1_inv_txfm2d_add_4x4_c, av1_inv_txfm2d_add_8x8_c,
+  av1_inv_txfm2d_add_16x16_c, av1_inv_txfm2d_add_32x32_c
 };
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 }  // namespace libaom_test
-#endif  // VP10_TXFM_TEST_H_
+#endif  // AV1_TXFM_TEST_H_
diff --git a/test/vp10_wedge_utils_test.cc b/test/av1_wedge_utils_test.cc
similarity index 88%
rename from test/vp10_wedge_utils_test.cc
rename to test/av1_wedge_utils_test.cc
index 127a32f..539e9ef 100644
--- a/test/vp10_wedge_utils_test.cc
+++ b/test/av1_wedge_utils_test.cc
@@ -10,12 +10,12 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "./vpx_dsp_rtcd.h"
-#include "./vp10_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #include "av1/common/enums.h"
 
@@ -34,7 +34,7 @@
 static const int16_t kInt13Max = (1 << 12) - 1;
 
 //////////////////////////////////////////////////////////////////////////////
-// vp10_wedge_sse_from_residuals - functionality
+// av1_wedge_sse_from_residuals - functionality
 //////////////////////////////////////////////////////////////////////////////
 
 class WedgeUtilsSSEFuncTest : public testing::Test {
@@ -99,17 +99,17 @@
       p1[j] = clamp(s[j] + rng_(33) - 16, 0, UINT8_MAX);
     }
 
-    vpx_blend_a64_mask(p, w, p0, w, p1, w, m, w, h, w, 0, 0);
+    aom_blend_a64_mask(p, w, p0, w, p1, w, m, w, h, w, 0, 0);
 
-    vpx_subtract_block(h, w, r0, w, s, w, p0, w);
-    vpx_subtract_block(h, w, r1, w, s, w, p1, w);
+    aom_subtract_block(h, w, r0, w, s, w, p0, w);
+    aom_subtract_block(h, w, r1, w, s, w, p1, w);
 
-    vpx_subtract_block(h, w, r_ref, w, s, w, p, w);
+    aom_subtract_block(h, w, r_ref, w, s, w, p, w);
     equiv_blend_residuals(r_tst, r0, r1, m, N);
 
     for (int i = 0; i < N; ++i) ASSERT_EQ(r_ref[i], r_tst[i]);
 
-    uint64_t ref_sse = vpx_sum_squares_i16(r_ref, N);
+    uint64_t ref_sse = aom_sum_squares_i16(r_ref, N);
     uint64_t tst_sse = equiv_sse_from_residuals(r0, r1, m, N);
 
     ASSERT_EQ(ref_sse, tst_sse);
@@ -146,14 +146,14 @@
     for (int i = 0; i < N; i++) r0[i] = r1[i] + d[i];
 
     const uint64_t ref_res = sse_from_residuals(r0, r1, m, N);
-    const uint64_t tst_res = vp10_wedge_sse_from_residuals(r1, d, m, N);
+    const uint64_t tst_res = av1_wedge_sse_from_residuals(r1, d, m, N);
 
     ASSERT_EQ(ref_res, tst_res);
   }
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// vp10_wedge_sse_from_residuals - optimizations
+// av1_wedge_sse_from_residuals - optimizations
 //////////////////////////////////////////////////////////////////////////////
 
 typedef uint64_t (*FSSE)(const int16_t *r1, const int16_t *d, const uint8_t *m,
@@ -220,13 +220,13 @@
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
     SSE2, WedgeUtilsSSEOptTest,
-    ::testing::Values(TestFuncsFSSE(vp10_wedge_sse_from_residuals_c,
-                                    vp10_wedge_sse_from_residuals_sse2)));
+    ::testing::Values(TestFuncsFSSE(av1_wedge_sse_from_residuals_c,
+                                    av1_wedge_sse_from_residuals_sse2)));
 
 #endif  // HAVE_SSE2
 
 //////////////////////////////////////////////////////////////////////////////
-// vp10_wedge_sign_from_residuals
+// av1_wedge_sign_from_residuals
 //////////////////////////////////////////////////////////////////////////////
 
 typedef int (*FSign)(const int16_t *ds, const uint8_t *m, int N, int64_t limit);
@@ -251,12 +251,12 @@
       m[i] = rng_(MAX_MASK_VALUE + 1);
     }
 
-    const int maxN = VPXMIN(kMaxSize, MAX_SB_SQUARE);
+    const int maxN = AOMMIN(kMaxSize, MAX_SB_SQUARE);
     const int N = 64 * (rng_(maxN / 64 - 1) + 1);
 
     int64_t limit;
-    limit = (int64_t)vpx_sum_squares_i16(r0, N);
-    limit -= (int64_t)vpx_sum_squares_i16(r1, N);
+    limit = (int64_t)aom_sum_squares_i16(r0, N);
+    limit -= (int64_t)aom_sum_squares_i16(r1, N);
     limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
 
     for (int i = 0; i < N; i++)
@@ -306,12 +306,12 @@
 
     for (int i = 0; i < MAX_SB_SQUARE; ++i) m[i] = MAX_MASK_VALUE;
 
-    const int maxN = VPXMIN(kMaxSize, MAX_SB_SQUARE);
+    const int maxN = AOMMIN(kMaxSize, MAX_SB_SQUARE);
     const int N = 64 * (rng_(maxN / 64 - 1) + 1);
 
     int64_t limit;
-    limit = (int64_t)vpx_sum_squares_i16(r0, N);
-    limit -= (int64_t)vpx_sum_squares_i16(r1, N);
+    limit = (int64_t)aom_sum_squares_i16(r0, N);
+    limit -= (int64_t)aom_sum_squares_i16(r1, N);
     limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
 
     for (int i = 0; i < N; i++)
@@ -329,13 +329,13 @@
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, WedgeUtilsSignOptTest,
-    ::testing::Values(TestFuncsFSign(vp10_wedge_sign_from_residuals_c,
-                                     vp10_wedge_sign_from_residuals_sse2)));
+    ::testing::Values(TestFuncsFSign(av1_wedge_sign_from_residuals_c,
+                                     av1_wedge_sign_from_residuals_sse2)));
 
 #endif  // HAVE_SSE2
 
 //////////////////////////////////////////////////////////////////////////////
-// vp10_wedge_compute_delta_squares
+// av1_wedge_compute_delta_squares
 //////////////////////////////////////////////////////////////////////////////
 
 typedef void (*FDS)(int16_t *d, const int16_t *a, const int16_t *b, int N);
@@ -374,8 +374,8 @@
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, WedgeUtilsDeltaSquaresOptTest,
-    ::testing::Values(TestFuncsFDS(vp10_wedge_compute_delta_squares_c,
-                                   vp10_wedge_compute_delta_squares_sse2)));
+    ::testing::Values(TestFuncsFDS(av1_wedge_compute_delta_squares_c,
+                                   av1_wedge_compute_delta_squares_sse2)));
 
 #endif  // HAVE_SSE2
 
diff --git a/test/avg_test.cc b/test/avg_test.cc
index eb3e8b1..d2b83cd 100644
--- a/test/avg_test.cc
+++ b/test/avg_test.cc
@@ -14,14 +14,14 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 using libaom_test::ACMRandom;
 
@@ -32,11 +32,11 @@
 
   static void SetUpTestCase() {
     source_data_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(kDataAlignment, kDataBlockSize));
+        aom_memalign(kDataAlignment, kDataBlockSize));
   }
 
   static void TearDownTestCase() {
-    vpx_free(source_data_);
+    aom_free(source_data_);
     source_data_ = NULL;
   }
 
@@ -131,15 +131,15 @@
  protected:
   virtual void SetUp() {
     hbuf_asm_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(kDataAlignment, sizeof(*hbuf_asm_) * 16));
+        aom_memalign(kDataAlignment, sizeof(*hbuf_asm_) * 16));
     hbuf_c_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(kDataAlignment, sizeof(*hbuf_c_) * 16));
+        aom_memalign(kDataAlignment, sizeof(*hbuf_c_) * 16));
   }
 
   virtual void TearDown() {
-    vpx_free(hbuf_c_);
+    aom_free(hbuf_c_);
     hbuf_c_ = NULL;
-    vpx_free(hbuf_asm_);
+    aom_free(hbuf_asm_);
     hbuf_asm_ = NULL;
   }
 
@@ -194,13 +194,13 @@
     satd_func_ = GET_PARAM(1);
     rnd_.Reset(ACMRandom::DeterministicSeed());
     src_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(*src_) * satd_size_));
+        aom_memalign(16, sizeof(*src_) * satd_size_));
     ASSERT_TRUE(src_ != NULL);
   }
 
   virtual void TearDown() {
     libaom_test::ClearSystemState();
-    vpx_free(src_);
+    aom_free(src_);
   }
 
   void FillConstant(const int16_t val) {
@@ -309,86 +309,86 @@
 
 INSTANTIATE_TEST_CASE_P(
     C, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
-                      make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
+    ::testing::Values(make_tuple(16, 16, 1, 8, &aom_avg_8x8_c),
+                      make_tuple(16, 16, 1, 4, &aom_avg_4x4_c)));
 
 INSTANTIATE_TEST_CASE_P(C, SatdTest,
-                        ::testing::Values(make_tuple(16, &vpx_satd_c),
-                                          make_tuple(64, &vpx_satd_c),
-                                          make_tuple(256, &vpx_satd_c),
-                                          make_tuple(1024, &vpx_satd_c)));
+                        ::testing::Values(make_tuple(16, &aom_satd_c),
+                                          make_tuple(64, &aom_satd_c),
+                                          make_tuple(256, &aom_satd_c),
+                                          make_tuple(1024, &aom_satd_c)));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
     SSE2, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
-                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
-                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
-                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
-                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
-                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_sse2),
+                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_sse2),
+                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_sse2),
+                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_sse2),
+                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_sse2),
+                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_sse2)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, IntProRowTest,
-    ::testing::Values(make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
-                      make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
-                      make_tuple(64, &vpx_int_pro_row_sse2,
-                                 &vpx_int_pro_row_c)));
+    ::testing::Values(make_tuple(16, &aom_int_pro_row_sse2, &aom_int_pro_row_c),
+                      make_tuple(32, &aom_int_pro_row_sse2, &aom_int_pro_row_c),
+                      make_tuple(64, &aom_int_pro_row_sse2,
+                                 &aom_int_pro_row_c)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, IntProColTest,
-    ::testing::Values(make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
-                      make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
-                      make_tuple(64, &vpx_int_pro_col_sse2,
-                                 &vpx_int_pro_col_c)));
+    ::testing::Values(make_tuple(16, &aom_int_pro_col_sse2, &aom_int_pro_col_c),
+                      make_tuple(32, &aom_int_pro_col_sse2, &aom_int_pro_col_c),
+                      make_tuple(64, &aom_int_pro_col_sse2,
+                                 &aom_int_pro_col_c)));
 
 INSTANTIATE_TEST_CASE_P(SSE2, SatdTest,
-                        ::testing::Values(make_tuple(16, &vpx_satd_sse2),
-                                          make_tuple(64, &vpx_satd_sse2),
-                                          make_tuple(256, &vpx_satd_sse2),
-                                          make_tuple(1024, &vpx_satd_sse2)));
+                        ::testing::Values(make_tuple(16, &aom_satd_sse2),
+                                          make_tuple(64, &aom_satd_sse2),
+                                          make_tuple(256, &aom_satd_sse2),
+                                          make_tuple(1024, &aom_satd_sse2)));
 #endif
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(
     NEON, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
-                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
-                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
-                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
-                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
-                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_neon),
+                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_neon),
+                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_neon),
+                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_neon),
+                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_neon),
+                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_neon)));
 
 INSTANTIATE_TEST_CASE_P(
     NEON, IntProRowTest,
-    ::testing::Values(make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
-                      make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
-                      make_tuple(64, &vpx_int_pro_row_neon,
-                                 &vpx_int_pro_row_c)));
+    ::testing::Values(make_tuple(16, &aom_int_pro_row_neon, &aom_int_pro_row_c),
+                      make_tuple(32, &aom_int_pro_row_neon, &aom_int_pro_row_c),
+                      make_tuple(64, &aom_int_pro_row_neon,
+                                 &aom_int_pro_row_c)));
 
 INSTANTIATE_TEST_CASE_P(
     NEON, IntProColTest,
-    ::testing::Values(make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
-                      make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
-                      make_tuple(64, &vpx_int_pro_col_neon,
-                                 &vpx_int_pro_col_c)));
+    ::testing::Values(make_tuple(16, &aom_int_pro_col_neon, &aom_int_pro_col_c),
+                      make_tuple(32, &aom_int_pro_col_neon, &aom_int_pro_col_c),
+                      make_tuple(64, &aom_int_pro_col_neon,
+                                 &aom_int_pro_col_c)));
 
 INSTANTIATE_TEST_CASE_P(NEON, SatdTest,
-                        ::testing::Values(make_tuple(16, &vpx_satd_neon),
-                                          make_tuple(64, &vpx_satd_neon),
-                                          make_tuple(256, &vpx_satd_neon),
-                                          make_tuple(1024, &vpx_satd_neon)));
+                        ::testing::Values(make_tuple(16, &aom_satd_neon),
+                                          make_tuple(64, &aom_satd_neon),
+                                          make_tuple(256, &aom_satd_neon),
+                                          make_tuple(1024, &aom_satd_neon)));
 #endif
 
 #if HAVE_MSA
 INSTANTIATE_TEST_CASE_P(
     MSA, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
-                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
-                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
-                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
-                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
-                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_msa),
+                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_msa),
+                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_msa),
+                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_msa),
+                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_msa),
+                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_msa)));
 #endif
 
 }  // namespace
diff --git a/test/blend_a64_mask_1d_test.cc b/test/blend_a64_mask_1d_test.cc
index c5c5929..f0f8d65 100644
--- a/test/blend_a64_mask_1d_test.cc
+++ b/test/blend_a64_mask_1d_test.cc
@@ -17,11 +17,11 @@
 
 #include "test/function_equivalence_test.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 
 #include "av1/common/enums.h"
 
@@ -142,7 +142,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
+      mask_[i] = rng_(AOM_BLEND_A64_MAX_ALPHA + 1);
 
     Common();
   }
@@ -158,7 +158,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
+      mask_[i] = rng_(2) + AOM_BLEND_A64_MAX_ALPHA - 1;
 
     Common();
   }
@@ -174,7 +174,7 @@
   for (int row = 0; row < h; ++row)
     for (int col = 0; col < w; ++col) mask2d[row][col] = mask[col];
 
-  vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+  aom_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                        &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize, h, w,
                        0, 0);
 }
@@ -189,25 +189,25 @@
   for (int row = 0; row < h; ++row)
     for (int col = 0; col < w; ++col) mask2d[row][col] = mask[row];
 
-  vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+  aom_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                        &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize, h, w,
                        0, 0);
 }
 
 INSTANTIATE_TEST_CASE_P(
     C, BlendA64Mask1DTest8B,
-    ::testing::Values(TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_c),
-                      TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_c)));
+    ::testing::Values(TestFuncs(blend_a64_hmask_ref, aom_blend_a64_hmask_c),
+                      TestFuncs(blend_a64_vmask_ref, aom_blend_a64_vmask_c)));
 
 #if HAVE_SSE4_1
 INSTANTIATE_TEST_CASE_P(
     SSE4_1, BlendA64Mask1DTest8B,
     ::testing::Values(
-        TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_sse4_1),
-        TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_sse4_1)));
+        TestFuncs(blend_a64_hmask_ref, aom_blend_a64_hmask_sse4_1),
+        TestFuncs(blend_a64_vmask_ref, aom_blend_a64_vmask_sse4_1)));
 #endif  // HAVE_SSE4_1
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 //////////////////////////////////////////////////////////////////////////////
 // High bit-depth version
 //////////////////////////////////////////////////////////////////////////////
@@ -253,7 +253,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
+      mask_[i] = rng_(AOM_BLEND_A64_MAX_ALPHA + 1);
 
     Common();
   }
@@ -278,7 +278,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
+      mask_[i] = rng_(2) + AOM_BLEND_A64_MAX_ALPHA - 1;
 
     Common();
   }
@@ -294,7 +294,7 @@
   for (int row = 0; row < h; ++row)
     for (int col = 0; col < w; ++col) mask2d[row][col] = mask[col];
 
-  vpx_highbd_blend_a64_mask_c(
+  aom_highbd_blend_a64_mask_c(
       dst, dst_stride, src0, src0_stride, src1, src1_stride, &mask2d[0][0],
       BlendA64Mask1DTestHBD::kMaxMaskSize, h, w, 0, 0, bd);
 }
@@ -309,7 +309,7 @@
   for (int row = 0; row < h; ++row)
     for (int col = 0; col < w; ++col) mask2d[row][col] = mask[row];
 
-  vpx_highbd_blend_a64_mask_c(
+  aom_highbd_blend_a64_mask_c(
       dst, dst_stride, src0, src0_stride, src1, src1_stride, &mask2d[0][0],
       BlendA64Mask1DTestHBD::kMaxMaskSize, h, w, 0, 0, bd);
 }
@@ -317,18 +317,18 @@
 INSTANTIATE_TEST_CASE_P(
     C, BlendA64Mask1DTestHBD,
     ::testing::Values(TestFuncsHBD(highbd_blend_a64_hmask_ref,
-                                   vpx_highbd_blend_a64_hmask_c),
+                                   aom_highbd_blend_a64_hmask_c),
                       TestFuncsHBD(highbd_blend_a64_vmask_ref,
-                                   vpx_highbd_blend_a64_vmask_c)));
+                                   aom_highbd_blend_a64_vmask_c)));
 
 #if HAVE_SSE4_1
 INSTANTIATE_TEST_CASE_P(
     SSE4_1, BlendA64Mask1DTestHBD,
     ::testing::Values(TestFuncsHBD(highbd_blend_a64_hmask_ref,
-                                   vpx_highbd_blend_a64_hmask_sse4_1),
+                                   aom_highbd_blend_a64_hmask_sse4_1),
                       TestFuncsHBD(highbd_blend_a64_vmask_ref,
-                                   vpx_highbd_blend_a64_vmask_sse4_1)));
+                                   aom_highbd_blend_a64_vmask_sse4_1)));
 #endif  // HAVE_SSE4_1
 
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/blend_a64_mask_test.cc b/test/blend_a64_mask_test.cc
index 3fe6112..4c9b1d2 100644
--- a/test/blend_a64_mask_test.cc
+++ b/test/blend_a64_mask_test.cc
@@ -17,11 +17,11 @@
 
 #include "test/function_equivalence_test.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 
 #include "av1/common/enums.h"
 
@@ -154,7 +154,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
+      mask_[i] = rng_(AOM_BLEND_A64_MAX_ALPHA + 1);
 
     Common();
   }
@@ -170,7 +170,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
+      mask_[i] = rng_(2) + AOM_BLEND_A64_MAX_ALPHA - 1;
 
     Common();
   }
@@ -179,10 +179,10 @@
 #if HAVE_SSE4_1
 INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, BlendA64MaskTest8B,
                         ::testing::Values(TestFuncs(
-                            vpx_blend_a64_mask_c, vpx_blend_a64_mask_sse4_1)));
+                            aom_blend_a64_mask_c, aom_blend_a64_mask_sse4_1)));
 #endif  // HAVE_SSE4_1
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 //////////////////////////////////////////////////////////////////////////////
 // High bit-depth version
 //////////////////////////////////////////////////////////////////////////////
@@ -229,7 +229,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
+      mask_[i] = rng_(AOM_BLEND_A64_MAX_ALPHA + 1);
 
     Common();
   }
@@ -254,7 +254,7 @@
     }
 
     for (int i = 0; i < kMaxMaskSize; ++i)
-      mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
+      mask_[i] = rng_(2) + AOM_BLEND_A64_MAX_ALPHA - 1;
 
     Common();
   }
@@ -263,8 +263,8 @@
 #if HAVE_SSE4_1
 INSTANTIATE_TEST_CASE_P(
     SSE4_1_C_COMPARE, BlendA64MaskTestHBD,
-    ::testing::Values(TestFuncsHBD(vpx_highbd_blend_a64_mask_c,
-                                   vpx_highbd_blend_a64_mask_sse4_1)));
+    ::testing::Values(TestFuncsHBD(aom_highbd_blend_a64_mask_c,
+                                   aom_highbd_blend_a64_mask_sse4_1)));
 #endif  // HAVE_SSE4_1
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index 949907b..551a08f 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -15,7 +15,7 @@
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
 #include "test/acm_random.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/bitreader.h"
 #include "aom_dsp/bitwriter.h"
 
@@ -25,7 +25,7 @@
 const int num_tests = 10;
 }  // namespace
 
-TEST(VP9, TestBitIO) {
+TEST(AV1, TestBitIO) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int n = 0; n < num_tests; ++n) {
     for (int method = 0; method <= 7; ++method) {  // we generate various proba
@@ -51,9 +51,9 @@
         const int random_seed = 6432;
         const int kBufferSize = 10000;
         ACMRandom bit_rnd(random_seed);
-        vpx_writer bw;
+        aom_writer bw;
         uint8_t bw_buffer[kBufferSize];
-        vpx_start_encode(&bw, bw_buffer);
+        aom_start_encode(&bw, bw_buffer);
 
         int bit = (bit_method == 0) ? 0 : (bit_method == 1) ? 1 : 0;
         for (int i = 0; i < kBitsToTest; ++i) {
@@ -62,16 +62,16 @@
           } else if (bit_method == 3) {
             bit = bit_rnd(2);
           }
-          vpx_write(&bw, bit, static_cast<int>(probas[i]));
+          aom_write(&bw, bit, static_cast<int>(probas[i]));
         }
 
-        vpx_stop_encode(&bw);
+        aom_stop_encode(&bw);
 
         // First bit should be zero
         GTEST_ASSERT_EQ(bw_buffer[0] & 0x80, 0);
 
-        vpx_reader br;
-        vpx_reader_init(&br, bw_buffer, kBufferSize, NULL, NULL);
+        aom_reader br;
+        aom_reader_init(&br, bw_buffer, kBufferSize, NULL, NULL);
         bit_rnd.Reset(random_seed);
         for (int i = 0; i < kBitsToTest; ++i) {
           if (bit_method == 2) {
@@ -79,7 +79,7 @@
           } else if (bit_method == 3) {
             bit = bit_rnd(2);
           }
-          GTEST_ASSERT_EQ(vpx_read(&br, probas[i]), bit)
+          GTEST_ASSERT_EQ(aom_read(&br, probas[i]), bit)
               << "pos: " << i << " / " << kBitsToTest
               << " bit_method: " << bit_method << " method: " << method;
         }
diff --git a/test/borders_test.cc b/test/borders_test.cc
index e631da5..39125c0 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -32,16 +32,16 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, 1);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-      encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-      encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-      encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+      encoder->Control(AOME_SET_CPUUSED, 1);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+      encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+      encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+      encoder->Control(AOME_SET_ARNR_TYPE, 3);
     }
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
+    if (pkt->data.frame.flags & AOM_FRAME_IS_KEY) {
     }
   }
 };
@@ -79,6 +79,6 @@
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(BordersTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood));
+AV1_INSTANTIATE_TEST_CASE(BordersTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/clear_system_state.h b/test/clear_system_state.h
index b80cceb..e8fcf6d 100644
--- a/test/clear_system_state.h
+++ b/test/clear_system_state.h
@@ -10,7 +10,7 @@
 #ifndef TEST_CLEAR_SYSTEM_STATE_H_
 #define TEST_CLEAR_SYSTEM_STATE_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #if ARCH_X86 || ARCH_X86_64
 #include "aom_ports/x86.h"
 #endif
@@ -21,7 +21,7 @@
 // test cases.
 inline void ClearSystemState() {
 #if ARCH_X86 || ARCH_X86_64
-  vpx_reset_mmx_state();
+  aom_reset_mmx_state();
 #endif
 }
 
diff --git a/test/codec_factory.h b/test/codec_factory.h
index abbf2e1..84d40a1 100644
--- a/test/codec_factory.h
+++ b/test/codec_factory.h
@@ -10,14 +10,14 @@
 #ifndef TEST_CODEC_FACTORY_H_
 #define TEST_CODEC_FACTORY_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_decoder.h"
-#include "aom/vpx_encoder.h"
-#if CONFIG_VP10_ENCODER
-#include "aom/vp8cx.h"
+#include "./aom_config.h"
+#include "aom/aom_decoder.h"
+#include "aom/aom_encoder.h"
+#if CONFIG_AV1_ENCODER
+#include "aom/aomcx.h"
 #endif
-#if CONFIG_VP10_DECODER
-#include "aom/vp8dx.h"
+#if CONFIG_AV1_DECODER
+#include "aom/aomdx.h"
 #endif
 
 #include "test/decode_test_driver.h"
@@ -32,20 +32,20 @@
 
   virtual ~CodecFactory() {}
 
-  virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
+  virtual Decoder *CreateDecoder(aom_codec_dec_cfg_t cfg,
                                  unsigned long deadline) const = 0;
 
-  virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
-                                 const vpx_codec_flags_t flags,
+  virtual Decoder *CreateDecoder(aom_codec_dec_cfg_t cfg,
+                                 const aom_codec_flags_t flags,
                                  unsigned long deadline)  // NOLINT(runtime/int)
       const = 0;
 
-  virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg,
+  virtual Encoder *CreateEncoder(aom_codec_enc_cfg_t cfg,
                                  unsigned long deadline,
                                  const unsigned long init_flags,
                                  TwopassStatsStore *stats) const = 0;
 
-  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+  virtual aom_codec_err_t DefaultEncoderConfig(aom_codec_enc_cfg_t *cfg,
                                                int usage) const = 0;
 };
 
@@ -69,96 +69,96 @@
           std::tr1::tuple<const libaom_test::CodecFactory *, T1, T2, T3> > {};
 
 /*
- * VP10 Codec Definitions
+ * AV1 Codec Definitions
  */
-#if CONFIG_VP10
-class VP10Decoder : public Decoder {
+#if CONFIG_AV1
+class AV1Decoder : public Decoder {
  public:
-  VP10Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+  AV1Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
       : Decoder(cfg, deadline) {}
 
-  VP10Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag,
-              unsigned long deadline)  // NOLINT
+  AV1Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
+             unsigned long deadline)  // NOLINT
       : Decoder(cfg, flag, deadline) {}
 
  protected:
-  virtual vpx_codec_iface_t *CodecInterface() const {
-#if CONFIG_VP10_DECODER
-    return &vpx_codec_vp10_dx_algo;
+  virtual aom_codec_iface_t *CodecInterface() const {
+#if CONFIG_AV1_DECODER
+    return &aom_codec_av1_dx_algo;
 #else
     return NULL;
 #endif
   }
 };
 
-class VP10Encoder : public Encoder {
+class AV1Encoder : public Encoder {
  public:
-  VP10Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
-              const unsigned long init_flags, TwopassStatsStore *stats)
+  AV1Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
+             const unsigned long init_flags, TwopassStatsStore *stats)
       : Encoder(cfg, deadline, init_flags, stats) {}
 
  protected:
-  virtual vpx_codec_iface_t *CodecInterface() const {
-#if CONFIG_VP10_ENCODER
-    return &vpx_codec_vp10_cx_algo;
+  virtual aom_codec_iface_t *CodecInterface() const {
+#if CONFIG_AV1_ENCODER
+    return &aom_codec_av1_cx_algo;
 #else
     return NULL;
 #endif
   }
 };
 
-class VP10CodecFactory : public CodecFactory {
+class AV1CodecFactory : public CodecFactory {
  public:
-  VP10CodecFactory() : CodecFactory() {}
+  AV1CodecFactory() : CodecFactory() {}
 
-  virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
+  virtual Decoder *CreateDecoder(aom_codec_dec_cfg_t cfg,
                                  unsigned long deadline) const {
     return CreateDecoder(cfg, 0, deadline);
   }
 
-  virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
-                                 const vpx_codec_flags_t flags,
+  virtual Decoder *CreateDecoder(aom_codec_dec_cfg_t cfg,
+                                 const aom_codec_flags_t flags,
                                  unsigned long deadline) const {  // NOLINT
-#if CONFIG_VP10_DECODER
-    return new VP10Decoder(cfg, flags, deadline);
+#if CONFIG_AV1_DECODER
+    return new AV1Decoder(cfg, flags, deadline);
 #else
     return NULL;
 #endif
   }
 
-  virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg,
+  virtual Encoder *CreateEncoder(aom_codec_enc_cfg_t cfg,
                                  unsigned long deadline,
                                  const unsigned long init_flags,
                                  TwopassStatsStore *stats) const {
-#if CONFIG_VP10_ENCODER
-    return new VP10Encoder(cfg, deadline, init_flags, stats);
+#if CONFIG_AV1_ENCODER
+    return new AV1Encoder(cfg, deadline, init_flags, stats);
 #else
     return NULL;
 #endif
   }
 
-  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+  virtual aom_codec_err_t DefaultEncoderConfig(aom_codec_enc_cfg_t *cfg,
                                                int usage) const {
-#if CONFIG_VP10_ENCODER
-    return vpx_codec_enc_config_default(&vpx_codec_vp10_cx_algo, cfg, usage);
+#if CONFIG_AV1_ENCODER
+    return aom_codec_enc_config_default(&aom_codec_av1_cx_algo, cfg, usage);
 #else
-    return VPX_CODEC_INCAPABLE;
+    return AOM_CODEC_INCAPABLE;
 #endif
   }
 };
 
-const libaom_test::VP10CodecFactory kVP10;
+const libaom_test::AV1CodecFactory kAV1;
 
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)                               \
+#define AV1_INSTANTIATE_TEST_CASE(test, ...)                                \
   INSTANTIATE_TEST_CASE_P(                                                  \
-      VP10, test,                                                           \
+      AV1, test,                                                            \
       ::testing::Combine(                                                   \
           ::testing::Values(static_cast<const libaom_test::CodecFactory *>( \
-              &libaom_test::kVP10)),                                        \
+              &libaom_test::kAV1)),                                         \
           __VA_ARGS__))
 #else
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)
-#endif  // CONFIG_VP10
+#define AV1_INSTANTIATE_TEST_CASE(test, ...)
+#endif  // CONFIG_AV1
 
 }  // namespace libaom_test
 #endif  // TEST_CODEC_FACTORY_H_
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 910c4ec..f31ae02 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -12,15 +12,15 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 namespace {
@@ -64,7 +64,7 @@
 
 typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
 
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
 #define ALL_SIZES(convolve_fn)                                            \
   make_tuple(128, 64, &convolve_fn), make_tuple(64, 128, &convolve_fn),   \
       make_tuple(128, 128, &convolve_fn), make_tuple(4, 4, &convolve_fn), \
@@ -83,11 +83,11 @@
       make_tuple(16, 32, &convolve_fn), make_tuple(32, 32, &convolve_fn), \
       make_tuple(64, 32, &convolve_fn), make_tuple(32, 64, &convolve_fn), \
       make_tuple(64, 64, &convolve_fn)
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
 
 // Reference 8-tap subpixel filter, slightly modified to fit into this test.
-#define VP9_FILTER_WEIGHT 128
-#define VP9_FILTER_SHIFT 7
+#define AV1_FILTER_WEIGHT 128
+#define AV1_FILTER_SHIFT 7
 uint8_t clip_pixel(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }
 
 void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride,
@@ -125,10 +125,10 @@
                        (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
                        (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
                        (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
-                       (VP9_FILTER_WEIGHT >> 1);  // Rounding
+                       (AV1_FILTER_WEIGHT >> 1);  // Rounding
 
       // Normalize back to 0-255...
-      *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+      *output_ptr = clip_pixel(temp >> AV1_FILTER_SHIFT);
       ++src_ptr;
       output_ptr += intermediate_height;
     }
@@ -146,10 +146,10 @@
                        (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
                        (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
                        (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
-                       (VP9_FILTER_WEIGHT >> 1);  // Rounding
+                       (AV1_FILTER_WEIGHT >> 1);  // Rounding
 
       // Normalize back to 0-255...
-      *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+      *dst_ptr++ = clip_pixel(temp >> AV1_FILTER_SHIFT);
       src_ptr += intermediate_height;
     }
     src_ptr += intermediate_next_stride;
@@ -185,7 +185,7 @@
                     output_height);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
                                const unsigned int src_stride,
                                const int16_t *HFilter, const int16_t *VFilter,
@@ -224,10 +224,10 @@
                          (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
                          (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
                          (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
-                         (VP9_FILTER_WEIGHT >> 1);  // Rounding
+                         (AV1_FILTER_WEIGHT >> 1);  // Rounding
 
         // Normalize back to 0-255...
-        *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+        *output_ptr = clip_pixel_highbd(temp >> AV1_FILTER_SHIFT, bd);
         ++src_ptr;
         output_ptr += intermediate_height;
       }
@@ -248,10 +248,10 @@
                          (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
                          (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
                          (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
-                         (VP9_FILTER_WEIGHT >> 1);  // Rounding
+                         (AV1_FILTER_WEIGHT >> 1);  // Rounding
 
         // Normalize back to 0-255...
-        *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+        *dst_ptr++ = clip_pixel_highbd(temp >> AV1_FILTER_SHIFT, bd);
         src_ptr += intermediate_height;
       }
       src_ptr += intermediate_next_stride;
@@ -287,45 +287,45 @@
   highbd_block2d_average_c(tmp, kMaxDimension, dst_ptr, dst_stride,
                            output_width, output_height);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
  public:
   static void SetUpTestCase() {
     // Force input_ to be unaligned, output to be 16 byte aligned.
     input_ = reinterpret_cast<uint8_t *>(
-                 vpx_memalign(kDataAlignment, kInputBufferSize + 1)) +
+                 aom_memalign(kDataAlignment, kInputBufferSize + 1)) +
              1;
     output_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(kDataAlignment, kOutputBufferSize));
+        aom_memalign(kDataAlignment, kOutputBufferSize));
     output_ref_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(kDataAlignment, kOutputBufferSize));
-#if CONFIG_VP9_HIGHBITDEPTH
-    input16_ = reinterpret_cast<uint16_t *>(vpx_memalign(
+        aom_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_AOM_HIGHBITDEPTH
+    input16_ = reinterpret_cast<uint16_t *>(aom_memalign(
                    kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
                1;
     output16_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+        aom_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
     output16_ref_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+        aom_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
 #endif
   }
 
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 
   static void TearDownTestCase() {
-    vpx_free(input_ - 1);
+    aom_free(input_ - 1);
     input_ = NULL;
-    vpx_free(output_);
+    aom_free(output_);
     output_ = NULL;
-    vpx_free(output_ref_);
+    aom_free(output_ref_);
     output_ref_ = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
-    vpx_free(input16_ - 1);
+#if CONFIG_AOM_HIGHBITDEPTH
+    aom_free(input16_ - 1);
     input16_ = NULL;
-    vpx_free(output16_);
+    aom_free(output16_);
     output16_ = NULL;
-    vpx_free(output16_ref_);
+    aom_free(output16_ref_);
     output16_ref_ = NULL;
 #endif
   }
@@ -355,7 +355,7 @@
 
   virtual void SetUp() {
     UUT_ = GET_PARAM(2);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ != 0)
       mask_ = (1 << UUT_->use_highbd_) - 1;
     else
@@ -373,12 +373,12 @@
     for (int i = 0; i < kInputBufferSize; ++i) {
       if (i & 1) {
         input_[i] = 255;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         input16_[i] = mask_;
 #endif
       } else {
         input_[i] = prng.Rand8Extremes();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         input16_[i] = prng.Rand16() & mask_;
 #endif
       }
@@ -387,14 +387,14 @@
 
   void SetConstantInput(int value) {
     memset(input_, value, kInputBufferSize);
-#if CONFIG_VP9_HIGHBITDEPTH
-    vpx_memset16(input16_, value, kInputBufferSize);
+#if CONFIG_AOM_HIGHBITDEPTH
+    aom_memset16(input16_, value, kInputBufferSize);
 #endif
   }
 
   void CopyOutputToRef() {
     memcpy(output_ref_, output_, kOutputBufferSize);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     memcpy(output16_ref_, output16_,
            kOutputBufferSize * sizeof(output16_ref_[0]));
 #endif
@@ -408,7 +408,7 @@
 
   uint8_t *input() const {
     const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return input_ + offset;
     } else {
@@ -421,7 +421,7 @@
 
   uint8_t *output() const {
     const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return output_ + offset;
     } else {
@@ -434,7 +434,7 @@
 
   uint8_t *output_ref() const {
     const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return output_ref_ + offset;
     } else {
@@ -446,7 +446,7 @@
   }
 
   uint16_t lookup(uint8_t *list, int index) const {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return list[index];
     } else {
@@ -458,7 +458,7 @@
   }
 
   void assign_val(uint8_t *list, int index, uint16_t val) const {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       list[index] = (uint8_t)val;
     } else {
@@ -474,7 +474,7 @@
       const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
       unsigned int dst_stride, unsigned int output_width,
       unsigned int output_height) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
                                  dst_stride, output_width, output_height);
@@ -497,7 +497,7 @@
                                   unsigned int dst_stride,
                                   unsigned int output_width,
                                   unsigned int output_height) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
                          dst_stride, output_width, output_height);
@@ -517,7 +517,7 @@
   static uint8_t *input_;
   static uint8_t *output_;
   static uint8_t *output_ref_;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   static uint16_t *input16_;
   static uint16_t *output16_;
   static uint16_t *output16_ref_;
@@ -528,7 +528,7 @@
 uint8_t *ConvolveTest::input_ = NULL;
 uint8_t *ConvolveTest::output_ = NULL;
 uint8_t *ConvolveTest::output_ref_ = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 uint16_t *ConvolveTest::input16_ = NULL;
 uint16_t *ConvolveTest::output16_ = NULL;
 uint16_t *ConvolveTest::output16_ref_ = NULL;
@@ -635,7 +635,7 @@
 TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
     for (int i = 0; i < kNumFilters; i++) {
       const int p0 = filters[i][0] + filters[i][1];
       const int p1 = filters[i][2] + filters[i][3];
@@ -658,7 +658,7 @@
 TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -673,7 +673,7 @@
 
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -715,7 +715,7 @@
 TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -733,7 +733,7 @@
   for (int y = 0; y < Height(); ++y) {
     for (int x = 0; x < Width(); ++x) {
       uint16_t r;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
         r = prng.Rand8Extremes();
       } else {
@@ -750,7 +750,7 @@
 
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -792,7 +792,7 @@
 TEST_P(ConvolveTest, FilterExtremes) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -810,7 +810,7 @@
   for (int y = 0; y < Height(); ++y) {
     for (int x = 0; x < Width(); ++x) {
       uint16_t r;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
         r = prng.Rand8Extremes();
       } else {
@@ -829,7 +829,7 @@
     while (seed_val < 256) {
       for (int y = 0; y < 8; ++y) {
         for (int x = 0; x < 8; ++x) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
                      ((seed_val >> (axis ? y : x)) & 1) * mask_);
 #else
@@ -847,7 +847,7 @@
 
       for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
         const InterpKernel *filters =
-            vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+            av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
         for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
           for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
             wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
@@ -889,7 +889,7 @@
 TEST_P(ConvolveTest, CheckScalingFiltering) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
+  const InterpKernel *const eighttap = av1_filter_kernels[EIGHTTAP];
 
   SetConstantInput(127);
 
@@ -916,13 +916,13 @@
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define WRAP(func, bd)                                                       \
   void wrap_##func##_##bd(                                                   \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
       ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride,    \
       const int16_t *filter_y, int filter_y_stride, int w, int h) {          \
-    vpx_highbd_##func(src, src_stride, dst, dst_stride, filter_x,            \
+    aom_highbd_##func(src, src_stride, dst, dst_stride, filter_x,            \
                       filter_x_stride, filter_y, filter_y_stride, w, h, bd); \
   }
 #if HAVE_SSE2 && ARCH_X86_64
@@ -1005,17 +1005,17 @@
 
 #else
 const ConvolveFunctions convolve8_c(
-    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_c,
-    vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
-    vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c,
-    vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_c, aom_convolve_avg_c, aom_convolve8_horiz_c,
+    aom_convolve8_avg_horiz_c, aom_convolve8_vert_c, aom_convolve8_avg_vert_c,
+    aom_convolve8_c, aom_convolve8_avg_c, aom_scaled_horiz_c,
+    aom_scaled_avg_horiz_c, aom_scaled_vert_c, aom_scaled_avg_vert_c,
+    aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 const ConvolveParam kArrayConvolve_c[] = { ALL_SIZES(convolve8_c) };
 #endif
 INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::ValuesIn(kArrayConvolve_c));
 
 #if HAVE_SSE2 && ARCH_X86_64
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const ConvolveFunctions convolve8_sse2(
     wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8,
     wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
@@ -1045,25 +1045,25 @@
                                               ALL_SIZES(convolve12_sse2) };
 #else
 const ConvolveFunctions convolve8_sse2(
-    vpx_convolve_copy_sse2, vpx_convolve_avg_sse2, vpx_convolve8_horiz_sse2,
-    vpx_convolve8_avg_horiz_sse2, vpx_convolve8_vert_sse2,
-    vpx_convolve8_avg_vert_sse2, vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_sse2, aom_convolve_avg_sse2, aom_convolve8_horiz_sse2,
+    aom_convolve8_avg_horiz_sse2, aom_convolve8_vert_sse2,
+    aom_convolve8_avg_vert_sse2, aom_convolve8_sse2, aom_convolve8_avg_sse2,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 
 const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2) };
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest,
                         ::testing::ValuesIn(kArrayConvolve_sse2));
 #endif
 
 #if HAVE_SSSE3
 const ConvolveFunctions convolve8_ssse3(
-    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_ssse3,
-    vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_ssse3,
-    vpx_convolve8_avg_vert_ssse3, vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_ssse3, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_c, aom_convolve_avg_c, aom_convolve8_horiz_ssse3,
+    aom_convolve8_avg_horiz_ssse3, aom_convolve8_vert_ssse3,
+    aom_convolve8_avg_vert_ssse3, aom_convolve8_ssse3, aom_convolve8_avg_ssse3,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_ssse3, aom_scaled_avg_2d_c, 0);
 
 const ConvolveParam kArrayConvolve8_ssse3[] = { ALL_SIZES(convolve8_ssse3) };
 INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest,
@@ -1072,11 +1072,11 @@
 
 #if HAVE_AVX2 && HAVE_SSSE3
 const ConvolveFunctions convolve8_avx2(
-    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_avx2,
-    vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_avx2,
-    vpx_convolve8_avg_vert_ssse3, vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_c, aom_convolve_avg_c, aom_convolve8_horiz_avx2,
+    aom_convolve8_avg_horiz_ssse3, aom_convolve8_vert_avx2,
+    aom_convolve8_avg_vert_ssse3, aom_convolve8_avx2, aom_convolve8_avg_ssse3,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 
 const ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES(convolve8_avx2) };
 INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest,
@@ -1084,21 +1084,21 @@
 #endif  // HAVE_AVX2 && HAVE_SSSE3
 
 // TODO(any): Make NEON versions support 128x128 128x64 64x128 block sizes
-#if HAVE_NEON && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
+#if HAVE_NEON && !(CONFIG_AV1 && CONFIG_EXT_PARTITION)
 #if HAVE_NEON_ASM
 const ConvolveFunctions convolve8_neon(
-    vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
-    vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
-    vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_neon, aom_convolve_avg_neon, aom_convolve8_horiz_neon,
+    aom_convolve8_avg_horiz_neon, aom_convolve8_vert_neon,
+    aom_convolve8_avg_vert_neon, aom_convolve8_neon, aom_convolve8_avg_neon,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 #else   // HAVE_NEON
 const ConvolveFunctions convolve8_neon(
-    vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
-    vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
-    vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_neon, aom_convolve_avg_neon, aom_convolve8_horiz_neon,
+    aom_convolve8_avg_horiz_neon, aom_convolve8_vert_neon,
+    aom_convolve8_avg_vert_neon, aom_convolve8_neon, aom_convolve8_avg_neon,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 #endif  // HAVE_NEON_ASM
 
 const ConvolveParam kArrayConvolve8_neon[] = { ALL_SIZES(convolve8_neon) };
@@ -1107,13 +1107,13 @@
 #endif  // HAVE_NEON
 
 // TODO(any): Make DSPR2 versions support 128x128 128x64 64x128 block sizes
-#if HAVE_DSPR2 && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
+#if HAVE_DSPR2 && !(CONFIG_AV1 && CONFIG_EXT_PARTITION)
 const ConvolveFunctions convolve8_dspr2(
-    vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, vpx_convolve8_horiz_dspr2,
-    vpx_convolve8_avg_horiz_dspr2, vpx_convolve8_vert_dspr2,
-    vpx_convolve8_avg_vert_dspr2, vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_dspr2, aom_convolve_avg_dspr2, aom_convolve8_horiz_dspr2,
+    aom_convolve8_avg_horiz_dspr2, aom_convolve8_vert_dspr2,
+    aom_convolve8_avg_vert_dspr2, aom_convolve8_dspr2, aom_convolve8_avg_dspr2,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 
 const ConvolveParam kArrayConvolve8_dspr2[] = { ALL_SIZES(convolve8_dspr2) };
 INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest,
@@ -1121,13 +1121,13 @@
 #endif  // HAVE_DSPR2
 
 // TODO(any): Make MSA versions support 128x128 128x64 64x128 block sizes
-#if HAVE_MSA && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
+#if HAVE_MSA && !(CONFIG_AV1 && CONFIG_EXT_PARTITION)
 const ConvolveFunctions convolve8_msa(
-    vpx_convolve_copy_msa, vpx_convolve_avg_msa, vpx_convolve8_horiz_msa,
-    vpx_convolve8_avg_horiz_msa, vpx_convolve8_vert_msa,
-    vpx_convolve8_avg_vert_msa, vpx_convolve8_msa, vpx_convolve8_avg_msa,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
-    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    aom_convolve_copy_msa, aom_convolve_avg_msa, aom_convolve8_horiz_msa,
+    aom_convolve8_avg_horiz_msa, aom_convolve8_vert_msa,
+    aom_convolve8_avg_vert_msa, aom_convolve8_msa, aom_convolve8_avg_msa,
+    aom_scaled_horiz_c, aom_scaled_avg_horiz_c, aom_scaled_vert_c,
+    aom_scaled_avg_vert_c, aom_scaled_2d_c, aom_scaled_avg_2d_c, 0);
 
 const ConvolveParam kArrayConvolve8_msa[] = { ALL_SIZES(convolve8_msa) };
 INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest,
diff --git a/test/cpu_speed_test.cc b/test/cpu_speed_test.cc
index d7ad2ef..b9d0ed7 100644
--- a/test/cpu_speed_test.cc
+++ b/test/cpu_speed_test.cc
@@ -25,7 +25,7 @@
   CpuSpeedTest()
       : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
         set_cpu_used_(GET_PARAM(2)), min_psnr_(kMaxPSNR),
-        tune_content_(VPX_CONTENT_DEFAULT) {}
+        tune_content_(AOM_CONTENT_DEFAULT) {}
   virtual ~CpuSpeedTest() {}
 
   virtual void SetUp() {
@@ -33,10 +33,10 @@
     SetMode(encoding_mode_);
     if (encoding_mode_ != ::libaom_test::kRealTime) {
       cfg_.g_lag_in_frames = 25;
-      cfg_.rc_end_usage = VPX_VBR;
+      cfg_.rc_end_usage = AOM_VBR;
     } else {
       cfg_.g_lag_in_frames = 0;
-      cfg_.rc_end_usage = VPX_CBR;
+      cfg_.rc_end_usage = AOM_CBR;
     }
   }
 
@@ -45,18 +45,18 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
-      encoder->Control(VP9E_SET_TUNE_CONTENT, tune_content_);
+      encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
+      encoder->Control(AV1E_SET_TUNE_CONTENT, tune_content_);
       if (encoding_mode_ != ::libaom_test::kRealTime) {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-        encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-        encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-        encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+        encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+        encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+        encoder->Control(AOME_SET_ARNR_TYPE, 3);
       }
     }
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     if (pkt->data.psnr.psnr[0] < min_psnr_) min_psnr_ = pkt->data.psnr.psnr[0];
   }
 
@@ -86,7 +86,7 @@
   ::libaom_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
                                        10);
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   EXPECT_GE(min_psnr_, kMaxPSNR);
@@ -101,7 +101,7 @@
   cfg_.rc_max_quantizer = 0;
   cfg_.rc_min_quantizer = 0;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   EXPECT_GE(min_psnr_, kMaxPSNR);
@@ -115,9 +115,9 @@
   cfg_.rc_target_bitrate = 2000;
   cfg_.rc_max_quantizer = 63;
   cfg_.rc_min_quantizer = 0;
-  tune_content_ = VPX_CONTENT_SCREEN;
+  tune_content_ = AOM_CONTENT_SCREEN;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
@@ -168,12 +168,12 @@
 TEST_P(CpuSpeedTestLarge, TestEncodeHighBitrate) { TestEncodeHighBitrate(); }
 TEST_P(CpuSpeedTestLarge, TestLowBitrate) { TestLowBitrate(); }
 
-VP10_INSTANTIATE_TEST_CASE(CpuSpeedTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(1, 3));
-VP10_INSTANTIATE_TEST_CASE(CpuSpeedTestLarge,
-                           ::testing::Values(::libaom_test::kTwoPassGood,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(0, 1));
+AV1_INSTANTIATE_TEST_CASE(CpuSpeedTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(1, 3));
+AV1_INSTANTIATE_TEST_CASE(CpuSpeedTestLarge,
+                          ::testing::Values(::libaom_test::kTwoPassGood,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(0, 1));
 }  // namespace
diff --git a/test/cx_set_ref.sh b/test/cx_set_ref.sh
index 7f1e604..9c825ab 100755
--- a/test/cx_set_ref.sh
+++ b/test/cx_set_ref.sh
@@ -18,18 +18,18 @@
 # Environment check: $YUV_RAW_INPUT is required.
 cx_set_ref_verify_environment() {
   if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
 
 # Runs cx_set_ref and updates the reference frame before encoding frame 90.
 # $1 is the codec name.
-vpx_set_ref() {
+aom_set_ref() {
   local codec="$1"
-  local encoder="${LIBAOM_BIN_PATH}/vpxcx_set_ref${VPX_TEST_EXE_SUFFIX}"
+  local encoder="${LIBAOM_BIN_PATH}/aomcx_set_ref${AOM_TEST_EXE_SUFFIX}"
 
-  local output_file="${VPX_TEST_OUTPUT_DIR}/${codec}cx_set_ref_${codec}.ivf"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/${codec}cx_set_ref_${codec}.ivf"
   local ref_frame_num=90
 
   if [ ! -x "${encoder}" ]; then
@@ -38,11 +38,11 @@
   fi
 
   if [ "$codec" = "vp8" ]; then
-    eval "${VPX_TEST_PREFIX}" "${encoder}" "${YUV_RAW_INPUT_WIDTH}" \
+    eval "${AOM_TEST_PREFIX}" "${encoder}" "${YUV_RAW_INPUT_WIDTH}" \
         "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" \
         "${ref_frame_num}" ${devnull}
   else
-    eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
+    eval "${AOM_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
         "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" \
         "${ref_frame_num}" ${devnull}
   fi
@@ -50,12 +50,12 @@
   [ -e "${output_file}" ] || return 1
 }
 
-cx_set_ref_vp10() {
-  if [ "$(vp10_encode_available)" = "yes" ]; then
-    vpx_set_ref vp10 || return 1
+cx_set_ref_av1() {
+  if [ "$(av1_encode_available)" = "yes" ]; then
+    aom_set_ref av1 || return 1
   fi
 }
 
-cx_set_ref_tests="cx_set_ref_vp10"
+cx_set_ref_tests="cx_set_ref_av1"
 
 run_tests cx_set_ref_verify_environment "${cx_set_ref_tests}"
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index 6e66f21..6d59ef8 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -7,14 +7,14 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "third_party/googletest/src/include/gtest/gtest.h"
 #include "test/codec_factory.h"
 #include "test/encode_test_driver.h"
 #include "test/i420_video_source.h"
 #include "test/util.h"
 #include "test/y4m_video_source.h"
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
 
 namespace {
 
@@ -50,7 +50,7 @@
 
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
-    if (video->frame() == 0) encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+    if (video->frame() == 0) encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
 
     if (denoiser_offon_test_) {
       ASSERT_GT(denoiser_offon_period_, 0)
@@ -61,16 +61,16 @@
       }
     }
 
-    encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+    encoder->Control(AV1E_SET_NOISE_SENSITIVITY, denoiser_on_);
 
-    const vpx_rational_t tb = video->timebase();
+    const aom_rational_t tb = video->timebase();
     timebase_ = static_cast<double>(tb.num) / tb.den;
     duration_ = 0;
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     // Time since last timestamp = duration.
-    vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+    aom_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
 
     if (duration > 1) {
       // If first drop not set and we have a drop set it to this time.
@@ -107,7 +107,7 @@
     effective_datarate_ = (bits_total_ / 1000.0) / duration_;
   }
 
-  vpx_codec_pts_t last_pts_;
+  aom_codec_pts_t last_pts_;
   double timebase_;
   int frame_number_;      // Counter for number of non-dropped/encoded frames.
   int tot_frame_number_;  // Counter for total number of input frames.
@@ -116,7 +116,7 @@
   double effective_datarate_;
   int set_cpu_used_;
   int64_t bits_in_buffer_model_;
-  vpx_codec_pts_t first_drop_;
+  aom_codec_pts_t first_drop_;
   int num_drops_;
   int denoiser_on_;
   int denoiser_offon_test_;
@@ -128,7 +128,7 @@
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 63;
   cfg_.g_error_resilient = 0;
-  cfg_.rc_end_usage = VPX_VBR;
+  cfg_.rc_end_usage = AOM_VBR;
   cfg_.g_lag_in_frames = 0;
 
   ::libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
@@ -152,7 +152,7 @@
   cfg_.rc_dropframe_thresh = 1;
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 63;
-  cfg_.rc_end_usage = VPX_CBR;
+  cfg_.rc_end_usage = AOM_CBR;
   cfg_.g_lag_in_frames = 0;
 
   ::libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
@@ -181,7 +181,7 @@
   cfg_.rc_dropframe_thresh = 1;
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 63;
-  cfg_.rc_end_usage = VPX_CBR;
+  cfg_.rc_end_usage = AOM_CBR;
 
   for (int i = 250; i < 900; i += 200) {
     cfg_.rc_target_bitrate = i;
@@ -210,7 +210,7 @@
   cfg_.rc_dropframe_thresh = 10;
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 50;
-  cfg_.rc_end_usage = VPX_CBR;
+  cfg_.rc_end_usage = AOM_CBR;
   cfg_.rc_target_bitrate = 200;
   cfg_.g_lag_in_frames = 0;
   // TODO(marpan): Investigate datarate target failures with a smaller keyframe
@@ -221,7 +221,7 @@
                                        30, 1, 0, 140);
 
   const int kDropFrameThreshTestStep = 30;
-  vpx_codec_pts_t last_drop = 140;
+  aom_codec_pts_t last_drop = 140;
   int last_num_drops = 0;
   for (int i = 10; i < 100; i += kDropFrameThreshTestStep) {
     cfg_.rc_dropframe_thresh = i;
@@ -244,8 +244,8 @@
   }
 }
 
-VP10_INSTANTIATE_TEST_CASE(DatarateTestLarge,
-                           ::testing::Values(::libaom_test::kOnePassGood,
-                                             ::libaom_test::kRealTime),
-                           ::testing::Range(2, 9));
+AV1_INSTANTIATE_TEST_CASE(DatarateTestLarge,
+                          ::testing::Values(::libaom_test::kOnePassGood,
+                                            ::libaom_test::kRealTime),
+                          ::testing::Range(2, 9));
 }  // namespace
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index 8233a52..70b1ba2 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -14,16 +14,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
 #include "av1/common/scan.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/msvc.h"  // for round()
 
@@ -229,37 +229,37 @@
 typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
                         int tx_type);
 
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
-typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, aom_bit_depth_t>
     Idct16x16Param;
 
 void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
                    int /*tx_type*/) {
-  vpx_fdct16x16_c(in, out, stride);
+  aom_fdct16x16_c(in, out, stride);
 }
 
 void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
                    int /*tx_type*/) {
-  vpx_idct16x16_256_add_c(in, dest, stride);
+  aom_idct16x16_256_add_c(in, dest, stride);
 }
 
 void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht16x16_c(in, out, stride, tx_type);
+  av1_fht16x16_c(in, out, stride, tx_type);
 }
 
 void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
                   int tx_type) {
-  vp10_iht16x16_256_add_c(in, dest, stride, tx_type);
+  av1_iht16x16_256_add_c(in, dest, stride, tx_type);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+  aom_highbd_idct16x16_256_add_c(in, out, stride, 10);
 }
 
 void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+  aom_highbd_idct16x16_256_add_c(in, out, stride, 12);
 }
 
 void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
@@ -273,39 +273,39 @@
 }
 
 void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
 }
 
 #if HAVE_SSE2
 void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+  aom_highbd_idct16x16_10_add_c(in, out, stride, 10);
 }
 
 void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+  aom_highbd_idct16x16_10_add_c(in, out, stride, 12);
 }
 
 void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+  aom_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
 }
 
 void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+  aom_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
 }
 
 void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+  aom_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
 }
 
 void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+  aom_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans16x16TestBase {
  public:
@@ -326,18 +326,18 @@
       DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
 
       // Initialize a test block with input range [-mask_, mask_].
       for (int j = 0; j < kNumCoeffs; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -348,9 +348,9 @@
 
       ASM_REGISTER_STATE_CHECK(
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -358,9 +358,9 @@
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int32_t diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const int32_t diff = dst[j] - src[j];
 #endif
@@ -437,7 +437,7 @@
 
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
 #endif
@@ -457,7 +457,7 @@
       // clear reconstructed pixel buffers
       memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
       memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
       memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
 #endif
@@ -466,10 +466,10 @@
       output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
       for (int j = 1; j < kNumCoeffs; ++j)
         output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
                      tx_type_);
@@ -477,9 +477,9 @@
             RunInvTxfm(output_ref_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);
 #endif
@@ -494,26 +494,26 @@
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int i = 0; i < count_test_block; ++i) {
       double out_r[kNumCoeffs];
 
       // Initialize a test block with input range [-255, 255].
       for (int j = 0; j < kNumCoeffs; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           in[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
           in[j] = src16[j] - dst16[j];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
@@ -521,22 +521,22 @@
       for (int j = 0; j < kNumCoeffs; ++j)
         coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
 
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const uint32_t diff = dst[j] - src[j];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         const uint32_t error = diff * diff;
         EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error
                              << " at index " << j;
@@ -548,14 +548,14 @@
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     const int count_test_block = 10000;
     const int eob = 10;
-    const int16_t *scan = vp10_default_scan_orders[TX_16X16].scan;
+    const int16_t *scan = av1_default_scan_orders[TX_16X16].scan;
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int i = 0; i < count_test_block; ++i) {
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -565,34 +565,34 @@
         } else {
           coeff[scan[j]] = 0;
         }
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           dst[j] = 0;
           ref[j] = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           dst16[j] = 0;
           ref16[j] = 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ref_txfm(coeff, ref, pitch_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
       } else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
 #else
         const uint32_t diff = dst[j] - ref[j];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         const uint32_t error = diff * diff;
         EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error "
                              << error << " at index " << j;
@@ -602,7 +602,7 @@
 
   int pitch_;
   int tx_type_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   int mask_;
   FhtFunc fwd_txfm_ref;
   IhtFunc inv_txfm_ref;
@@ -622,10 +622,10 @@
     fwd_txfm_ref = fdct16x16_ref;
     inv_txfm_ref = idct16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     switch (bit_depth_) {
-      case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
-      case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
+      case AOM_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
+      case AOM_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
       default: inv_txfm_ref = idct16x16_ref; break;
     }
 #else
@@ -674,10 +674,10 @@
     fwd_txfm_ref = fht16x16_ref;
     inv_txfm_ref = iht16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     switch (bit_depth_) {
-      case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break;
-      case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break;
+      case AOM_BITS_10: inv_txfm_ref = iht16x16_10; break;
+      case AOM_BITS_12: inv_txfm_ref = iht16x16_12; break;
       default: inv_txfm_ref = iht16x16_ref; break;
     }
 #else
@@ -741,7 +741,7 @@
 }
 
 class PartialTrans16x16Test : public ::testing::TestWithParam<
-                                  std::tr1::tuple<FdctFunc, vpx_bit_depth_t> > {
+                                  std::tr1::tuple<FdctFunc, aom_bit_depth_t> > {
  public:
   virtual ~PartialTrans16x16Test() {}
   virtual void SetUp() {
@@ -752,12 +752,12 @@
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 
  protected:
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   FdctFunc fwd_txfm_;
 };
 
 TEST_P(PartialTrans16x16Test, Extremes) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int16_t maxval =
       static_cast<int16_t>(clip_pixel_highbd(1 << 30, bit_depth_));
 #else
@@ -779,7 +779,7 @@
 }
 
 TEST_P(PartialTrans16x16Test, Random) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int16_t maxval =
       static_cast<int16_t>(clip_pixel_highbd(1 << 30, bit_depth_));
 #else
@@ -802,139 +802,135 @@
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+        make_tuple(&aom_highbd_fdct16x16_c, &idct16x16_10, 0, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct16x16_c, &idct16x16_12, 0, AOM_BITS_12),
+        make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c, 0, AOM_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_c,
-                                                     &vpx_idct16x16_256_add_c,
-                                                     0, VPX_BITS_8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&aom_fdct16x16_c,
+                                                     &aom_idct16x16_256_add_c,
+                                                     0, AOM_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16HT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 0, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 1, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 2, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 3, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 0, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 1, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 2, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 3, AOM_BITS_12),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     C, PartialTrans16x16Test,
-    ::testing::Values(make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_8),
-                      make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_10),
-                      make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&aom_highbd_fdct16x16_1_c, AOM_BITS_8),
+                      make_tuple(&aom_highbd_fdct16x16_1_c, AOM_BITS_10),
+                      make_tuple(&aom_highbd_fdct16x16_1_c, AOM_BITS_12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16HT,
     ::testing::Values(
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(C, PartialTrans16x16Test,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_1_c,
-                                                     VPX_BITS_8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&aom_fdct16x16_1_c,
+                                                     AOM_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans16x16DCT,
-    ::testing::Values(make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_neon,
-                                 0, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_neon,
+                                 0, AOM_BITS_8)));
 #endif
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
-    ::testing::Values(make_tuple(&vpx_fdct16x16_sse2,
-                                 &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&aom_fdct16x16_sse2,
+                                 &aom_idct16x16_256_add_sse2, 0, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
+                                 0, AOM_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
+                                 1, AOM_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
+                                 2, AOM_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
+                                 3, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct16x16_1_sse2,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_10_sse2, 0,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_12_sse2, 0,
-                   VPX_BITS_12),
-        make_tuple(&vpx_fdct16x16_sse2, &vpx_idct16x16_256_add_c, 0,
-                   VPX_BITS_8)));
+        make_tuple(&aom_highbd_fdct16x16_sse2, &idct16x16_10, 0, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct16x16_c, &idct16x16_256_add_10_sse2, 0,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct16x16_sse2, &idct16x16_12, 0, AOM_BITS_12),
+        make_tuple(&aom_highbd_fdct16x16_c, &idct16x16_256_add_12_sse2, 0,
+                   AOM_BITS_12),
+        make_tuple(&aom_fdct16x16_sse2, &aom_idct16x16_256_add_c, 0,
+                   AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
-                                 0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
-                                 1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
-                                 2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
-                                 3, VPX_BITS_8)));
+    ::testing::Values(
+        make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 3,
+                   AOM_BITS_8)));
 // Optimizations take effect at a threshold of 3155, so we use a value close to
 // that to test both branches.
 INSTANTIATE_TEST_CASE_P(
     SSE2, InvTrans16x16DCT,
     ::testing::Values(make_tuple(&idct16x16_10_add_10_c,
-                                 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+                                 &idct16x16_10_add_10_sse2, 3167, AOM_BITS_10),
                       make_tuple(&idct16x16_10, &idct16x16_256_add_10_sse2,
-                                 3167, VPX_BITS_10),
+                                 3167, AOM_BITS_10),
                       make_tuple(&idct16x16_10_add_12_c,
-                                 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+                                 &idct16x16_10_add_12_sse2, 3167, AOM_BITS_12),
                       make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2,
-                                 3167, VPX_BITS_12)));
+                                 3167, AOM_BITS_12)));
 INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct16x16_1_sse2,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_msa,
-                                                     &vpx_idct16x16_256_add_msa,
-                                                     0, VPX_BITS_8)));
+                        ::testing::Values(make_tuple(&aom_fdct16x16_msa,
+                                                     &aom_idct16x16_256_add_msa,
+                                                     0, AOM_BITS_8)));
 #if !CONFIG_EXT_TX
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
-                                 0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
-                                 1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
-                                 2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
-                                 3, VPX_BITS_8)));
+    ::testing::Values(
+        make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 0, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 1, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 2, AOM_BITS_8),
+        make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 3,
+                   AOM_BITS_8)));
 #endif  // !CONFIG_EXT_TX
 INSTANTIATE_TEST_CASE_P(MSA, PartialTrans16x16Test,
-                        ::testing::Values(make_tuple(&vpx_fdct16x16_1_msa,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct16x16_1_msa,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index 629f878..c5faa9e 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -14,16 +14,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/msvc.h"  // for round()
 
@@ -65,18 +65,18 @@
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
 
-typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, vpx_bit_depth_t>
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, aom_bit_depth_t>
     Trans32x32Param;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct32x32_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct32x32_1024_add_c(in, out, stride, 10);
+  aom_highbd_idct32x32_1024_add_c(in, out, stride, 10);
 }
 
 void idct32x32_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct32x32_1024_add_c(in, out, stride, 12);
+  aom_highbd_idct32x32_1024_add_c(in, out, stride, 12);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans32x32Test : public ::testing::TestWithParam<Trans32x32Param> {
  public:
@@ -94,7 +94,7 @@
 
  protected:
   int version_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   int mask_;
   FwdTxfmFunc fwd_txfm_;
   InvTxfmFunc inv_txfm_;
@@ -109,7 +109,7 @@
   DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -117,11 +117,11 @@
   for (int i = 0; i < count_test_block; ++i) {
     // Initialize a test block with input range [-mask_, mask_].
     for (int j = 0; j < kNumCoeffs; ++j) {
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         src[j] = rnd.Rand8();
         dst[j] = rnd.Rand8();
         test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         src16[j] = rnd.Rand16() & mask_;
         dst16[j] = rnd.Rand16() & mask_;
@@ -131,9 +131,9 @@
     }
 
     ASM_REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, 32));
-    if (bit_depth_ == VPX_BITS_8) {
+    if (bit_depth_ == AOM_BITS_8) {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       ASM_REGISTER_STATE_CHECK(
           inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32));
@@ -141,9 +141,9 @@
     }
 
     for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       const int32_t diff =
-          bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+          bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
       const int32_t diff = dst[j] - src[j];
 #endif
@@ -178,7 +178,7 @@
       input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
 
     const int stride = 32;
-    vpx_fdct32x32_c(input_block, output_ref_block, stride);
+    aom_fdct32x32_c(input_block, output_ref_block, stride);
     ASM_REGISTER_STATE_CHECK(fwd_txfm_(input_block, output_block, stride));
 
     if (version_ == 0) {
@@ -213,7 +213,7 @@
     }
 
     const int stride = 32;
-    vpx_fdct32x32_c(input_extreme_block, output_ref_block, stride);
+    aom_fdct32x32_c(input_extreme_block, output_ref_block, stride);
     ASM_REGISTER_STATE_CHECK(
         fwd_txfm_(input_extreme_block, output_block, stride));
 
@@ -242,7 +242,7 @@
   DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -252,11 +252,11 @@
 
     // Initialize a test block with input range [-255, 255]
     for (int j = 0; j < kNumCoeffs; ++j) {
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         src[j] = rnd.Rand8();
         dst[j] = rnd.Rand8();
         in[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         src16[j] = rnd.Rand16() & mask_;
         dst16[j] = rnd.Rand16() & mask_;
@@ -268,17 +268,17 @@
     reference_32x32_dct_2d(in, out_r);
     for (int j = 0; j < kNumCoeffs; ++j)
       coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
-    if (bit_depth_ == VPX_BITS_8) {
+    if (bit_depth_ == AOM_BITS_8) {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, CONVERT_TO_BYTEPTR(dst16), 32));
 #endif
     }
     for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       const int diff =
-          bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+          bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
       const int diff = dst[j] - src[j];
 #endif
@@ -291,7 +291,7 @@
 
 class PartialTrans32x32Test
     : public ::testing::TestWithParam<
-          std::tr1::tuple<FwdTxfmFunc, vpx_bit_depth_t> > {
+          std::tr1::tuple<FwdTxfmFunc, aom_bit_depth_t> > {
  public:
   virtual ~PartialTrans32x32Test() {}
   virtual void SetUp() {
@@ -302,12 +302,12 @@
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 
  protected:
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   FwdTxfmFunc fwd_txfm_;
 };
 
 TEST_P(PartialTrans32x32Test, Extremes) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int16_t maxval =
       static_cast<int16_t>(clip_pixel_highbd(1 << 30, bit_depth_));
 #else
@@ -329,7 +329,7 @@
 }
 
 TEST_P(PartialTrans32x32Test, Random) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int16_t maxval =
       static_cast<int16_t>(clip_pixel_highbd(1 << 30, bit_depth_));
 #else
@@ -352,92 +352,92 @@
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans32x32Test,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_10, 1, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_12, 1, VPX_BITS_12),
-        make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c, 1,
-                   VPX_BITS_8)));
+        make_tuple(&aom_highbd_fdct32x32_c, &idct32x32_10, 0, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct32x32_rd_c, &idct32x32_10, 1, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct32x32_c, &idct32x32_12, 0, AOM_BITS_12),
+        make_tuple(&aom_highbd_fdct32x32_rd_c, &idct32x32_12, 1, AOM_BITS_12),
+        make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c, 0, AOM_BITS_8),
+        make_tuple(&aom_fdct32x32_rd_c, &aom_idct32x32_1024_add_c, 1,
+                   AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     C, PartialTrans32x32Test,
-    ::testing::Values(make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_8),
-                      make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_10),
-                      make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&aom_highbd_fdct32x32_1_c, AOM_BITS_8),
+                      make_tuple(&aom_highbd_fdct32x32_1_c, AOM_BITS_10),
+                      make_tuple(&aom_highbd_fdct32x32_1_c, AOM_BITS_12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans32x32Test,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0,
-                                 VPX_BITS_8),
-                      make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c,
-                                 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c, 0,
+                                 AOM_BITS_8),
+                      make_tuple(&aom_fdct32x32_rd_c, &aom_idct32x32_1024_add_c,
+                                 1, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(C, PartialTrans32x32Test,
-                        ::testing::Values(make_tuple(&vpx_fdct32x32_1_c,
-                                                     VPX_BITS_8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&aom_fdct32x32_1_c,
+                                                     AOM_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans32x32Test,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_neon,
-                                 0, VPX_BITS_8),
-                      make_tuple(&vpx_fdct32x32_rd_c,
-                                 &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
-#endif  // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_neon,
+                                 0, AOM_BITS_8),
+                      make_tuple(&aom_fdct32x32_rd_c,
+                                 &aom_idct32x32_1024_add_neon, 1, AOM_BITS_8)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans32x32Test,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_sse2,
-                                 &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
-                      make_tuple(&vpx_fdct32x32_rd_sse2,
-                                 &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&aom_fdct32x32_sse2,
+                                 &aom_idct32x32_1024_add_sse2, 0, AOM_BITS_8),
+                      make_tuple(&aom_fdct32x32_rd_sse2,
+                                 &aom_idct32x32_1024_add_sse2, 1, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans32x32Test,
-                        ::testing::Values(make_tuple(&vpx_fdct32x32_1_sse2,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct32x32_1_sse2,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans32x32Test,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct32x32_sse2, &idct32x32_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_rd_sse2, &idct32x32_10, 1,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_sse2, &idct32x32_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct32x32_rd_sse2, &idct32x32_12, 1,
-                   VPX_BITS_12),
-        make_tuple(&vpx_fdct32x32_sse2, &vpx_idct32x32_1024_add_c, 0,
-                   VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_sse2, &vpx_idct32x32_1024_add_c, 1,
-                   VPX_BITS_8)));
+        make_tuple(&aom_highbd_fdct32x32_sse2, &idct32x32_10, 0, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct32x32_rd_sse2, &idct32x32_10, 1,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct32x32_sse2, &idct32x32_12, 0, AOM_BITS_12),
+        make_tuple(&aom_highbd_fdct32x32_rd_sse2, &idct32x32_12, 1,
+                   AOM_BITS_12),
+        make_tuple(&aom_fdct32x32_sse2, &aom_idct32x32_1024_add_c, 0,
+                   AOM_BITS_8),
+        make_tuple(&aom_fdct32x32_rd_sse2, &aom_idct32x32_1024_add_c, 1,
+                   AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans32x32Test,
-                        ::testing::Values(make_tuple(&vpx_fdct32x32_1_sse2,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct32x32_1_sse2,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     AVX2, Trans32x32Test,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_avx2,
-                                 &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
-                      make_tuple(&vpx_fdct32x32_rd_avx2,
-                                 &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
-#endif  // HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&aom_fdct32x32_avx2,
+                                 &aom_idct32x32_1024_add_sse2, 0, AOM_BITS_8),
+                      make_tuple(&aom_fdct32x32_rd_avx2,
+                                 &aom_idct32x32_1024_add_sse2, 1, AOM_BITS_8)));
+#endif  // HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans32x32Test,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_msa,
-                                 &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
-                      make_tuple(&vpx_fdct32x32_rd_msa,
-                                 &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&aom_fdct32x32_msa,
+                                 &aom_idct32x32_1024_add_msa, 0, AOM_BITS_8),
+                      make_tuple(&aom_fdct32x32_rd_msa,
+                                 &aom_idct32x32_1024_add_msa, 1, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(MSA, PartialTrans32x32Test,
-                        ::testing::Values(make_tuple(&vpx_fdct32x32_1_msa,
-                                                     VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct32x32_1_msa,
+                                                     AOM_BITS_8)));
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/decode_api_test.cc b/test/decode_api_test.cc
index 6aceaba..6ea33d5 100644
--- a/test/decode_api_test.cc
+++ b/test/decode_api_test.cc
@@ -10,47 +10,47 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "test/ivf_video_source.h"
-#include "aom/vp8dx.h"
-#include "aom/vpx_decoder.h"
+#include "aom/aomdx.h"
+#include "aom/aom_decoder.h"
 
 namespace {
 
 #define NELEMENTS(x) static_cast<int>(sizeof(x) / sizeof(x[0]))
 
 TEST(DecodeAPI, InvalidParams) {
-  static const vpx_codec_iface_t *kCodecs[] = {
-#if CONFIG_VP10_DECODER
-    &vpx_codec_vp10_dx_algo,
+  static const aom_codec_iface_t *kCodecs[] = {
+#if CONFIG_AV1_DECODER
+    &aom_codec_av1_dx_algo,
 #endif
   };
   uint8_t buf[1] = { 0 };
-  vpx_codec_ctx_t dec;
+  aom_codec_ctx_t dec;
 
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_dec_init(NULL, NULL, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_dec_init(&dec, NULL, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(NULL, NULL, 0, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(NULL, buf, 0, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-            vpx_codec_decode(NULL, buf, NELEMENTS(buf), NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-            vpx_codec_decode(NULL, NULL, NELEMENTS(buf), NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_destroy(NULL));
-  EXPECT_TRUE(vpx_codec_error(NULL) != NULL);
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_dec_init(NULL, NULL, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_dec_init(&dec, NULL, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_decode(NULL, NULL, 0, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_decode(NULL, buf, 0, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+            aom_codec_decode(NULL, buf, NELEMENTS(buf), NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+            aom_codec_decode(NULL, NULL, NELEMENTS(buf), NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_destroy(NULL));
+  EXPECT_TRUE(aom_codec_error(NULL) != NULL);
 
   for (int i = 0; i < NELEMENTS(kCodecs); ++i) {
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_dec_init(NULL, kCodecs[i], NULL, 0));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+              aom_codec_dec_init(NULL, kCodecs[i], NULL, 0));
 
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_dec_init(&dec, kCodecs[i], NULL, 0));
-    EXPECT_EQ(VPX_CODEC_UNSUP_BITSTREAM,
-              vpx_codec_decode(&dec, buf, NELEMENTS(buf), NULL, 0));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_decode(&dec, NULL, NELEMENTS(buf), NULL, 0));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(&dec, buf, 0, NULL, 0));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_dec_init(&dec, kCodecs[i], NULL, 0));
+    EXPECT_EQ(AOM_CODEC_UNSUP_BITSTREAM,
+              aom_codec_decode(&dec, buf, NELEMENTS(buf), NULL, 0));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+              aom_codec_decode(&dec, NULL, NELEMENTS(buf), NULL, 0));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_decode(&dec, buf, 0, NULL, 0));
 
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&dec));
   }
 }
 
diff --git a/test/decode_perf_test.cc b/test/decode_perf_test.cc
index 04ca12f..0699863 100644
--- a/test/decode_perf_test.cc
+++ b/test/decode_perf_test.cc
@@ -17,9 +17,9 @@
 #include "test/md5_helper.h"
 #include "test/util.h"
 #include "test/webm_video_source.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 #include "./ivfenc.h"
-#include "./vpx_version.h"
+#include "./aom_version.h"
 
 using std::tr1::make_tuple;
 
@@ -37,26 +37,26 @@
  */
 typedef std::tr1::tuple<const char *, unsigned> DecodePerfParam;
 
-const DecodePerfParam kVP9DecodePerfVectors[] = {
-  make_tuple("vp90-2-bbb_426x240_tile_1x1_180kbps.webm", 1),
-  make_tuple("vp90-2-bbb_640x360_tile_1x2_337kbps.webm", 2),
-  make_tuple("vp90-2-bbb_854x480_tile_1x2_651kbps.webm", 2),
-  make_tuple("vp90-2-bbb_1280x720_tile_1x4_1310kbps.webm", 4),
-  make_tuple("vp90-2-bbb_1920x1080_tile_1x1_2581kbps.webm", 1),
-  make_tuple("vp90-2-bbb_1920x1080_tile_1x4_2586kbps.webm", 4),
-  make_tuple("vp90-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm", 4),
-  make_tuple("vp90-2-sintel_426x182_tile_1x1_171kbps.webm", 1),
-  make_tuple("vp90-2-sintel_640x272_tile_1x2_318kbps.webm", 2),
-  make_tuple("vp90-2-sintel_854x364_tile_1x2_621kbps.webm", 2),
-  make_tuple("vp90-2-sintel_1280x546_tile_1x4_1257kbps.webm", 4),
-  make_tuple("vp90-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm", 4),
-  make_tuple("vp90-2-tos_426x178_tile_1x1_181kbps.webm", 1),
-  make_tuple("vp90-2-tos_640x266_tile_1x2_336kbps.webm", 2),
-  make_tuple("vp90-2-tos_854x356_tile_1x2_656kbps.webm", 2),
-  make_tuple("vp90-2-tos_854x356_tile_1x2_fpm_546kbps.webm", 2),
-  make_tuple("vp90-2-tos_1280x534_tile_1x4_1306kbps.webm", 4),
-  make_tuple("vp90-2-tos_1280x534_tile_1x4_fpm_952kbps.webm", 4),
-  make_tuple("vp90-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm", 4),
+const DecodePerfParam kAV1DecodePerfVectors[] = {
+  make_tuple("av10-2-bbb_426x240_tile_1x1_180kbps.webm", 1),
+  make_tuple("av10-2-bbb_640x360_tile_1x2_337kbps.webm", 2),
+  make_tuple("av10-2-bbb_854x480_tile_1x2_651kbps.webm", 2),
+  make_tuple("av10-2-bbb_1280x720_tile_1x4_1310kbps.webm", 4),
+  make_tuple("av10-2-bbb_1920x1080_tile_1x1_2581kbps.webm", 1),
+  make_tuple("av10-2-bbb_1920x1080_tile_1x4_2586kbps.webm", 4),
+  make_tuple("av10-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm", 4),
+  make_tuple("av10-2-sintel_426x182_tile_1x1_171kbps.webm", 1),
+  make_tuple("av10-2-sintel_640x272_tile_1x2_318kbps.webm", 2),
+  make_tuple("av10-2-sintel_854x364_tile_1x2_621kbps.webm", 2),
+  make_tuple("av10-2-sintel_1280x546_tile_1x4_1257kbps.webm", 4),
+  make_tuple("av10-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm", 4),
+  make_tuple("av10-2-tos_426x178_tile_1x1_181kbps.webm", 1),
+  make_tuple("av10-2-tos_640x266_tile_1x2_336kbps.webm", 2),
+  make_tuple("av10-2-tos_854x356_tile_1x2_656kbps.webm", 2),
+  make_tuple("av10-2-tos_854x356_tile_1x2_fpm_546kbps.webm", 2),
+  make_tuple("av10-2-tos_1280x534_tile_1x4_1306kbps.webm", 4),
+  make_tuple("av10-2-tos_1280x534_tile_1x4_fpm_952kbps.webm", 4),
+  make_tuple("av10-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm", 4),
 };
 
 /*
@@ -79,19 +79,19 @@
   libaom_test::WebMVideoSource video(video_name);
   video.Init();
 
-  vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+  aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
   cfg.threads = threads;
-  libaom_test::VP9Decoder decoder(cfg, 0);
+  libaom_test::AV1Decoder decoder(cfg, 0);
 
-  vpx_usec_timer t;
-  vpx_usec_timer_start(&t);
+  aom_usec_timer t;
+  aom_usec_timer_start(&t);
 
   for (video.Begin(); video.cxdata() != NULL; video.Next()) {
     decoder.DecodeFrame(video.cxdata(), video.frame_size());
   }
 
-  vpx_usec_timer_mark(&t);
-  const double elapsed_secs = double(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
+  aom_usec_timer_mark(&t);
+  const double elapsed_secs = double(aom_usec_timer_elapsed(&t)) / kUsecsInSec;
   const unsigned frames = video.frame_number();
   const double fps = double(frames) / elapsed_secs;
 
@@ -106,18 +106,18 @@
   printf("}\n");
 }
 
-INSTANTIATE_TEST_CASE_P(VP9, DecodePerfTest,
-                        ::testing::ValuesIn(kVP9DecodePerfVectors));
+INSTANTIATE_TEST_CASE_P(AV1, DecodePerfTest,
+                        ::testing::ValuesIn(kAV1DecodePerfVectors));
 
-class VP9NewEncodeDecodePerfTest
+class AV1NewEncodeDecodePerfTest
     : public ::libaom_test::EncoderTest,
       public ::libaom_test::CodecTestWithParam<libaom_test::TestMode> {
  protected:
-  VP9NewEncodeDecodePerfTest()
+  AV1NewEncodeDecodePerfTest()
       : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), speed_(0),
         outfile_(0), out_frames_(0) {}
 
-  virtual ~VP9NewEncodeDecodePerfTest() {}
+  virtual ~AV1NewEncodeDecodePerfTest() {}
 
   virtual void SetUp() {
     InitializeConfig();
@@ -133,15 +133,15 @@
     cfg_.rc_buf_initial_sz = 500;
     cfg_.rc_buf_optimal_sz = 600;
     cfg_.rc_resize_allowed = 0;
-    cfg_.rc_end_usage = VPX_VBR;
+    cfg_.rc_end_usage = AOM_VBR;
   }
 
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, speed_);
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, 2);
+      encoder->Control(AOME_SET_CPUUSED, speed_);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING, 1);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, 2);
     }
   }
 
@@ -155,18 +155,18 @@
   virtual void EndPassHook() {
     if (outfile_ != NULL) {
       if (!fseek(outfile_, 0, SEEK_SET))
-        ivf_write_file_header(outfile_, &cfg_, VP9_FOURCC, out_frames_);
+        ivf_write_file_header(outfile_, &cfg_, AV1_FOURCC, out_frames_);
       fclose(outfile_);
       outfile_ = NULL;
     }
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     ++out_frames_;
 
     // Write initial file header if first frame.
     if (pkt->data.frame.pts == 0)
-      ivf_write_file_header(outfile_, &cfg_, VP9_FOURCC, out_frames_);
+      ivf_write_file_header(outfile_, &cfg_, AV1_FOURCC, out_frames_);
 
     // Write frame header and data.
     ivf_write_frame_header(outfile_, out_frames_, pkt->data.frame.sz);
@@ -197,26 +197,26 @@
   int frames;
 };
 
-const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
+const EncodePerfTestVideo kAV1EncodePerfTestVectors[] = {
   EncodePerfTestVideo("niklas_1280_720_30.yuv", 1280, 720, 600, 470),
 };
 
-TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) {
+TEST_P(AV1NewEncodeDecodePerfTest, PerfTest) {
   SetUp();
 
   // TODO(JBB): Make this work by going through the set of given files.
   const int i = 0;
-  const vpx_rational timebase = { 33333333, 1000000000 };
+  const aom_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
-  cfg_.rc_target_bitrate = kVP9EncodePerfTestVectors[i].bitrate;
+  cfg_.rc_target_bitrate = kAV1EncodePerfTestVectors[i].bitrate;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
-  const char *video_name = kVP9EncodePerfTestVectors[i].name;
+  const char *video_name = kAV1EncodePerfTestVectors[i].name;
   libaom_test::I420VideoSource video(
-      video_name, kVP9EncodePerfTestVectors[i].width,
-      kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
-      kVP9EncodePerfTestVectors[i].frames);
+      video_name, kAV1EncodePerfTestVectors[i].width,
+      kAV1EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
+      kAV1EncodePerfTestVectors[i].frames);
   set_speed(2);
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
@@ -226,21 +226,21 @@
   libaom_test::IVFVideoSource decode_video(kNewEncodeOutputFile);
   decode_video.Init();
 
-  vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+  aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
   cfg.threads = threads;
-  libaom_test::VP9Decoder decoder(cfg, 0);
+  libaom_test::AV1Decoder decoder(cfg, 0);
 
-  vpx_usec_timer t;
-  vpx_usec_timer_start(&t);
+  aom_usec_timer t;
+  aom_usec_timer_start(&t);
 
   for (decode_video.Begin(); decode_video.cxdata() != NULL;
        decode_video.Next()) {
     decoder.DecodeFrame(decode_video.cxdata(), decode_video.frame_size());
   }
 
-  vpx_usec_timer_mark(&t);
+  aom_usec_timer_mark(&t);
   const double elapsed_secs =
-      static_cast<double>(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
+      static_cast<double>(aom_usec_timer_elapsed(&t)) / kUsecsInSec;
   const unsigned decode_frames = decode_video.frame_number();
   const double fps = static_cast<double>(decode_frames) / elapsed_secs;
 
@@ -255,6 +255,6 @@
   printf("}\n");
 }
 
-VP10_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood));
+AV1_INSTANTIATE_TEST_CASE(AV1NewEncodeDecodePerfTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/decode_test_driver.cc b/test/decode_test_driver.cc
index 4484d47..65dea5c 100644
--- a/test/decode_test_driver.cc
+++ b/test/decode_test_driver.cc
@@ -18,24 +18,24 @@
 namespace libaom_test {
 
 const char kVP8Name[] = "WebM Project VP8";
-const char kVP10Name[] = "WebM Project VP10";
+const char kAV1Name[] = "WebM Project AV1";
 
-vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
-                                    vpx_codec_stream_info_t *stream_info) {
-  return vpx_codec_peek_stream_info(
+aom_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
+                                    aom_codec_stream_info_t *stream_info) {
+  return aom_codec_peek_stream_info(
       CodecInterface(), cxdata, static_cast<unsigned int>(size), stream_info);
 }
 
-vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
+aom_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
   return DecodeFrame(cxdata, size, NULL);
 }
 
-vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
+aom_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
                                      void *user_priv) {
-  vpx_codec_err_t res_dec;
+  aom_codec_err_t res_dec;
   InitOnce();
   API_REGISTER_STATE_CHECK(
-      res_dec = vpx_codec_decode(
+      res_dec = aom_codec_decode(
           &decoder_, cxdata, static_cast<unsigned int>(size), user_priv, 0));
   return res_dec;
 }
@@ -45,32 +45,32 @@
   return strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
 }
 
-bool Decoder::IsVP10() const {
+bool Decoder::IsAV1() const {
   const char *codec_name = GetDecoderName();
-  return strncmp(kVP10Name, codec_name, sizeof(kVP10Name) - 1) == 0;
+  return strncmp(kAV1Name, codec_name, sizeof(kAV1Name) - 1) == 0;
 }
 
 void DecoderTest::HandlePeekResult(Decoder *const decoder,
                                    CompressedVideoSource *video,
-                                   const vpx_codec_err_t res_peek) {
+                                   const aom_codec_err_t res_peek) {
   const bool is_vp8 = decoder->IsVP8();
   if (is_vp8) {
     /* Vp8's implementation of PeekStream returns an error if the frame you
-     * pass it is not a keyframe, so we only expect VPX_CODEC_OK on the first
+     * pass it is not a keyframe, so we only expect AOM_CODEC_OK on the first
      * frame, which must be a keyframe. */
     if (video->frame_number() == 0)
-      ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
-                                        << vpx_codec_err_to_string(res_peek);
+      ASSERT_EQ(AOM_CODEC_OK, res_peek) << "Peek return failed: "
+                                        << aom_codec_err_to_string(res_peek);
   } else {
-    /* The Vp9 implementation of PeekStream returns an error only if the
-     * data passed to it isn't a valid Vp9 chunk. */
-    ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
-                                      << vpx_codec_err_to_string(res_peek);
+    /* The Av1 implementation of PeekStream returns an error only if the
+     * data passed to it isn't a valid Av1 chunk. */
+    ASSERT_EQ(AOM_CODEC_OK, res_peek) << "Peek return failed: "
+                                      << aom_codec_err_to_string(res_peek);
   }
 }
 
 void DecoderTest::RunLoop(CompressedVideoSource *video,
-                          const vpx_codec_dec_cfg_t &dec_cfg) {
+                          const aom_codec_dec_cfg_t &dec_cfg) {
   Decoder *const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
   ASSERT_TRUE(decoder != NULL);
   bool end_of_file = false;
@@ -80,27 +80,27 @@
        video->Next()) {
     PreDecodeFrameHook(*video, decoder);
 
-    vpx_codec_stream_info_t stream_info;
+    aom_codec_stream_info_t stream_info;
     stream_info.sz = sizeof(stream_info);
 
     if (video->cxdata() != NULL) {
-      const vpx_codec_err_t res_peek = decoder->PeekStream(
+      const aom_codec_err_t res_peek = decoder->PeekStream(
           video->cxdata(), video->frame_size(), &stream_info);
       HandlePeekResult(decoder, video, res_peek);
       ASSERT_FALSE(::testing::Test::HasFailure());
 
-      vpx_codec_err_t res_dec =
+      aom_codec_err_t res_dec =
           decoder->DecodeFrame(video->cxdata(), video->frame_size());
       if (!HandleDecodeResult(res_dec, *video, decoder)) break;
     } else {
       // Signal end of the file to the decoder.
-      const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
-      ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+      const aom_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+      ASSERT_EQ(AOM_CODEC_OK, res_dec) << decoder->DecodeError();
       end_of_file = true;
     }
 
     DxDataIterator dec_iter = decoder->GetDxData();
-    const vpx_image_t *img = NULL;
+    const aom_image_t *img = NULL;
 
     // Get decompressed data
     while ((img = dec_iter.Next()))
@@ -110,14 +110,14 @@
 }
 
 void DecoderTest::RunLoop(CompressedVideoSource *video) {
-  vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
+  aom_codec_dec_cfg_t dec_cfg = aom_codec_dec_cfg_t();
   RunLoop(video, dec_cfg);
 }
 
-void DecoderTest::set_cfg(const vpx_codec_dec_cfg_t &dec_cfg) {
+void DecoderTest::set_cfg(const aom_codec_dec_cfg_t &dec_cfg) {
   memcpy(&cfg_, &dec_cfg, sizeof(cfg_));
 }
 
-void DecoderTest::set_flags(const vpx_codec_flags_t flags) { flags_ = flags; }
+void DecoderTest::set_flags(const aom_codec_flags_t flags) { flags_ = flags; }
 
 }  // namespace libaom_test
diff --git a/test/decode_test_driver.h b/test/decode_test_driver.h
index 04ae46f..32c0c3f 100644
--- a/test/decode_test_driver.h
+++ b/test/decode_test_driver.h
@@ -12,8 +12,8 @@
 #define TEST_DECODE_TEST_DRIVER_H_
 #include <cstring>
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "./vpx_config.h"
-#include "aom/vpx_decoder.h"
+#include "./aom_config.h"
+#include "aom/aom_decoder.h"
 
 namespace libaom_test {
 
@@ -23,14 +23,14 @@
 // Provides an object to handle decoding output
 class DxDataIterator {
  public:
-  explicit DxDataIterator(vpx_codec_ctx_t *decoder)
+  explicit DxDataIterator(aom_codec_ctx_t *decoder)
       : decoder_(decoder), iter_(NULL) {}
 
-  const vpx_image_t *Next() { return vpx_codec_get_frame(decoder_, &iter_); }
+  const aom_image_t *Next() { return aom_codec_get_frame(decoder_, &iter_); }
 
  private:
-  vpx_codec_ctx_t *decoder_;
-  vpx_codec_iter_t iter_;
+  aom_codec_ctx_t *decoder_;
+  aom_codec_iter_t iter_;
 };
 
 // Provides a simplified interface to manage one video decoding.
@@ -38,12 +38,12 @@
 // as more tests are added.
 class Decoder {
  public:
-  Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+  Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
       : cfg_(cfg), flags_(0), deadline_(deadline), init_done_(false) {
     memset(&decoder_, 0, sizeof(decoder_));
   }
 
-  Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag,
+  Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
           unsigned long deadline)  // NOLINT
       : cfg_(cfg),
         flags_(flag),
@@ -52,73 +52,73 @@
     memset(&decoder_, 0, sizeof(decoder_));
   }
 
-  virtual ~Decoder() { vpx_codec_destroy(&decoder_); }
+  virtual ~Decoder() { aom_codec_destroy(&decoder_); }
 
-  vpx_codec_err_t PeekStream(const uint8_t *cxdata, size_t size,
-                             vpx_codec_stream_info_t *stream_info);
+  aom_codec_err_t PeekStream(const uint8_t *cxdata, size_t size,
+                             aom_codec_stream_info_t *stream_info);
 
-  vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size);
+  aom_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size);
 
-  vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size,
+  aom_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size,
                               void *user_priv);
 
   DxDataIterator GetDxData() { return DxDataIterator(&decoder_); }
 
   void set_deadline(unsigned long deadline) { deadline_ = deadline; }
 
-  void Control(int ctrl_id, int arg) { Control(ctrl_id, arg, VPX_CODEC_OK); }
+  void Control(int ctrl_id, int arg) { Control(ctrl_id, arg, AOM_CODEC_OK); }
 
   void Control(int ctrl_id, const void *arg) {
     InitOnce();
-    const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+    const aom_codec_err_t res = aom_codec_control_(&decoder_, ctrl_id, arg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << DecodeError();
   }
 
-  void Control(int ctrl_id, int arg, vpx_codec_err_t expected_value) {
+  void Control(int ctrl_id, int arg, aom_codec_err_t expected_value) {
     InitOnce();
-    const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
+    const aom_codec_err_t res = aom_codec_control_(&decoder_, ctrl_id, arg);
     ASSERT_EQ(expected_value, res) << DecodeError();
   }
 
   const char *DecodeError() {
-    const char *detail = vpx_codec_error_detail(&decoder_);
-    return detail ? detail : vpx_codec_error(&decoder_);
+    const char *detail = aom_codec_error_detail(&decoder_);
+    return detail ? detail : aom_codec_error(&decoder_);
   }
 
   // Passes the external frame buffer information to libaom.
-  vpx_codec_err_t SetFrameBufferFunctions(
-      vpx_get_frame_buffer_cb_fn_t cb_get,
-      vpx_release_frame_buffer_cb_fn_t cb_release, void *user_priv) {
+  aom_codec_err_t SetFrameBufferFunctions(
+      aom_get_frame_buffer_cb_fn_t cb_get,
+      aom_release_frame_buffer_cb_fn_t cb_release, void *user_priv) {
     InitOnce();
-    return vpx_codec_set_frame_buffer_functions(&decoder_, cb_get, cb_release,
+    return aom_codec_set_frame_buffer_functions(&decoder_, cb_get, cb_release,
                                                 user_priv);
   }
 
   const char *GetDecoderName() const {
-    return vpx_codec_iface_name(CodecInterface());
+    return aom_codec_iface_name(CodecInterface());
   }
 
   bool IsVP8() const;
 
-  bool IsVP10() const;
+  bool IsAV1() const;
 
-  vpx_codec_ctx_t *GetDecoder() { return &decoder_; }
+  aom_codec_ctx_t *GetDecoder() { return &decoder_; }
 
  protected:
-  virtual vpx_codec_iface_t *CodecInterface() const = 0;
+  virtual aom_codec_iface_t *CodecInterface() const = 0;
 
   void InitOnce() {
     if (!init_done_) {
-      const vpx_codec_err_t res =
-          vpx_codec_dec_init(&decoder_, CodecInterface(), &cfg_, flags_);
-      ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+      const aom_codec_err_t res =
+          aom_codec_dec_init(&decoder_, CodecInterface(), &cfg_, flags_);
+      ASSERT_EQ(AOM_CODEC_OK, res) << DecodeError();
       init_done_ = true;
     }
   }
 
-  vpx_codec_ctx_t decoder_;
-  vpx_codec_dec_cfg_t cfg_;
-  vpx_codec_flags_t flags_;
+  aom_codec_ctx_t decoder_;
+  aom_codec_dec_cfg_t cfg_;
+  aom_codec_flags_t flags_;
   unsigned int deadline_;
   bool init_done_;
 };
@@ -129,31 +129,31 @@
   // Main decoding loop
   virtual void RunLoop(CompressedVideoSource *video);
   virtual void RunLoop(CompressedVideoSource *video,
-                       const vpx_codec_dec_cfg_t &dec_cfg);
+                       const aom_codec_dec_cfg_t &dec_cfg);
 
-  virtual void set_cfg(const vpx_codec_dec_cfg_t &dec_cfg);
-  virtual void set_flags(const vpx_codec_flags_t flags);
+  virtual void set_cfg(const aom_codec_dec_cfg_t &dec_cfg);
+  virtual void set_flags(const aom_codec_flags_t flags);
 
   // Hook to be called before decompressing every frame.
   virtual void PreDecodeFrameHook(const CompressedVideoSource & /*video*/,
                                   Decoder * /*decoder*/) {}
 
   // Hook to be called to handle decode result. Return true to continue.
-  virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
+  virtual bool HandleDecodeResult(const aom_codec_err_t res_dec,
                                   const CompressedVideoSource & /*video*/,
                                   Decoder *decoder) {
-    EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
-    return VPX_CODEC_OK == res_dec;
+    EXPECT_EQ(AOM_CODEC_OK, res_dec) << decoder->DecodeError();
+    return AOM_CODEC_OK == res_dec;
   }
 
   // Hook to be called on every decompressed frame.
-  virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
+  virtual void DecompressedFrameHook(const aom_image_t & /*img*/,
                                      const unsigned int /*frame_number*/) {}
 
   // Hook to be called on peek result
   virtual void HandlePeekResult(Decoder *const decoder,
                                 CompressedVideoSource *video,
-                                const vpx_codec_err_t res_peek);
+                                const aom_codec_err_t res_peek);
 
  protected:
   explicit DecoderTest(const CodecFactory *codec)
@@ -162,8 +162,8 @@
   virtual ~DecoderTest() {}
 
   const CodecFactory *codec_;
-  vpx_codec_dec_cfg_t cfg_;
-  vpx_codec_flags_t flags_;
+  aom_codec_dec_cfg_t cfg_;
+  aom_codec_flags_t flags_;
 };
 
 }  // namespace libaom_test
diff --git a/test/decode_to_md5.sh b/test/decode_to_md5.sh
index 54d1593..e5c6a01 100755
--- a/test/decode_to_md5.sh
+++ b/test/decode_to_md5.sh
@@ -16,10 +16,10 @@
 . $(dirname $0)/tools_common.sh
 
 # Environment check: Make sure input is available:
-#   $VP8_IVF_FILE and $VP9_IVF_FILE are required.
+#   $VP8_IVF_FILE and $AV1_IVF_FILE are required.
 decode_to_md5_verify_environment() {
-  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${VP9_IVF_FILE}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${AV1_IVF_FILE}" ]; then
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
@@ -28,18 +28,18 @@
 # interpreted as codec name and used solely to name the output file. $3 is the
 # expected md5 sum: It must match that of the final frame.
 decode_to_md5() {
-  local decoder="${LIBAOM_BIN_PATH}/decode_to_md5${VPX_TEST_EXE_SUFFIX}"
+  local decoder="${LIBAOM_BIN_PATH}/decode_to_md5${AOM_TEST_EXE_SUFFIX}"
   local input_file="$1"
   local codec="$2"
   local expected_md5="$3"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/decode_to_md5_${codec}"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/decode_to_md5_${codec}"
 
   if [ ! -x "${decoder}" ]; then
     elog "${decoder} does not exist or is not executable."
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
+  eval "${AOM_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
       ${devnull}
 
   [ -e "${output_file}" ] || return 1
@@ -58,16 +58,16 @@
   fi
 }
 
-decode_to_md5_vp9() {
+decode_to_md5_av1() {
   # expected MD5 sum for the last frame.
   local expected_md5="2952c0eae93f3dadd1aa84c50d3fd6d2"
 
-  if [ "$(vp9_decode_available)" = "yes" ]; then
-    decode_to_md5 "${VP9_IVF_FILE}" "vp9" "${expected_md5}"
+  if [ "$(av1_decode_available)" = "yes" ]; then
+    decode_to_md5 "${AV1_IVF_FILE}" "av1" "${expected_md5}"
   fi
 }
 
 decode_to_md5_tests="decode_to_md5_vp8
-                     decode_to_md5_vp9"
+                     decode_to_md5_av1"
 
 run_tests decode_to_md5_verify_environment "${decode_to_md5_tests}"
diff --git a/test/decode_with_drops.sh b/test/decode_with_drops.sh
index bb2416a..2a8ce99 100755
--- a/test/decode_with_drops.sh
+++ b/test/decode_with_drops.sh
@@ -16,10 +16,10 @@
 . $(dirname $0)/tools_common.sh
 
 # Environment check: Make sure input is available:
-#   $VP8_IVF_FILE and $VP9_IVF_FILE are required.
+#   $VP8_IVF_FILE and $AV1_IVF_FILE are required.
 decode_with_drops_verify_environment() {
-  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${VP9_IVF_FILE}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${AV1_IVF_FILE}" ]; then
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
@@ -28,10 +28,10 @@
 # to name the output file. $3 is the drop mode, and is passed directly to
 # decode_with_drops.
 decode_with_drops() {
-  local decoder="${LIBAOM_BIN_PATH}/decode_with_drops${VPX_TEST_EXE_SUFFIX}"
+  local decoder="${LIBAOM_BIN_PATH}/decode_with_drops${AOM_TEST_EXE_SUFFIX}"
   local input_file="$1"
   local codec="$2"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/decode_with_drops_${codec}"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/decode_with_drops_${codec}"
   local drop_mode="$3"
 
   if [ ! -x "${decoder}" ]; then
@@ -39,7 +39,7 @@
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
+  eval "${AOM_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
       "${drop_mode}" ${devnull}
 
   [ -e "${output_file}" ] || return 1
@@ -59,21 +59,21 @@
   fi
 }
 
-# Decodes $VP9_IVF_FILE while dropping frames, twice: once in sequence mode,
+# Decodes $AV1_IVF_FILE while dropping frames, twice: once in sequence mode,
 # and once in pattern mode.
-# Note: This test assumes that $VP9_IVF_FILE has exactly 20 frames, and could
+# Note: This test assumes that $AV1_IVF_FILE has exactly 20 frames, and could
 # break if the file is modified.
-decode_with_drops_vp9() {
-  if [ "$(vp9_decode_available)" = "yes" ]; then
+decode_with_drops_av1() {
+  if [ "$(av1_decode_available)" = "yes" ]; then
     # Test sequence mode: Drop frames 2-28.
-    decode_with_drops "${VP9_IVF_FILE}" "vp9" "2-19"
+    decode_with_drops "${AV1_IVF_FILE}" "av1" "2-19"
 
     # Test pattern mode: Drop 3 of every 4 frames.
-    decode_with_drops "${VP9_IVF_FILE}" "vp9" "3/4"
+    decode_with_drops "${AV1_IVF_FILE}" "av1" "3/4"
   fi
 }
 
 decode_with_drops_tests="decode_with_drops_vp8
-                         decode_with_drops_vp9"
+                         decode_with_drops_av1"
 
 run_tests decode_with_drops_verify_environment "${decode_with_drops_tests}"
diff --git a/test/denoiser_sse2_test.cc b/test/denoiser_sse2_test.cc
index 571f3cf..115839b 100644
--- a/test/denoiser_sse2_test.cc
+++ b/test/denoiser_sse2_test.cc
@@ -19,7 +19,7 @@
 #include "test/util.h"
 
 #include "aom_scale/yv12config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "av1/common/reconinter.h"
 #include "av1/encoder/context_tree.h"
 #include "av1/encoder/denoiser.h"
@@ -29,9 +29,9 @@
 namespace {
 
 const int kNumPixels = 64 * 64;
-class VP9DenoiserTest : public ::testing::TestWithParam<BLOCK_SIZE> {
+class AV1DenoiserTest : public ::testing::TestWithParam<BLOCK_SIZE> {
  public:
-  virtual ~VP9DenoiserTest() {}
+  virtual ~AV1DenoiserTest() {}
 
   virtual void SetUp() { bs_ = GetParam(); }
 
@@ -41,7 +41,7 @@
   BLOCK_SIZE bs_;
 };
 
-TEST_P(VP9DenoiserTest, BitexactCheck) {
+TEST_P(AV1DenoiserTest, BitexactCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = 4000;
 
@@ -72,11 +72,11 @@
       mc_avg_block[j] = (temp < 0) ? 0 : ((temp > 255) ? 255 : temp);
     }
 
-    ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(sig_block, 64, mc_avg_block,
+    ASM_REGISTER_STATE_CHECK(av1_denoiser_filter_c(sig_block, 64, mc_avg_block,
                                                    64, avg_block_c, 64, 0, bs_,
                                                    motion_magnitude_random));
 
-    ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_sse2(
+    ASM_REGISTER_STATE_CHECK(av1_denoiser_filter_sse2(
         sig_block, 64, mc_avg_block, 64, avg_block_sse2, 64, 0, bs_,
         motion_magnitude_random));
 
@@ -90,7 +90,7 @@
 }
 
 // Test for all block size.
-INSTANTIATE_TEST_CASE_P(SSE2, VP9DenoiserTest,
+INSTANTIATE_TEST_CASE_P(SSE2, AV1DenoiserTest,
                         ::testing::Values(BLOCK_8X8, BLOCK_8X16, BLOCK_16X8,
                                           BLOCK_16X16, BLOCK_16X32, BLOCK_32X16,
                                           BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
diff --git a/test/encode_api_test.cc b/test/encode_api_test.cc
index dc3a52b..c45c761 100644
--- a/test/encode_api_test.cc
+++ b/test/encode_api_test.cc
@@ -10,52 +10,52 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "aom/vp8cx.h"
-#include "aom/vpx_encoder.h"
+#include "./aom_config.h"
+#include "aom/aomcx.h"
+#include "aom/aom_encoder.h"
 
 namespace {
 
 #define NELEMENTS(x) static_cast<int>(sizeof(x) / sizeof(x[0]))
 
 TEST(EncodeAPI, InvalidParams) {
-  static const vpx_codec_iface_t *kCodecs[] = {
-#if CONFIG_VP10_ENCODER
-    &vpx_codec_vp10_cx_algo,
+  static const aom_codec_iface_t *kCodecs[] = {
+#if CONFIG_AV1_ENCODER
+    &aom_codec_av1_cx_algo,
 #endif
   };
   uint8_t buf[1] = { 0 };
-  vpx_image_t img;
-  vpx_codec_ctx_t enc;
-  vpx_codec_enc_cfg_t cfg;
+  aom_image_t img;
+  aom_codec_ctx_t enc;
+  aom_codec_enc_cfg_t cfg;
 
-  EXPECT_EQ(&img, vpx_img_wrap(&img, VPX_IMG_FMT_I420, 1, 1, 1, buf));
+  EXPECT_EQ(&img, aom_img_wrap(&img, AOM_IMG_FMT_I420, 1, 1, 1, buf));
 
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_enc_init(NULL, NULL, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_enc_init(&enc, NULL, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_encode(NULL, NULL, 0, 0, 0, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_encode(NULL, &img, 0, 0, 0, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_destroy(NULL));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-            vpx_codec_enc_config_default(NULL, NULL, 0));
-  EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-            vpx_codec_enc_config_default(NULL, &cfg, 0));
-  EXPECT_TRUE(vpx_codec_error(NULL) != NULL);
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_enc_init(NULL, NULL, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_enc_init(&enc, NULL, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_encode(NULL, NULL, 0, 0, 0, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_encode(NULL, &img, 0, 0, 0, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM, aom_codec_destroy(NULL));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+            aom_codec_enc_config_default(NULL, NULL, 0));
+  EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+            aom_codec_enc_config_default(NULL, &cfg, 0));
+  EXPECT_TRUE(aom_codec_error(NULL) != NULL);
 
   for (int i = 0; i < NELEMENTS(kCodecs); ++i) {
-    SCOPED_TRACE(vpx_codec_iface_name(kCodecs[i]));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_enc_init(NULL, kCodecs[i], NULL, 0));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_enc_init(&enc, kCodecs[i], NULL, 0));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_enc_config_default(kCodecs[i], &cfg, 1));
+    SCOPED_TRACE(aom_codec_iface_name(kCodecs[i]));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+              aom_codec_enc_init(NULL, kCodecs[i], NULL, 0));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+              aom_codec_enc_init(&enc, kCodecs[i], NULL, 0));
+    EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+              aom_codec_enc_config_default(kCodecs[i], &cfg, 1));
 
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_config_default(kCodecs[i], &cfg, 0));
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_init(&enc, kCodecs[i], &cfg, 0));
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_encode(&enc, NULL, 0, 0, 0, 0));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_enc_config_default(kCodecs[i], &cfg, 0));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_enc_init(&enc, kCodecs[i], &cfg, 0));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_encode(&enc, NULL, 0, 0, 0, 0));
 
-    EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&enc));
+    EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&enc));
   }
 }
 
diff --git a/test/encode_perf_test.cc b/test/encode_perf_test.cc
index 7f33d55..39330aa 100644
--- a/test/encode_perf_test.cc
+++ b/test/encode_perf_test.cc
@@ -9,14 +9,14 @@
  */
 #include <string>
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "./vpx_config.h"
-#include "./vpx_version.h"
+#include "./aom_config.h"
+#include "./aom_version.h"
 #include "test/codec_factory.h"
 #include "test/encode_test_driver.h"
 #include "test/i420_video_source.h"
 #include "test/util.h"
 #include "test/y4m_video_source.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 
 namespace {
 
@@ -35,7 +35,7 @@
   int frames;
 };
 
-const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
+const EncodePerfTestVideo kAV1EncodePerfTestVectors[] = {
   EncodePerfTestVideo("desktop_640_360_30.yuv", 640, 360, 200, 2484),
   EncodePerfTestVideo("kirland_640_480_30.yuv", 640, 480, 200, 300),
   EncodePerfTestVideo("macmarcomoving_640_480_30.yuv", 640, 480, 200, 987),
@@ -53,15 +53,15 @@
 
 #define NELEMENTS(x) (sizeof((x)) / sizeof((x)[0]))
 
-class VP9EncodePerfTest
+class AV1EncodePerfTest
     : public ::libaom_test::EncoderTest,
       public ::libaom_test::CodecTestWithParam<libaom_test::TestMode> {
  protected:
-  VP9EncodePerfTest()
+  AV1EncodePerfTest()
       : EncoderTest(GET_PARAM(0)), min_psnr_(kMaxPsnr), nframes_(0),
         encoding_mode_(GET_PARAM(1)), speed_(0), threads_(1) {}
 
-  virtual ~VP9EncodePerfTest() {}
+  virtual ~AV1EncodePerfTest() {}
 
   virtual void SetUp() {
     InitializeConfig();
@@ -77,7 +77,7 @@
     cfg_.rc_buf_initial_sz = 500;
     cfg_.rc_buf_optimal_sz = 600;
     cfg_.rc_resize_allowed = 0;
-    cfg_.rc_end_usage = VPX_CBR;
+    cfg_.rc_end_usage = AOM_CBR;
     cfg_.g_error_resilient = 1;
     cfg_.g_threads = threads_;
   }
@@ -86,10 +86,10 @@
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
       const int log2_tile_columns = 3;
-      encoder->Control(VP8E_SET_CPUUSED, speed_);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, log2_tile_columns);
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 0);
+      encoder->Control(AOME_SET_CPUUSED, speed_);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, log2_tile_columns);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING, 1);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 0);
     }
   }
 
@@ -98,7 +98,7 @@
     nframes_ = 0;
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     if (pkt->data.psnr.psnr[0] < min_psnr_) {
       min_psnr_ = pkt->data.psnr.psnr[0];
     }
@@ -121,41 +121,41 @@
   unsigned int threads_;
 };
 
-TEST_P(VP9EncodePerfTest, PerfTest) {
-  for (size_t i = 0; i < NELEMENTS(kVP9EncodePerfTestVectors); ++i) {
+TEST_P(AV1EncodePerfTest, PerfTest) {
+  for (size_t i = 0; i < NELEMENTS(kAV1EncodePerfTestVectors); ++i) {
     for (size_t j = 0; j < NELEMENTS(kEncodePerfTestSpeeds); ++j) {
       for (size_t k = 0; k < NELEMENTS(kEncodePerfTestThreads); ++k) {
-        if (kVP9EncodePerfTestVectors[i].width < 512 &&
+        if (kAV1EncodePerfTestVectors[i].width < 512 &&
             kEncodePerfTestThreads[k] > 1)
           continue;
-        else if (kVP9EncodePerfTestVectors[i].width < 1024 &&
+        else if (kAV1EncodePerfTestVectors[i].width < 1024 &&
                  kEncodePerfTestThreads[k] > 2)
           continue;
 
         set_threads(kEncodePerfTestThreads[k]);
         SetUp();
 
-        const vpx_rational timebase = { 33333333, 1000000000 };
+        const aom_rational timebase = { 33333333, 1000000000 };
         cfg_.g_timebase = timebase;
-        cfg_.rc_target_bitrate = kVP9EncodePerfTestVectors[i].bitrate;
+        cfg_.rc_target_bitrate = kAV1EncodePerfTestVectors[i].bitrate;
 
-        init_flags_ = VPX_CODEC_USE_PSNR;
+        init_flags_ = AOM_CODEC_USE_PSNR;
 
-        const unsigned frames = kVP9EncodePerfTestVectors[i].frames;
-        const char *video_name = kVP9EncodePerfTestVectors[i].name;
+        const unsigned frames = kAV1EncodePerfTestVectors[i].frames;
+        const char *video_name = kAV1EncodePerfTestVectors[i].name;
         libaom_test::I420VideoSource video(
-            video_name, kVP9EncodePerfTestVectors[i].width,
-            kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
-            kVP9EncodePerfTestVectors[i].frames);
+            video_name, kAV1EncodePerfTestVectors[i].width,
+            kAV1EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
+            kAV1EncodePerfTestVectors[i].frames);
         set_speed(kEncodePerfTestSpeeds[j]);
 
-        vpx_usec_timer t;
-        vpx_usec_timer_start(&t);
+        aom_usec_timer t;
+        aom_usec_timer_start(&t);
 
         ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 
-        vpx_usec_timer_mark(&t);
-        const double elapsed_secs = vpx_usec_timer_elapsed(&t) / kUsecsInSec;
+        aom_usec_timer_mark(&t);
+        const double elapsed_secs = aom_usec_timer_elapsed(&t) / kUsecsInSec;
         const double fps = frames / elapsed_secs;
         const double minimum_psnr = min_psnr();
         std::string display_name(video_name);
@@ -182,6 +182,6 @@
   }
 }
 
-VP10_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
-                           ::testing::Values(::libaom_test::kRealTime));
+AV1_INSTANTIATE_TEST_CASE(AV1EncodePerfTest,
+                          ::testing::Values(::libaom_test::kRealTime));
 }  // namespace
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index cb1df9b..915036f 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -12,7 +12,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
 #include "test/codec_factory.h"
 #include "test/decode_test_driver.h"
@@ -22,8 +22,8 @@
 
 namespace libaom_test {
 void Encoder::InitEncoder(VideoSource *video) {
-  vpx_codec_err_t res;
-  const vpx_image_t *img = video->img();
+  aom_codec_err_t res;
+  const aom_image_t *img = video->img();
 
   if (video->img() && !encoder_.priv) {
     cfg_.g_w = img->d_w;
@@ -31,18 +31,18 @@
     cfg_.g_timebase = video->timebase();
     cfg_.rc_twopass_stats_in = stats_->buf();
 
-    res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+    res = aom_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
 
-#if CONFIG_VP10_ENCODER
-    if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
-// Default to 1 tile column for VP10. With CONFIG_EXT_TILE, the
+#if CONFIG_AV1_ENCODER
+    if (CodecInterface() == &aom_codec_av1_cx_algo) {
+// Default to 1 tile column for AV1. With CONFIG_EXT_TILE, the
 // default is already the largest possible tile size
 #if !CONFIG_EXT_TILE
       const int log2_tile_columns = 0;
-      res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+      res = aom_codec_control_(&encoder_, AV1E_SET_TILE_COLUMNS,
                                log2_tile_columns);
-      ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+      ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
 #endif  // !CONFIG_EXT_TILE
     } else
 #endif
@@ -60,8 +60,8 @@
   // Handle twopass stats
   CxDataIterator iter = GetCxData();
 
-  while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
-    if (pkt->kind != VPX_CODEC_STATS_PKT) continue;
+  while (const aom_codec_cx_pkt_t *pkt = iter.Next()) {
+    if (pkt->kind != AOM_CODEC_STATS_PKT) continue;
 
     stats_->Append(*pkt);
   }
@@ -69,48 +69,48 @@
 
 void Encoder::EncodeFrameInternal(const VideoSource &video,
                                   const unsigned long frame_flags) {
-  vpx_codec_err_t res;
-  const vpx_image_t *img = video.img();
+  aom_codec_err_t res;
+  const aom_image_t *img = video.img();
 
   // Handle frame resizing
   if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) {
     cfg_.g_w = img->d_w;
     cfg_.g_h = img->d_h;
-    res = vpx_codec_enc_config_set(&encoder_, &cfg_);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+    res = aom_codec_enc_config_set(&encoder_, &cfg_);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
   }
 
   // Encode the frame
-  API_REGISTER_STATE_CHECK(res = vpx_codec_encode(&encoder_, img, video.pts(),
+  API_REGISTER_STATE_CHECK(res = aom_codec_encode(&encoder_, img, video.pts(),
                                                   video.duration(), frame_flags,
                                                   deadline_));
-  ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+  ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
 }
 
 void Encoder::Flush() {
-  const vpx_codec_err_t res =
-      vpx_codec_encode(&encoder_, NULL, 0, 0, 0, deadline_);
+  const aom_codec_err_t res =
+      aom_codec_encode(&encoder_, NULL, 0, 0, 0, deadline_);
   if (!encoder_.priv)
-    ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
+    ASSERT_EQ(AOM_CODEC_ERROR, res) << EncoderError();
   else
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
 }
 
 void EncoderTest::InitializeConfig() {
-  const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
-  dec_cfg_ = vpx_codec_dec_cfg_t();
-  ASSERT_EQ(VPX_CODEC_OK, res);
+  const aom_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+  dec_cfg_ = aom_codec_dec_cfg_t();
+  ASSERT_EQ(AOM_CODEC_OK, res);
 }
 
 void EncoderTest::SetMode(TestMode mode) {
   switch (mode) {
-    case kRealTime: deadline_ = VPX_DL_REALTIME; break;
+    case kRealTime: deadline_ = AOM_DL_REALTIME; break;
 
     case kOnePassGood:
-    case kTwoPassGood: deadline_ = VPX_DL_GOOD_QUALITY; break;
+    case kTwoPassGood: deadline_ = AOM_DL_GOOD_QUALITY; break;
 
     case kOnePassBest:
-    case kTwoPassBest: deadline_ = VPX_DL_BEST_QUALITY; break;
+    case kTwoPassBest: deadline_ = AOM_DL_BEST_QUALITY; break;
 
     default: ASSERT_TRUE(false) << "Unexpected mode " << mode;
   }
@@ -148,7 +148,7 @@
 
 // The function should return "true" most of the time, therefore no early
 // break-out is implemented within the match checking process.
-static bool compare_img(const vpx_image_t *img1, const vpx_image_t *img2,
+static bool compare_img(const aom_image_t *img1, const aom_image_t *img2,
                         int *const mismatch_row, int *const mismatch_col,
                         int *const mismatch_plane, int *const mismatch_pix1,
                         int *const mismatch_pix2) {
@@ -164,35 +164,35 @@
     return false;
   }
 
-  if (!compare_plane(img1->planes[VPX_PLANE_Y], img1->stride[VPX_PLANE_Y],
-                     img2->planes[VPX_PLANE_Y], img2->stride[VPX_PLANE_Y], w_y,
+  if (!compare_plane(img1->planes[AOM_PLANE_Y], img1->stride[AOM_PLANE_Y],
+                     img2->planes[AOM_PLANE_Y], img2->stride[AOM_PLANE_Y], w_y,
                      h_y, mismatch_row, mismatch_col, mismatch_pix1,
                      mismatch_pix2)) {
-    if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_Y;
+    if (mismatch_plane != NULL) *mismatch_plane = AOM_PLANE_Y;
     return false;
   }
 
-  if (!compare_plane(img1->planes[VPX_PLANE_U], img1->stride[VPX_PLANE_U],
-                     img2->planes[VPX_PLANE_U], img2->stride[VPX_PLANE_U], w_uv,
+  if (!compare_plane(img1->planes[AOM_PLANE_U], img1->stride[AOM_PLANE_U],
+                     img2->planes[AOM_PLANE_U], img2->stride[AOM_PLANE_U], w_uv,
                      h_uv, mismatch_row, mismatch_col, mismatch_pix1,
                      mismatch_pix2)) {
-    if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_U;
+    if (mismatch_plane != NULL) *mismatch_plane = AOM_PLANE_U;
     return false;
   }
 
-  if (!compare_plane(img1->planes[VPX_PLANE_V], img1->stride[VPX_PLANE_V],
-                     img2->planes[VPX_PLANE_V], img2->stride[VPX_PLANE_V], w_uv,
+  if (!compare_plane(img1->planes[AOM_PLANE_V], img1->stride[AOM_PLANE_V],
+                     img2->planes[AOM_PLANE_V], img2->stride[AOM_PLANE_V], w_uv,
                      h_uv, mismatch_row, mismatch_col, mismatch_pix1,
                      mismatch_pix2)) {
-    if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_U;
+    if (mismatch_plane != NULL) *mismatch_plane = AOM_PLANE_U;
     return false;
   }
 
   return true;
 }
 
-void EncoderTest::MismatchHook(const vpx_image_t *img_enc,
-                               const vpx_image_t *img_dec) {
+void EncoderTest::MismatchHook(const aom_image_t *img_enc,
+                               const aom_image_t *img_dec) {
   int mismatch_row = 0;
   int mismatch_col = 0;
   int mismatch_plane = 0;
@@ -212,7 +212,7 @@
 }
 
 void EncoderTest::RunLoop(VideoSource *video) {
-  vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
+  aom_codec_dec_cfg_t dec_cfg = aom_codec_dec_cfg_t();
 
   stats_.Reset();
 
@@ -221,11 +221,11 @@
     last_pts_ = 0;
 
     if (passes_ == 1)
-      cfg_.g_pass = VPX_RC_ONE_PASS;
+      cfg_.g_pass = AOM_RC_ONE_PASS;
     else if (pass == 0)
-      cfg_.g_pass = VPX_RC_FIRST_PASS;
+      cfg_.g_pass = AOM_RC_FIRST_PASS;
     else
-      cfg_.g_pass = VPX_RC_LAST_PASS;
+      cfg_.g_pass = AOM_RC_LAST_PASS;
 
     BeginPassHook(pass);
     testing::internal::scoped_ptr<Encoder> encoder(
@@ -239,16 +239,16 @@
     unsigned long dec_init_flags = 0;  // NOLINT
     // Use fragment decoder if encoder outputs partitions.
     // NOTE: fragment decoder and partition encoder are only supported by VP8.
-    if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
-      dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
+    if (init_flags_ & AOM_CODEC_USE_OUTPUT_PARTITION)
+      dec_init_flags |= AOM_CODEC_USE_INPUT_FRAGMENTS;
     testing::internal::scoped_ptr<Decoder> decoder(
         codec_->CreateDecoder(dec_cfg, dec_init_flags, 0));
-#if CONFIG_VP10 && CONFIG_EXT_TILE
-    if (decoder->IsVP10()) {
+#if CONFIG_AV1 && CONFIG_EXT_TILE
+    if (decoder->IsAV1()) {
       // Set dec_cfg.tile_row = -1 and dec_cfg.tile_col = -1 so that the whole
       // frame is decoded.
-      decoder->Control(VP10_SET_DECODE_TILE_ROW, -1);
-      decoder->Control(VP10_SET_DECODE_TILE_COL, -1);
+      decoder->Control(AV1_SET_DECODE_TILE_ROW, -1);
+      decoder->Control(AV1_SET_DECODE_TILE_COL, -1);
     }
 #endif
 
@@ -264,14 +264,14 @@
 
       bool has_cxdata = false;
       bool has_dxdata = false;
-      while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
+      while (const aom_codec_cx_pkt_t *pkt = iter.Next()) {
         pkt = MutateEncoderOutputHook(pkt);
         again = true;
         switch (pkt->kind) {
-          case VPX_CODEC_CX_FRAME_PKT:
+          case AOM_CODEC_CX_FRAME_PKT:
             has_cxdata = true;
             if (decoder.get() != NULL && DoDecode()) {
-              vpx_codec_err_t res_dec = decoder->DecodeFrame(
+              aom_codec_err_t res_dec = decoder->DecodeFrame(
                   (const uint8_t *)pkt->data.frame.buf, pkt->data.frame.sz);
 
               if (!HandleDecodeResult(res_dec, *video, decoder.get())) break;
@@ -283,22 +283,22 @@
             FramePktHook(pkt);
             break;
 
-          case VPX_CODEC_PSNR_PKT: PSNRPktHook(pkt); break;
+          case AOM_CODEC_PSNR_PKT: PSNRPktHook(pkt); break;
 
           default: break;
         }
       }
 
       // Flush the decoder when there are no more fragments.
-      if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
-        const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+      if ((init_flags_ & AOM_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
+        const aom_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
         if (!HandleDecodeResult(res_dec, *video, decoder.get())) break;
       }
 
       if (has_dxdata && has_cxdata) {
-        const vpx_image_t *img_enc = encoder->GetPreviewFrame();
+        const aom_image_t *img_enc = encoder->GetPreviewFrame();
         DxDataIterator dec_iter = decoder->GetDxData();
-        const vpx_image_t *img_dec = dec_iter.Next();
+        const aom_image_t *img_dec = dec_iter.Next();
         if (img_enc && img_dec) {
           const bool res =
               compare_img(img_enc, img_dec, NULL, NULL, NULL, NULL, NULL);
diff --git a/test/encode_test_driver.h b/test/encode_test_driver.h
index 5762be5..f74faf8 100644
--- a/test/encode_test_driver.h
+++ b/test/encode_test_driver.h
@@ -15,11 +15,11 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#if CONFIG_VP10_ENCODER
-#include "aom/vp8cx.h"
+#include "./aom_config.h"
+#if CONFIG_AV1_ENCODER
+#include "aom/aomcx.h"
 #endif
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 namespace libaom_test {
 
@@ -48,28 +48,28 @@
 // Provides an object to handle the libaom get_cx_data() iteration pattern
 class CxDataIterator {
  public:
-  explicit CxDataIterator(vpx_codec_ctx_t *encoder)
+  explicit CxDataIterator(aom_codec_ctx_t *encoder)
       : encoder_(encoder), iter_(NULL) {}
 
-  const vpx_codec_cx_pkt_t *Next() {
-    return vpx_codec_get_cx_data(encoder_, &iter_);
+  const aom_codec_cx_pkt_t *Next() {
+    return aom_codec_get_cx_data(encoder_, &iter_);
   }
 
  private:
-  vpx_codec_ctx_t *encoder_;
-  vpx_codec_iter_t iter_;
+  aom_codec_ctx_t *encoder_;
+  aom_codec_iter_t iter_;
 };
 
 // Implements an in-memory store for libaom twopass statistics
 class TwopassStatsStore {
  public:
-  void Append(const vpx_codec_cx_pkt_t &pkt) {
+  void Append(const aom_codec_cx_pkt_t &pkt) {
     buffer_.append(reinterpret_cast<char *>(pkt.data.twopass_stats.buf),
                    pkt.data.twopass_stats.sz);
   }
 
-  vpx_fixed_buf_t buf() {
-    const vpx_fixed_buf_t buf = { &buffer_[0], buffer_.size() };
+  aom_fixed_buf_t buf() {
+    const aom_fixed_buf_t buf = { &buffer_[0], buffer_.size() };
     return buf;
   }
 
@@ -86,64 +86,64 @@
 // level of abstraction will be fleshed out as more tests are written.
 class Encoder {
  public:
-  Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
+  Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
           const unsigned long init_flags, TwopassStatsStore *stats)
       : cfg_(cfg), deadline_(deadline), init_flags_(init_flags), stats_(stats) {
     memset(&encoder_, 0, sizeof(encoder_));
   }
 
-  virtual ~Encoder() { vpx_codec_destroy(&encoder_); }
+  virtual ~Encoder() { aom_codec_destroy(&encoder_); }
 
   CxDataIterator GetCxData() { return CxDataIterator(&encoder_); }
 
   void InitEncoder(VideoSource *video);
 
-  const vpx_image_t *GetPreviewFrame() {
-    return vpx_codec_get_preview_frame(&encoder_);
+  const aom_image_t *GetPreviewFrame() {
+    return aom_codec_get_preview_frame(&encoder_);
   }
-  // This is a thin wrapper around vpx_codec_encode(), so refer to
-  // vpx_encoder.h for its semantics.
+  // This is a thin wrapper around aom_codec_encode(), so refer to
+  // aom_encoder.h for its semantics.
   void EncodeFrame(VideoSource *video, const unsigned long frame_flags);
 
   // Convenience wrapper for EncodeFrame()
   void EncodeFrame(VideoSource *video) { EncodeFrame(video, 0); }
 
   void Control(int ctrl_id, int arg) {
-    const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+    const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
   }
 
   void Control(int ctrl_id, int *arg) {
-    const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+    const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
   }
 
-  void Control(int ctrl_id, struct vpx_scaling_mode *arg) {
-    const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+  void Control(int ctrl_id, struct aom_scaling_mode *arg) {
+    const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
   }
 
-#if CONFIG_VP10_ENCODER
-  void Control(int ctrl_id, vpx_active_map_t *arg) {
-    const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+#if CONFIG_AV1_ENCODER
+  void Control(int ctrl_id, aom_active_map_t *arg) {
+    const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
   }
 #endif
 
-  void Config(const vpx_codec_enc_cfg_t *cfg) {
-    const vpx_codec_err_t res = vpx_codec_enc_config_set(&encoder_, cfg);
-    ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+  void Config(const aom_codec_enc_cfg_t *cfg) {
+    const aom_codec_err_t res = aom_codec_enc_config_set(&encoder_, cfg);
+    ASSERT_EQ(AOM_CODEC_OK, res) << EncoderError();
     cfg_ = *cfg;
   }
 
   void set_deadline(unsigned long deadline) { deadline_ = deadline; }
 
  protected:
-  virtual vpx_codec_iface_t *CodecInterface() const = 0;
+  virtual aom_codec_iface_t *CodecInterface() const = 0;
 
   const char *EncoderError() {
-    const char *detail = vpx_codec_error_detail(&encoder_);
-    return detail ? detail : vpx_codec_error(&encoder_);
+    const char *detail = aom_codec_error_detail(&encoder_);
+    return detail ? detail : aom_codec_error(&encoder_);
   }
 
   // Encode an image
@@ -153,8 +153,8 @@
   // Flush the encoder on EOS
   void Flush();
 
-  vpx_codec_ctx_t encoder_;
-  vpx_codec_enc_cfg_t cfg_;
+  aom_codec_ctx_t encoder_;
+  aom_codec_enc_cfg_t cfg_;
   unsigned long deadline_;
   unsigned long init_flags_;
   TwopassStatsStore *stats_;
@@ -204,10 +204,10 @@
                                   Encoder * /*encoder*/) {}
 
   // Hook to be called on every compressed data packet.
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
+  virtual void FramePktHook(const aom_codec_cx_pkt_t * /*pkt*/) {}
 
   // Hook to be called on every PSNR packet.
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t * /*pkt*/) {}
 
   // Hook to determine whether the encode loop should continue.
   virtual bool Continue() const {
@@ -219,35 +219,35 @@
   virtual bool DoDecode() const { return 1; }
 
   // Hook to handle encode/decode mismatch
-  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2);
+  virtual void MismatchHook(const aom_image_t *img1, const aom_image_t *img2);
 
   // Hook to be called on every decompressed frame.
-  virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
-                                     vpx_codec_pts_t /*pts*/) {}
+  virtual void DecompressedFrameHook(const aom_image_t & /*img*/,
+                                     aom_codec_pts_t /*pts*/) {}
 
   // Hook to be called to handle decode result. Return true to continue.
-  virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
+  virtual bool HandleDecodeResult(const aom_codec_err_t res_dec,
                                   const VideoSource & /*video*/,
                                   Decoder *decoder) {
-    EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
-    return VPX_CODEC_OK == res_dec;
+    EXPECT_EQ(AOM_CODEC_OK, res_dec) << decoder->DecodeError();
+    return AOM_CODEC_OK == res_dec;
   }
 
   // Hook that can modify the encoder's output data
-  virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
-      const vpx_codec_cx_pkt_t *pkt) {
+  virtual const aom_codec_cx_pkt_t *MutateEncoderOutputHook(
+      const aom_codec_cx_pkt_t *pkt) {
     return pkt;
   }
 
   bool abort_;
-  vpx_codec_enc_cfg_t cfg_;
-  vpx_codec_dec_cfg_t dec_cfg_;
+  aom_codec_enc_cfg_t cfg_;
+  aom_codec_dec_cfg_t dec_cfg_;
   unsigned int passes_;
   unsigned long deadline_;
   TwopassStatsStore stats_;
   unsigned long init_flags_;
   unsigned long frame_flags_;
-  vpx_codec_pts_t last_pts_;
+  aom_codec_pts_t last_pts_;
 };
 
 }  // namespace libaom_test
diff --git a/test/encoder_parms_get_to_decoder.cc b/test/encoder_parms_get_to_decoder.cc
index e2fe537..5d44290 100644
--- a/test/encoder_parms_get_to_decoder.cc
+++ b/test/encoder_parms_get_to_decoder.cc
@@ -14,7 +14,7 @@
 #include "test/encode_test_driver.h"
 #include "test/util.h"
 #include "test/y4m_video_source.h"
-#include "av1/vp10_dx_iface.c"
+#include "av1/av1_dx_iface.c"
 
 namespace {
 
@@ -28,7 +28,7 @@
   int frames;
 };
 
-const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
+const EncodePerfTestVideo kAV1EncodePerfTestVectors[] = {
   { "niklas_1280_720_30.y4m", 1280, 720, 600, 10 },
 };
 
@@ -38,29 +38,29 @@
   int32_t lossless;
   int32_t error_resilient;
   int32_t frame_parallel;
-  vpx_color_range_t color_range;
-  vpx_color_space_t cs;
+  aom_color_range_t color_range;
+  aom_color_space_t cs;
   int render_size[2];
   // TODO(JBB): quantizers / bitrate
 };
 
-const EncodeParameters kVP9EncodeParameterSet[] = {
-  { 0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601, { 0, 0 } },
-  { 0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709, { 0, 0 } },
-  { 0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020, { 0, 0 } },
-  { 0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 } },
+const EncodeParameters kAV1EncodeParameterSet[] = {
+  { 0, 0, 0, 1, 0, AOM_CR_STUDIO_RANGE, AOM_CS_BT_601, { 0, 0 } },
+  { 0, 0, 0, 0, 0, AOM_CR_FULL_RANGE, AOM_CS_BT_709, { 0, 0 } },
+  { 0, 0, 1, 0, 0, AOM_CR_FULL_RANGE, AOM_CS_BT_2020, { 0, 0 } },
+  { 0, 2, 0, 0, 1, AOM_CR_STUDIO_RANGE, AOM_CS_UNKNOWN, { 640, 480 } },
   // TODO(JBB): Test profiles (requires more work).
 };
 
-class VpxEncoderParmsGetToDecoder
+class AvxEncoderParmsGetToDecoder
     : public ::libaom_test::EncoderTest,
       public ::libaom_test::CodecTestWith2Params<EncodeParameters,
                                                  EncodePerfTestVideo> {
  protected:
-  VpxEncoderParmsGetToDecoder()
+  AvxEncoderParmsGetToDecoder()
       : EncoderTest(GET_PARAM(0)), encode_parms(GET_PARAM(1)) {}
 
-  virtual ~VpxEncoderParmsGetToDecoder() {}
+  virtual ~AvxEncoderParmsGetToDecoder() {}
 
   virtual void SetUp() {
     InitializeConfig();
@@ -75,32 +75,32 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP9E_SET_COLOR_SPACE, encode_parms.cs);
-      encoder->Control(VP9E_SET_COLOR_RANGE, encode_parms.color_range);
-      encoder->Control(VP9E_SET_LOSSLESS, encode_parms.lossless);
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING,
+      encoder->Control(AV1E_SET_COLOR_SPACE, encode_parms.cs);
+      encoder->Control(AV1E_SET_COLOR_RANGE, encode_parms.color_range);
+      encoder->Control(AV1E_SET_LOSSLESS, encode_parms.lossless);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING,
                        encode_parms.frame_parallel);
-      encoder->Control(VP9E_SET_TILE_ROWS, encode_parms.tile_rows);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, encode_parms.tile_cols);
-      encoder->Control(VP8E_SET_CPUUSED, kCpuUsed);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-      encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-      encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-      encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+      encoder->Control(AV1E_SET_TILE_ROWS, encode_parms.tile_rows);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, encode_parms.tile_cols);
+      encoder->Control(AOME_SET_CPUUSED, kCpuUsed);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+      encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+      encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+      encoder->Control(AOME_SET_ARNR_TYPE, 3);
       if (encode_parms.render_size[0] > 0 && encode_parms.render_size[1] > 0)
-        encoder->Control(VP9E_SET_RENDER_SIZE, encode_parms.render_size);
+        encoder->Control(AV1E_SET_RENDER_SIZE, encode_parms.render_size);
     }
   }
 
-  virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
+  virtual bool HandleDecodeResult(const aom_codec_err_t res_dec,
                                   const libaom_test::VideoSource & /*video*/,
                                   libaom_test::Decoder *decoder) {
-    vpx_codec_ctx_t *const vp9_decoder = decoder->GetDecoder();
-    vpx_codec_alg_priv_t *const priv =
-        reinterpret_cast<vpx_codec_alg_priv_t *>(vp9_decoder->priv);
+    aom_codec_ctx_t *const av1_decoder = decoder->GetDecoder();
+    aom_codec_alg_priv_t *const priv =
+        reinterpret_cast<aom_codec_alg_priv_t *>(av1_decoder->priv);
     FrameWorkerData *const worker_data =
         reinterpret_cast<FrameWorkerData *>(priv->frame_workers[0].data1);
-    VP10_COMMON *const common = &worker_data->pbi->common;
+    AV1_COMMON *const common = &worker_data->pbi->common;
 
     if (encode_parms.lossless) {
       EXPECT_EQ(0, common->base_qindex);
@@ -122,8 +122,8 @@
     EXPECT_EQ(encode_parms.tile_cols, common->log2_tile_cols);
     EXPECT_EQ(encode_parms.tile_rows, common->log2_tile_rows);
 
-    EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
-    return VPX_CODEC_OK == res_dec;
+    EXPECT_EQ(AOM_CODEC_OK, res_dec) << decoder->DecodeError();
+    return AOM_CODEC_OK == res_dec;
   }
 
   EncodePerfTestVideo test_video_;
@@ -132,8 +132,8 @@
   EncodeParameters encode_parms;
 };
 
-TEST_P(VpxEncoderParmsGetToDecoder, BitstreamParms) {
-  init_flags_ = VPX_CODEC_USE_PSNR;
+TEST_P(AvxEncoderParmsGetToDecoder, BitstreamParms) {
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   libaom_test::VideoSource *const video =
       new libaom_test::Y4mVideoSource(test_video_.name, 0, test_video_.frames);
@@ -143,7 +143,7 @@
   delete video;
 }
 
-VP10_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
-                           ::testing::ValuesIn(kVP9EncodeParameterSet),
-                           ::testing::ValuesIn(kVP9EncodePerfTestVectors));
+AV1_INSTANTIATE_TEST_CASE(AvxEncoderParmsGetToDecoder,
+                          ::testing::ValuesIn(kAV1EncodeParameterSet),
+                          ::testing::ValuesIn(kAV1EncodePerfTestVectors));
 }  // namespace
diff --git a/test/end_to_end_test.cc b/test/end_to_end_test.cc
index 693df78..dfcc015 100644
--- a/test/end_to_end_test.cc
+++ b/test/end_to_end_test.cc
@@ -26,10 +26,10 @@
 // List of psnr thresholds for speed settings 0-7 and 5 encoding modes
 const double kPsnrThreshold[][5] = {
 // Note:
-// VP10 HBD average PSNR is slightly lower than VP9.
+// AV1 high bit depth (HBD) average PSNR is slightly lower than AV1 LBD.
 // We make two cases here to enable the testing and
 // guard picture quality.
-#if CONFIG_VP10_ENCODER && CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1_ENCODER && CONFIG_AOM_HIGHBITDEPTH
   { 36.0, 37.0, 37.0, 37.0, 37.0 }, { 31.0, 36.0, 36.0, 36.0, 36.0 },
   { 31.0, 35.0, 35.0, 35.0, 35.0 }, { 31.0, 34.0, 34.0, 34.0, 34.0 },
   { 31.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 },
@@ -39,32 +39,32 @@
   { 34.0, 35.0, 35.0, 35.0, 35.0 }, { 33.0, 34.0, 34.0, 34.0, 34.0 },
   { 32.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 },
   { 30.0, 31.0, 31.0, 31.0, 31.0 }, { 29.0, 30.0, 30.0, 30.0, 30.0 },
-#endif  // CONFIG_VP9_HIGHBITDEPTH && CONFIG_VP10_ENCODER
+#endif  // CONFIG_AOM_HIGHBITDEPTH && CONFIG_AV1_ENCODER
 };
 
 typedef struct {
   const char *filename;
   unsigned int input_bit_depth;
-  vpx_img_fmt fmt;
-  vpx_bit_depth_t bit_depth;
+  aom_img_fmt fmt;
+  aom_bit_depth_t bit_depth;
   unsigned int profile;
 } TestVideoParam;
 
 const TestVideoParam kTestVectors[] = {
-  { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0 },
-  { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 },
-  { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
-  { "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 },
-#if CONFIG_VP9_HIGHBITDEPTH
-  { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 },
-  { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 },
-  { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 },
-  { "park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3 },
-  { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2 },
-  { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 },
-  { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 },
-  { "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 },
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+  { "park_joy_90p_8_420.y4m", 8, AOM_IMG_FMT_I420, AOM_BITS_8, 0 },
+  { "park_joy_90p_8_422.y4m", 8, AOM_IMG_FMT_I422, AOM_BITS_8, 1 },
+  { "park_joy_90p_8_444.y4m", 8, AOM_IMG_FMT_I444, AOM_BITS_8, 1 },
+  { "park_joy_90p_8_440.yuv", 8, AOM_IMG_FMT_I440, AOM_BITS_8, 1 },
+#if CONFIG_AOM_HIGHBITDEPTH
+  { "park_joy_90p_10_420.y4m", 10, AOM_IMG_FMT_I42016, AOM_BITS_10, 2 },
+  { "park_joy_90p_10_422.y4m", 10, AOM_IMG_FMT_I42216, AOM_BITS_10, 3 },
+  { "park_joy_90p_10_444.y4m", 10, AOM_IMG_FMT_I44416, AOM_BITS_10, 3 },
+  { "park_joy_90p_10_440.yuv", 10, AOM_IMG_FMT_I44016, AOM_BITS_10, 3 },
+  { "park_joy_90p_12_420.y4m", 12, AOM_IMG_FMT_I42016, AOM_BITS_12, 2 },
+  { "park_joy_90p_12_422.y4m", 12, AOM_IMG_FMT_I42216, AOM_BITS_12, 3 },
+  { "park_joy_90p_12_444.y4m", 12, AOM_IMG_FMT_I44416, AOM_BITS_12, 3 },
+  { "park_joy_90p_12_440.yuv", 12, AOM_IMG_FMT_I44016, AOM_BITS_12, 3 },
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 
 // Encoding modes tested
@@ -101,10 +101,10 @@
     SetMode(encoding_mode_);
     if (encoding_mode_ != ::libaom_test::kRealTime) {
       cfg_.g_lag_in_frames = 5;
-      cfg_.rc_end_usage = VPX_VBR;
+      cfg_.rc_end_usage = AOM_VBR;
     } else {
       cfg_.g_lag_in_frames = 0;
-      cfg_.rc_end_usage = VPX_CBR;
+      cfg_.rc_end_usage = AOM_CBR;
       cfg_.rc_buf_sz = 1000;
       cfg_.rc_buf_initial_sz = 500;
       cfg_.rc_buf_optimal_sz = 600;
@@ -117,7 +117,7 @@
     nframes_ = 0;
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     psnr_ += pkt->data.psnr.psnr[0];
     nframes_++;
   }
@@ -125,19 +125,19 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, 4);
-      encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
+      encoder->Control(AV1E_SET_FRAME_PARALLEL_DECODING, 1);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, 4);
+      encoder->Control(AOME_SET_CPUUSED, cpu_used_);
       // Test screen coding tools at cpu_used = 1 && encoding mode is two-pass.
       if (cpu_used_ == 1 && encoding_mode_ == ::libaom_test::kTwoPassGood)
-        encoder->Control(VP9E_SET_TUNE_CONTENT, VPX_CONTENT_SCREEN);
+        encoder->Control(AV1E_SET_TUNE_CONTENT, AOM_CONTENT_SCREEN);
       else
-        encoder->Control(VP9E_SET_TUNE_CONTENT, VPX_CONTENT_DEFAULT);
+        encoder->Control(AV1E_SET_TUNE_CONTENT, AOM_CONTENT_DEFAULT);
       if (encoding_mode_ != ::libaom_test::kRealTime) {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-        encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-        encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-        encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+        encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+        encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+        encoder->Control(AOME_SET_ARNR_TYPE, 3);
       }
     }
   }
@@ -166,8 +166,8 @@
   cfg_.g_profile = test_video_param_.profile;
   cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
   cfg_.g_bit_depth = test_video_param_.bit_depth;
-  init_flags_ = VPX_CODEC_USE_PSNR;
-  if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+  init_flags_ = AOM_CODEC_USE_PSNR;
+  if (cfg_.g_bit_depth > 8) init_flags_ |= AOM_CODEC_USE_HIGHBITDEPTH;
 
   libaom_test::VideoSource *video;
   if (is_extension_y4m(test_video_param_.filename)) {
@@ -185,8 +185,8 @@
   delete (video);
 }
 
-VP10_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
-                           ::testing::ValuesIn(kEncodingModeVectors),
-                           ::testing::ValuesIn(kTestVectors),
-                           ::testing::ValuesIn(kCpuUsedVectors));
+AV1_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
+                          ::testing::ValuesIn(kEncodingModeVectors),
+                          ::testing::ValuesIn(kTestVectors),
+                          ::testing::ValuesIn(kCpuUsedVectors));
 }  // namespace
diff --git a/test/error_block_test.cc b/test/error_block_test.cc
index 8047e3d..05e643a 100644
--- a/test/error_block_test.cc
+++ b/test/error_block_test.cc
@@ -14,27 +14,27 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 
 using libaom_test::ACMRandom;
 
 namespace {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const int kNumIterations = 1000;
 
 typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff,
                                   const tran_low_t *dqcoeff,
                                   intptr_t block_size, int64_t *ssz, int bps);
 
-typedef std::tr1::tuple<ErrorBlockFunc, ErrorBlockFunc, vpx_bit_depth_t>
+typedef std::tr1::tuple<ErrorBlockFunc, ErrorBlockFunc, aom_bit_depth_t>
     ErrorBlockParam;
 
 class ErrorBlockTest : public ::testing::TestWithParam<ErrorBlockParam> {
@@ -49,7 +49,7 @@
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 
  protected:
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   ErrorBlockFunc error_block_op_;
   ErrorBlockFunc ref_error_block_op_;
 };
@@ -160,13 +160,13 @@
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, ErrorBlockTest,
-    ::testing::Values(make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_10),
-                      make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_12),
-                      make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, AOM_BITS_10),
+                      make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, AOM_BITS_12),
+                      make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, AOM_BITS_8)));
 #endif  // HAVE_SSE2
 
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index f86f85d..7b55fba 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -49,7 +49,7 @@
     mismatch_nframes_ = 0;
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     psnr_ += pkt->data.psnr.psnr[0];
     nframes_++;
   }
@@ -57,15 +57,15 @@
   virtual void PreEncodeFrameHook(libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder * /*encoder*/) {
     frame_flags_ &=
-        ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF);
+        ~(AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF | AOM_EFLAG_NO_UPD_ARF);
     if (droppable_nframes_ > 0 &&
-        (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+        (cfg_.g_pass == AOM_RC_LAST_PASS || cfg_.g_pass == AOM_RC_ONE_PASS)) {
       for (unsigned int i = 0; i < droppable_nframes_; ++i) {
         if (droppable_frames_[i] == video->frame()) {
           std::cout << "Encoding droppable frame: " << droppable_frames_[i]
                     << "\n";
-          frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
-                           VP8_EFLAG_NO_UPD_ARF);
+          frame_flags_ |= (AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF |
+                           AOM_EFLAG_NO_UPD_ARF);
           return;
         }
       }
@@ -84,7 +84,7 @@
 
   virtual bool DoDecode() const {
     if (error_nframes_ > 0 &&
-        (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+        (cfg_.g_pass == AOM_RC_LAST_PASS || cfg_.g_pass == AOM_RC_ONE_PASS)) {
       for (unsigned int i = 0; i < error_nframes_; ++i) {
         if (error_frames_[i] == nframes_ - 1) {
           std::cout << "             Skipping decoding frame: "
@@ -96,7 +96,7 @@
     return 1;
   }
 
-  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
+  virtual void MismatchHook(const aom_image_t *img1, const aom_image_t *img2) {
     double mismatch_psnr = compute_psnr(img1, img2);
     mismatch_psnr_ += mismatch_psnr;
     ++mismatch_nframes_;
@@ -142,12 +142,12 @@
 };
 
 TEST_P(ErrorResilienceTestLarge, OnVersusOff) {
-  const vpx_rational timebase = { 33333333, 1000000000 };
+  const aom_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
   cfg_.rc_target_bitrate = 2000;
   cfg_.g_lag_in_frames = 10;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
                                      timebase.den, timebase.num, 0, 30);
@@ -177,21 +177,21 @@
 // frames (i.e., frames that don't update any reference buffers).
 // Check both isolated and consecutive loss.
 TEST_P(ErrorResilienceTestLarge, DropFramesWithoutRecovery) {
-  const vpx_rational timebase = { 33333333, 1000000000 };
+  const aom_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
   cfg_.rc_target_bitrate = 500;
   // FIXME(debargha): Fix this to work for any lag.
   // Currently this test only works for lag = 0
   cfg_.g_lag_in_frames = 0;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
                                      timebase.den, timebase.num, 0, 40);
 
   // Error resilient mode ON.
   cfg_.g_error_resilient = 1;
-  cfg_.kf_mode = VPX_KF_DISABLED;
+  cfg_.kf_mode = AOM_KF_DISABLED;
 
   // Set an arbitrary set of error frames same as droppable frames.
   // In addition to isolated loss/drop, add a long consecutive series
@@ -231,5 +231,5 @@
 #endif
 }
 
-VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+AV1_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
 }  // namespace
diff --git a/test/ethread_test.cc b/test/ethread_test.cc
index 0a32458..e62b78e 100644
--- a/test/ethread_test.cc
+++ b/test/ethread_test.cc
@@ -18,22 +18,22 @@
 #include "test/y4m_video_source.h"
 
 namespace {
-class VPxEncoderThreadTest
+class AVxEncoderThreadTest
     : public ::libaom_test::EncoderTest,
       public ::libaom_test::CodecTestWith2Params<libaom_test::TestMode, int> {
  protected:
-  VPxEncoderThreadTest()
+  AVxEncoderThreadTest()
       : EncoderTest(GET_PARAM(0)), encoder_initialized_(false),
         encoding_mode_(GET_PARAM(1)), set_cpu_used_(GET_PARAM(2)) {
-    init_flags_ = VPX_CODEC_USE_PSNR;
-    vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+    init_flags_ = AOM_CODEC_USE_PSNR;
+    aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
     cfg.w = 1280;
     cfg.h = 720;
     decoder_ = codec_->CreateDecoder(cfg, 0);
-#if CONFIG_VP10 && CONFIG_EXT_TILE
-    if (decoder_->IsVP10()) {
-      decoder_->Control(VP10_SET_DECODE_TILE_ROW, -1);
-      decoder_->Control(VP10_SET_DECODE_TILE_COL, -1);
+#if CONFIG_AV1 && CONFIG_EXT_TILE
+    if (decoder_->IsAV1()) {
+      decoder_->Control(AV1_SET_DECODE_TILE_ROW, -1);
+      decoder_->Control(AV1_SET_DECODE_TILE_COL, -1);
     }
 #endif
 
@@ -41,7 +41,7 @@
     md5_dec_.clear();
     md5_enc_.clear();
   }
-  virtual ~VPxEncoderThreadTest() { delete decoder_; }
+  virtual ~AVxEncoderThreadTest() { delete decoder_; }
 
   virtual void SetUp() {
     InitializeConfig();
@@ -49,12 +49,12 @@
 
     if (encoding_mode_ != ::libaom_test::kRealTime) {
       cfg_.g_lag_in_frames = 3;
-      cfg_.rc_end_usage = VPX_VBR;
+      cfg_.rc_end_usage = AOM_VBR;
       cfg_.rc_2pass_vbr_minsection_pct = 5;
       cfg_.rc_2pass_vbr_maxsection_pct = 2000;
     } else {
       cfg_.g_lag_in_frames = 0;
-      cfg_.rc_end_usage = VPX_CBR;
+      cfg_.rc_end_usage = AOM_CBR;
       cfg_.g_error_resilient = 1;
     }
     cfg_.rc_max_quantizer = 56;
@@ -68,35 +68,35 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource * /*video*/,
                                   ::libaom_test::Encoder *encoder) {
     if (!encoder_initialized_) {
-#if CONFIG_VP10 && CONFIG_EXT_TILE
-      encoder->Control(VP9E_SET_TILE_COLUMNS, 1);
-      if (codec_ == &libaom_test::kVP10) {
+#if CONFIG_AV1 && CONFIG_EXT_TILE
+      encoder->Control(AV1E_SET_TILE_COLUMNS, 1);
+      if (codec_ == &libaom_test::kAV1) {
         // TODO(geza): Start using multiple tile rows when the multi-threaded
         // encoder can handle them
-        encoder->Control(VP9E_SET_TILE_ROWS, 32);
+        encoder->Control(AV1E_SET_TILE_ROWS, 32);
       } else {
-        encoder->Control(VP9E_SET_TILE_ROWS, 0);
+        encoder->Control(AV1E_SET_TILE_ROWS, 0);
       }
 #else
       // Encode 4 tile columns.
-      encoder->Control(VP9E_SET_TILE_COLUMNS, 2);
-      encoder->Control(VP9E_SET_TILE_ROWS, 0);
-#endif  // CONFIG_VP10 && CONFIG_EXT_TILE
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, 2);
+      encoder->Control(AV1E_SET_TILE_ROWS, 0);
+#endif  // CONFIG_AV1 && CONFIG_EXT_TILE
+      encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
       if (encoding_mode_ != ::libaom_test::kRealTime) {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-        encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-        encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-        encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+        encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+        encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+        encoder->Control(AOME_SET_ARNR_TYPE, 3);
       } else {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 0);
-        encoder->Control(VP9E_SET_AQ_MODE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 0);
+        encoder->Control(AV1E_SET_AQ_MODE, 3);
       }
       encoder_initialized_ = true;
     }
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     size_enc_.push_back(pkt->data.frame.sz);
 
     ::libaom_test::MD5 md5_enc;
@@ -104,13 +104,13 @@
                 pkt->data.frame.sz);
     md5_enc_.push_back(md5_enc.Get());
 
-    const vpx_codec_err_t res = decoder_->DecodeFrame(
+    const aom_codec_err_t res = decoder_->DecodeFrame(
         reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
-    if (res != VPX_CODEC_OK) {
+    if (res != AOM_CODEC_OK) {
       abort_ = true;
-      ASSERT_EQ(VPX_CODEC_OK, res);
+      ASSERT_EQ(AOM_CODEC_OK, res);
     }
-    const vpx_image_t *img = decoder_->GetDxData().Next();
+    const aom_image_t *img = decoder_->GetDxData().Next();
 
     if (img) {
       ::libaom_test::MD5 md5_res;
@@ -125,7 +125,7 @@
 
     // Encode using single thread.
     cfg_.g_threads = 1;
-    init_flags_ = VPX_CODEC_USE_PSNR;
+    init_flags_ = AOM_CODEC_USE_PSNR;
     ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
     std::vector<size_t> single_thr_size_enc;
     std::vector<std::string> single_thr_md5_enc;
@@ -165,19 +165,19 @@
   std::vector<std::string> md5_dec_;
 };
 
-TEST_P(VPxEncoderThreadTest, EncoderResultTest) { DoTest(); }
+TEST_P(AVxEncoderThreadTest, EncoderResultTest) { DoTest(); }
 
-class VPxEncoderThreadTestLarge : public VPxEncoderThreadTest {};
+class AVxEncoderThreadTestLarge : public AVxEncoderThreadTest {};
 
-TEST_P(VPxEncoderThreadTestLarge, EncoderResultTest) { DoTest(); }
+TEST_P(AVxEncoderThreadTestLarge, EncoderResultTest) { DoTest(); }
 
-VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(3, 9));
+AV1_INSTANTIATE_TEST_CASE(AVxEncoderThreadTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(3, 9));
 
-VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTestLarge,
-                           ::testing::Values(::libaom_test::kTwoPassGood,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(1, 3));
+AV1_INSTANTIATE_TEST_CASE(AVxEncoderThreadTestLarge,
+                          ::testing::Values(::libaom_test::kTwoPassGood,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(1, 3));
 }  // namespace
diff --git a/test/examples.sh b/test/examples.sh
index 1ee120a..8c0aff4 100755
--- a/test/examples.sh
+++ b/test/examples.sh
@@ -24,6 +24,6 @@
 
 for test in ${example_tests}; do
   # Source each test script so that exporting variables can be avoided.
-  VPX_TEST_NAME="$(basename ${test%.*})"
+  AOM_TEST_NAME="$(basename ${test%.*})"
   . "${test}"
 done
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index dda6e68..4498b7b 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -14,16 +14,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/transform_test_base.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 using libaom_test::ACMRandom;
@@ -35,59 +35,59 @@
                         int tx_type);
 using libaom_test::FhtFunc;
 
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t, int>
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t, int>
     Dct4x4Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x4Param;
 
 void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
                  int /*tx_type*/) {
-  vpx_fdct4x4_c(in, out, stride);
+  aom_fdct4x4_c(in, out, stride);
 }
 
 void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht4x4_c(in, out, stride, tx_type);
+  av1_fht4x4_c(in, out, stride, tx_type);
 }
 
 void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
                  int /*tx_type*/) {
-  vp10_fwht4x4_c(in, out, stride);
+  av1_fwht4x4_c(in, out, stride);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+  aom_highbd_idct4x4_16_add_c(in, out, stride, 10);
 }
 
 void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+  aom_highbd_idct4x4_16_add_c(in, out, stride, 12);
 }
 
 void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
 }
 
 void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+  aom_highbd_iwht4x4_16_add_c(in, out, stride, 10);
 }
 
 void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+  aom_highbd_iwht4x4_16_add_c(in, out, stride, 12);
 }
 
 #if HAVE_SSE2
 void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+  aom_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
 }
 
 void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+  aom_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans4x4DCT : public libaom_test::TransformTestBase,
                     public ::testing::TestWithParam<Dct4x4Param> {
@@ -202,143 +202,139 @@
 TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); }
 using std::tr1::make_tuple;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10, 16),
-        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12, 16),
-        make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8, 16)));
+        make_tuple(&aom_highbd_fdct4x4_c, &idct4x4_10, 0, AOM_BITS_10, 16),
+        make_tuple(&aom_highbd_fdct4x4_c, &idct4x4_12, 0, AOM_BITS_12, 16),
+        make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c, 0, AOM_BITS_8, 16)));
 #else
 INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct4x4_c,
-                                                     &vpx_idct4x4_16_add_c, 0,
-                                                     VPX_BITS_8, 16)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&aom_fdct4x4_c,
+                                                     &aom_idct4x4_16_add_c, 0,
+                                                     AOM_BITS_8, 16)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12, 16),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8, 16)));
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 0, AOM_BITS_10, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 1, AOM_BITS_10, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 2, AOM_BITS_10, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 3, AOM_BITS_10, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 0, AOM_BITS_12, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 1, AOM_BITS_12, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 2, AOM_BITS_12, 16),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 3, AOM_BITS_12, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8, 16)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4WHT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10, 16),
-        make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12, 16),
-        make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8, 16)));
+        make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_10, 0, AOM_BITS_10, 16),
+        make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_12, 0, AOM_BITS_12, 16),
+        make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, AOM_BITS_8, 16)));
 #else
 INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
-                        ::testing::Values(make_tuple(&vp10_fwht4x4_c,
-                                                     &vpx_iwht4x4_16_add_c, 0,
-                                                     VPX_BITS_8, 16)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&av1_fwht4x4_c,
+                                                     &aom_iwht4x4_16_add_c, 0,
+                                                     AOM_BITS_8, 16)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct4x4_c,
-                                                     &vpx_idct4x4_16_add_neon,
-                                                     0, VPX_BITS_8, 16)));
-#endif  // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct4x4_c,
+                                                     &aom_idct4x4_16_add_neon,
+                                                     0, AOM_BITS_8, 16)));
+#endif  // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 0, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 1, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 2, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 3, VPX_BITS_8,
-                   16)));
-#endif  // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 0, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 1, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 2, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 3, AOM_BITS_8, 16)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4WHT,
-    ::testing::Values(make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0,
-                                 VPX_BITS_8, 16)));
+    ::testing::Values(make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0,
+                                 AOM_BITS_8, 16),
+                      make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_sse2, 0,
+                                 AOM_BITS_8, 16)));
 #endif
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct4x4_sse2,
-                                                     &vpx_idct4x4_16_add_sse2,
-                                                     0, VPX_BITS_8, 16)));
+                        ::testing::Values(make_tuple(&aom_fdct4x4_sse2,
+                                                     &aom_idct4x4_16_add_sse2,
+                                                     0, AOM_BITS_8, 16)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
-    ::testing::Values(make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
-                                 VPX_BITS_8, 16)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0,
+                                 AOM_BITS_8, 16),
+                      make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1,
+                                 AOM_BITS_8, 16),
+                      make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2,
+                                 AOM_BITS_8, 16),
+                      make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3,
+                                 AOM_BITS_8, 16)));
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10, 16),
-        make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10,
+        make_tuple(&aom_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, AOM_BITS_10, 16),
+        make_tuple(&aom_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, AOM_BITS_10,
                    16),
-        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12, 16),
-        make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12,
+        make_tuple(&aom_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, AOM_BITS_12, 16),
+        make_tuple(&aom_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, AOM_BITS_12,
                    16),
-        make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8,
+        make_tuple(&aom_fdct4x4_sse2, &aom_idct4x4_16_add_c, 0, AOM_BITS_8,
                    16)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8, 16),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8,
-                   16)));
-#endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct4x4_msa,
-                                                     &vpx_idct4x4_16_add_msa, 0,
-                                                     VPX_BITS_8, 16)));
+                        ::testing::Values(make_tuple(&aom_fdct4x4_msa,
+                                                     &aom_idct4x4_16_add_msa, 0,
+                                                     AOM_BITS_8, 16)));
 #if !CONFIG_EXT_TX
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans4x4HT,
-    ::testing::Values(make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 0,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 1,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 2,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 3,
-                                 VPX_BITS_8, 16)));
+    ::testing::Values(
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 0, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 1, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 2, AOM_BITS_8, 16),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 3, AOM_BITS_8,
+                   16)));
 #endif  // !CONFIG_EXT_TX
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 5f07468..8cfcbc1 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -14,16 +14,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
 #include "av1/common/scan.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 using libaom_test::ACMRandom;
@@ -43,9 +43,9 @@
 typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
                         int tx_type);
 
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
-typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, aom_bit_depth_t> Idct8x8Param;
 
 void reference_8x8_dct_1d(const double in[8], double out[8]) {
   const double kInvSqrt2 = 0.707106781186547524400844362104;
@@ -78,57 +78,57 @@
 
 void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride,
                  int /*tx_type*/) {
-  vpx_fdct8x8_c(in, out, stride);
+  aom_fdct8x8_c(in, out, stride);
 }
 
 void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht8x8_c(in, out, stride, tx_type);
+  av1_fht8x8_c(in, out, stride, tx_type);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+  aom_highbd_idct8x8_64_add_c(in, out, stride, 10);
 }
 
 void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+  aom_highbd_idct8x8_64_add_c(in, out, stride, 12);
 }
 
 void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
 }
 
 #if HAVE_SSE2
 
 void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+  aom_highbd_idct8x8_10_add_c(in, out, stride, 10);
 }
 
 void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+  aom_highbd_idct8x8_10_add_c(in, out, stride, 12);
 }
 
 void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+  aom_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
 }
 
 void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+  aom_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
 }
 
 void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+  aom_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
 }
 
 void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+  aom_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class FwdTrans8x8TestBase {
  public:
@@ -213,7 +213,7 @@
     DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
     DECLARE_ALIGNED(16, uint8_t, dst[64]);
     DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[64]);
     DECLARE_ALIGNED(16, uint16_t, src16[64]);
 #endif
@@ -221,11 +221,11 @@
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_, mask_].
       for (int j = 0; j < 64; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -247,9 +247,9 @@
           test_temp_block[j] *= 4;
         }
       }
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -257,9 +257,9 @@
       }
 
       for (int j = 0; j < 64; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const int diff = dst[j] - src[j];
 #endif
@@ -289,7 +289,7 @@
     DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
     DECLARE_ALIGNED(16, uint8_t, dst[64]);
     DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[64]);
     DECLARE_ALIGNED(16, uint16_t, src16[64]);
 #endif
@@ -297,7 +297,7 @@
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_, mask_].
       for (int j = 0; j < 64; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           if (i == 0) {
             src[j] = 255;
             dst[j] = 0;
@@ -309,7 +309,7 @@
             dst[j] = rnd.Rand8() % 2 ? 255 : 0;
           }
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           if (i == 0) {
             src16[j] = mask_;
@@ -330,9 +330,9 @@
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       ASM_REGISTER_STATE_CHECK(
           fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -340,9 +340,9 @@
       }
 
       for (int j = 0; j < 64; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const int diff = dst[j] - src[j];
 #endif
@@ -375,7 +375,7 @@
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
 #endif
@@ -385,11 +385,11 @@
 
       // Initialize a test block with input range [-255, 255].
       for (int j = 0; j < kNumCoeffs; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8() % 2 ? 255 : 0;
           dst[j] = src[j] > 0 ? 0 : 255;
           in[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
           dst16[j] = src16[j] > 0 ? 0 : mask_;
@@ -402,9 +402,9 @@
       for (int j = 0; j < kNumCoeffs; ++j)
         coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
 
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -412,9 +412,9 @@
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const int diff = dst[j] - src[j];
 #endif
@@ -460,11 +460,11 @@
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
 #endif
-    const int16_t *scan = vp10_default_scan_orders[TX_8X8].scan;
+    const int16_t *scan = av1_default_scan_orders[TX_8X8].scan;
 
     for (int i = 0; i < count_test_block; ++i) {
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -474,20 +474,20 @@
         } else {
           coeff[scan[j]] = 0;
         }
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           dst[j] = 0;
           ref[j] = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           dst16[j] = 0;
           ref16[j] = 0;
 #endif
         }
       }
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ref_txfm(coeff, ref, pitch_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
         ASM_REGISTER_STATE_CHECK(
@@ -496,9 +496,9 @@
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
 #else
         const int diff = dst[j] - ref[j];
 #endif
@@ -511,7 +511,7 @@
   int pitch_;
   int tx_type_;
   FhtFunc fwd_txfm_ref;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   int mask_;
 };
 
@@ -622,99 +622,98 @@
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8DCT,
     ::testing::Values(
-        make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+        make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c, 0, AOM_BITS_8),
+        make_tuple(&aom_highbd_fdct8x8_c, &idct8x8_10, 0, AOM_BITS_10),
+        make_tuple(&aom_highbd_fdct8x8_c, &idct8x8_12, 0, AOM_BITS_12)));
 #else
 INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_c,
-                                                     &vpx_idct8x8_64_add_c, 0,
-                                                     VPX_BITS_8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                        ::testing::Values(make_tuple(&aom_fdct8x8_c,
+                                                     &aom_idct8x8_64_add_c, 0,
+                                                     AOM_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 0, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 1, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 2, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 3, AOM_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 0, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 1, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 2, AOM_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 3, AOM_BITS_12),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_neon,
-                                                     &vpx_idct8x8_64_add_neon,
-                                                     0, VPX_BITS_8)));
-#endif  // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                        ::testing::Values(make_tuple(&aom_fdct8x8_neon,
+                                                     &aom_idct8x8_64_add_neon,
+                                                     0, AOM_BITS_8)));
+#endif  // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 3, VPX_BITS_8)));
-#endif  // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 0, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 3, AOM_BITS_8)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_sse2,
-                                                     &vpx_idct8x8_64_add_sse2,
-                                                     0, VPX_BITS_8)));
+                        ::testing::Values(make_tuple(&aom_fdct8x8_sse2,
+                                                     &aom_idct8x8_64_add_sse2,
+                                                     0, AOM_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
-                   VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3, AOM_BITS_8)));
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8DCT,
-    ::testing::Values(make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0,
-                                 VPX_BITS_8),
-                      make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_10_sse2,
-                                 12, VPX_BITS_10),
-                      make_tuple(&vpx_highbd_fdct8x8_sse2,
-                                 &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
-                      make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_12_sse2,
-                                 12, VPX_BITS_12),
-                      make_tuple(&vpx_highbd_fdct8x8_sse2,
-                                 &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&aom_fdct8x8_sse2, &aom_idct8x8_64_add_c, 0,
+                                 AOM_BITS_8),
+                      make_tuple(&aom_highbd_fdct8x8_c, &idct8x8_64_add_10_sse2,
+                                 12, AOM_BITS_10),
+                      make_tuple(&aom_highbd_fdct8x8_sse2,
+                                 &idct8x8_64_add_10_sse2, 12, AOM_BITS_10),
+                      make_tuple(&aom_highbd_fdct8x8_c, &idct8x8_64_add_12_sse2,
+                                 12, AOM_BITS_12),
+                      make_tuple(&aom_highbd_fdct8x8_sse2,
+                                 &idct8x8_64_add_12_sse2, 12, AOM_BITS_12)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
 
 // Optimizations take effect at a threshold of 6201, so we use a value close to
 // that to test both branches.
@@ -722,34 +721,34 @@
     SSE2, InvTrans8x8DCT,
     ::testing::Values(
         make_tuple(&idct8x8_10_add_10_c, &idct8x8_10_add_10_sse2, 6225,
-                   VPX_BITS_10),
-        make_tuple(&idct8x8_10, &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+                   AOM_BITS_10),
+        make_tuple(&idct8x8_10, &idct8x8_64_add_10_sse2, 6225, AOM_BITS_10),
         make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225,
-                   VPX_BITS_12),
-        make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
-#endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                   AOM_BITS_12),
+        make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, AOM_BITS_12)));
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_AOM_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_ssse3,
-                                                     &vpx_idct8x8_64_add_ssse3,
-                                                     0, VPX_BITS_8)));
+                        ::testing::Values(make_tuple(&aom_fdct8x8_ssse3,
+                                                     &aom_idct8x8_64_add_ssse3,
+                                                     0, AOM_BITS_8)));
 #endif
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_msa,
-                                                     &vpx_idct8x8_64_add_msa, 0,
-                                                     VPX_BITS_8)));
+                        ::testing::Values(make_tuple(&aom_fdct8x8_msa,
+                                                     &aom_idct8x8_64_add_msa, 0,
+                                                     AOM_BITS_8)));
 #if !CONFIG_EXT_TX
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 0, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 1, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 2, AOM_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 3, AOM_BITS_8)));
 #endif  // !CONFIG_EXT_TX
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/frame_size_tests.cc b/test/frame_size_tests.cc
index ff05119..4faa304 100644
--- a/test/frame_size_tests.cc
+++ b/test/frame_size_tests.cc
@@ -13,19 +13,19 @@
 
 namespace {
 
-class VP9FrameSizeTestsLarge : public ::libaom_test::EncoderTest,
+class AV1FrameSizeTestsLarge : public ::libaom_test::EncoderTest,
                                public ::testing::Test {
  protected:
-  VP9FrameSizeTestsLarge()
-      : EncoderTest(&::libaom_test::kVP10), expected_res_(VPX_CODEC_OK) {}
-  virtual ~VP9FrameSizeTestsLarge() {}
+  AV1FrameSizeTestsLarge()
+      : EncoderTest(&::libaom_test::kAV1), expected_res_(AOM_CODEC_OK) {}
+  virtual ~AV1FrameSizeTestsLarge() {}
 
   virtual void SetUp() {
     InitializeConfig();
     SetMode(::libaom_test::kRealTime);
   }
 
-  virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
+  virtual bool HandleDecodeResult(const aom_codec_err_t res_dec,
                                   const libaom_test::VideoSource & /*video*/,
                                   libaom_test::Decoder *decoder) {
     EXPECT_EQ(expected_res_, res_dec) << decoder->DecodeError();
@@ -35,35 +35,35 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_CPUUSED, 7);
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-      encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-      encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-      encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+      encoder->Control(AOME_SET_CPUUSED, 7);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+      encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+      encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+      encoder->Control(AOME_SET_ARNR_TYPE, 3);
     }
   }
 
   int expected_res_;
 };
 
-TEST_F(VP9FrameSizeTestsLarge, TestInvalidSizes) {
+TEST_F(AV1FrameSizeTestsLarge, TestInvalidSizes) {
   ::libaom_test::RandomVideoSource video;
 
 #if CONFIG_SIZE_LIMIT
   video.SetSize(DECODE_WIDTH_LIMIT + 16, DECODE_HEIGHT_LIMIT + 16);
   video.set_limit(2);
-  expected_res_ = VPX_CODEC_CORRUPT_FRAME;
+  expected_res_ = AOM_CODEC_CORRUPT_FRAME;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 #endif
 }
 
-TEST_F(VP9FrameSizeTestsLarge, ValidSizes) {
+TEST_F(AV1FrameSizeTestsLarge, ValidSizes) {
   ::libaom_test::RandomVideoSource video;
 
 #if CONFIG_SIZE_LIMIT
   video.SetSize(DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
   video.set_limit(2);
-  expected_res_ = VPX_CODEC_OK;
+  expected_res_ = AOM_CODEC_OK;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 #else
 // This test produces a pretty large single frame allocation,  (roughly
@@ -79,17 +79,17 @@
   video.SetSize(4096, 4096);
 #endif
   video.set_limit(2);
-  expected_res_ = VPX_CODEC_OK;
+  expected_res_ = AOM_CODEC_OK;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 #endif
 }
 
-TEST_F(VP9FrameSizeTestsLarge, OneByOneVideo) {
+TEST_F(AV1FrameSizeTestsLarge, OneByOneVideo) {
   ::libaom_test::RandomVideoSource video;
 
   video.SetSize(1, 1);
   video.set_limit(2);
-  expected_res_ = VPX_CODEC_OK;
+  expected_res_ = AOM_CODEC_OK;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 }  // namespace
diff --git a/test/hadamard_test.cc b/test/hadamard_test.cc
index 1f86e9c..bc8a406 100644
--- a/test/hadamard_test.cc
+++ b/test/hadamard_test.cc
@@ -12,7 +12,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 
 #include "test/acm_random.h"
 #include "test/register_state_check.h"
@@ -145,21 +145,21 @@
 }
 
 INSTANTIATE_TEST_CASE_P(C, Hadamard8x8Test,
-                        ::testing::Values(&vpx_hadamard_8x8_c));
+                        ::testing::Values(&aom_hadamard_8x8_c));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, Hadamard8x8Test,
-                        ::testing::Values(&vpx_hadamard_8x8_sse2));
+                        ::testing::Values(&aom_hadamard_8x8_sse2));
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3 && ARCH_X86_64
 INSTANTIATE_TEST_CASE_P(SSSE3, Hadamard8x8Test,
-                        ::testing::Values(&vpx_hadamard_8x8_ssse3));
+                        ::testing::Values(&aom_hadamard_8x8_ssse3));
 #endif  // HAVE_SSSE3 && ARCH_X86_64
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(NEON, Hadamard8x8Test,
-                        ::testing::Values(&vpx_hadamard_8x8_neon));
+                        ::testing::Values(&aom_hadamard_8x8_neon));
 #endif  // HAVE_NEON
 
 class Hadamard16x16Test : public HadamardTestBase {};
@@ -206,15 +206,15 @@
 }
 
 INSTANTIATE_TEST_CASE_P(C, Hadamard16x16Test,
-                        ::testing::Values(&vpx_hadamard_16x16_c));
+                        ::testing::Values(&aom_hadamard_16x16_c));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, Hadamard16x16Test,
-                        ::testing::Values(&vpx_hadamard_16x16_sse2));
+                        ::testing::Values(&aom_hadamard_16x16_sse2));
 #endif  // HAVE_SSE2
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(NEON, Hadamard16x16Test,
-                        ::testing::Values(&vpx_hadamard_16x16_neon));
+                        ::testing::Values(&aom_hadamard_16x16_neon));
 #endif  // HAVE_NEON
 }  // namespace
diff --git a/test/hbd_metrics_test.cc b/test/hbd_metrics_test.cc
index a7c1ec3..d22b804 100644
--- a/test/hbd_metrics_test.cc
+++ b/test/hbd_metrics_test.cc
@@ -15,7 +15,7 @@
 #include "third_party/googletest/src/include/gtest/gtest.h"
 #include "test/acm_random.h"
 #include "test/util.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/psnr.h"
 #include "aom_dsp/ssim.h"
 #include "aom_ports/mem.h"
@@ -36,14 +36,14 @@
                         const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
                         uint32_t bd) {
   PSNR_STATS psnr;
-  vpx_calc_highbd_psnr(source, dest, &psnr, bd, in_bd);
+  aom_calc_highbd_psnr(source, dest, &psnr, bd, in_bd);
   return psnr.psnr[0];
 }
 
 double compute_psnr(const YV12_BUFFER_CONFIG *source,
                     const YV12_BUFFER_CONFIG *dest) {
   PSNR_STATS psnr;
-  vpx_calc_psnr(source, dest, &psnr);
+  aom_calc_psnr(source, dest, &psnr);
   return psnr.psnr[0];
 }
 
@@ -51,40 +51,40 @@
                            const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
                            uint32_t bd) {
   double tempy, tempu, tempv;
-  return vpx_psnrhvs(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
+  return aom_psnrhvs(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
 }
 
 double compute_psnrhvs(const YV12_BUFFER_CONFIG *source,
                        const YV12_BUFFER_CONFIG *dest) {
   double tempy, tempu, tempv;
-  return vpx_psnrhvs(source, dest, &tempy, &tempu, &tempv, 8, 8);
+  return aom_psnrhvs(source, dest, &tempy, &tempu, &tempv, 8, 8);
 }
 
 double compute_hbd_fastssim(const YV12_BUFFER_CONFIG *source,
                             const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
                             uint32_t bd) {
   double tempy, tempu, tempv;
-  return vpx_calc_fastssim(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
+  return aom_calc_fastssim(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
 }
 
 double compute_fastssim(const YV12_BUFFER_CONFIG *source,
                         const YV12_BUFFER_CONFIG *dest) {
   double tempy, tempu, tempv;
-  return vpx_calc_fastssim(source, dest, &tempy, &tempu, &tempv, 8, 8);
+  return aom_calc_fastssim(source, dest, &tempy, &tempu, &tempv, 8, 8);
 }
 
-double compute_hbd_vpxssim(const YV12_BUFFER_CONFIG *source,
+double compute_hbd_aomssim(const YV12_BUFFER_CONFIG *source,
                            const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
                            uint32_t bd) {
   double ssim, weight;
-  ssim = vpx_highbd_calc_ssim(source, dest, &weight, bd, in_bd);
+  ssim = aom_highbd_calc_ssim(source, dest, &weight, bd, in_bd);
   return 100 * pow(ssim / weight, 8.0);
 }
 
-double compute_vpxssim(const YV12_BUFFER_CONFIG *source,
+double compute_aomssim(const YV12_BUFFER_CONFIG *source,
                        const YV12_BUFFER_CONFIG *dest) {
   double ssim, weight;
-  ssim = vpx_calc_ssim(source, dest, &weight);
+  ssim = aom_calc_ssim(source, dest, &weight);
   return 100 * pow(ssim / weight, 8.0);
 }
 
@@ -108,10 +108,10 @@
     memset(&hbd_src, 0, sizeof(hbd_src));
     memset(&hbd_dst, 0, sizeof(hbd_dst));
 
-    vpx_alloc_frame_buffer(&lbd_src, width, height, 1, 1, 0, 32, 16);
-    vpx_alloc_frame_buffer(&lbd_dst, width, height, 1, 1, 0, 32, 16);
-    vpx_alloc_frame_buffer(&hbd_src, width, height, 1, 1, 1, 32, 16);
-    vpx_alloc_frame_buffer(&hbd_dst, width, height, 1, 1, 1, 32, 16);
+    aom_alloc_frame_buffer(&lbd_src, width, height, 1, 1, 0, 32, 16);
+    aom_alloc_frame_buffer(&lbd_dst, width, height, 1, 1, 0, 32, 16);
+    aom_alloc_frame_buffer(&hbd_src, width, height, 1, 1, 1, 32, 16);
+    aom_alloc_frame_buffer(&hbd_dst, width, height, 1, 1, 1, 32, 16);
 
     memset(lbd_src.buffer_alloc, kPixFiller, lbd_src.buffer_alloc_sz);
     while (i < lbd_src.buffer_alloc_sz) {
@@ -157,10 +157,10 @@
     hbd_db = hbd_metric_(&hbd_src, &hbd_dst, input_bit_depth_, bit_depth_);
     EXPECT_LE(fabs(lbd_db - hbd_db), threshold_);
 
-    vpx_free_frame_buffer(&lbd_src);
-    vpx_free_frame_buffer(&lbd_dst);
-    vpx_free_frame_buffer(&hbd_src);
-    vpx_free_frame_buffer(&hbd_dst);
+    aom_free_frame_buffer(&lbd_src);
+    aom_free_frame_buffer(&lbd_dst);
+    aom_free_frame_buffer(&hbd_src);
+    aom_free_frame_buffer(&hbd_dst);
   }
 
   int input_bit_depth_;
@@ -195,14 +195,14 @@
 static const double kPhvs_thresh = 0.3;
 
 INSTANTIATE_TEST_CASE_P(
-    VPXSSIM, HBDMetricsTest,
-    ::testing::Values(MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+    AOMSSIM, HBDMetricsTest,
+    ::testing::Values(MetricTestTParam(&compute_aomssim, &compute_hbd_aomssim,
                                        8, 10, kSsim_thresh),
-                      MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+                      MetricTestTParam(&compute_aomssim, &compute_hbd_aomssim,
                                        10, 10, kPhvs_thresh),
-                      MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+                      MetricTestTParam(&compute_aomssim, &compute_hbd_aomssim,
                                        8, 12, kSsim_thresh),
-                      MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+                      MetricTestTParam(&compute_aomssim, &compute_hbd_aomssim,
                                        12, 12, kPhvs_thresh)));
 INSTANTIATE_TEST_CASE_P(
     FASTSSIM, HBDMetricsTest,
diff --git a/test/i420_video_source.h b/test/i420_video_source.h
index d1324af..8e817ec 100644
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -24,7 +24,7 @@
   I420VideoSource(const std::string &file_name, unsigned int width,
                   unsigned int height, int rate_numerator, int rate_denominator,
                   unsigned int start, int limit)
-      : YUVVideoSource(file_name, VPX_IMG_FMT_I420, width, height,
+      : YUVVideoSource(file_name, AOM_IMG_FMT_I420, width, height,
                        rate_numerator, rate_denominator, start, limit) {}
 };
 
diff --git a/test/idct8x8_test.cc b/test/idct8x8_test.cc
index 52d3f4f..9957ee6 100644
--- a/test/idct8x8_test.cc
+++ b/test/idct8x8_test.cc
@@ -14,9 +14,9 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/msvc.h"  // for round()
 
 using libaom_test::ACMRandom;
@@ -53,7 +53,7 @@
   for (int i = 0; i < 64; ++i) output[i] *= 2;
 }
 
-TEST(VP9Idct8x8Test, AccuracyCheck) {
+TEST(AV1Idct8x8Test, AccuracyCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = 10000;
   for (int i = 0; i < count_test_block; ++i) {
@@ -72,7 +72,7 @@
     reference_dct_2d(input, output_r);
     for (int j = 0; j < 64; ++j)
       coeff[j] = static_cast<tran_low_t>(round(output_r[j]));
-    vpx_idct8x8_64_add_c(coeff, dst, 8);
+    aom_idct8x8_64_add_c(coeff, dst, 8);
     for (int j = 0; j < 64; ++j) {
       const int diff = dst[j] - src[j];
       const int error = diff * diff;
diff --git a/test/idct_test.cc b/test/idct_test.cc
index a1264a8..db6734c 100644
--- a/test/idct_test.cc
+++ b/test/idct_test.cc
@@ -8,13 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
+#include "./aom_rtcd.h"
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 typedef void (*IdctFunc)(int16_t *input, unsigned char *pred_ptr,
                          int pred_stride, unsigned char *dst_ptr,
@@ -108,13 +109,13 @@
       EXPECT_EQ(0, output[i]) << "i==" << i;
 }
 
-INSTANTIATE_TEST_CASE_P(C, IDCTTest, ::testing::Values(vp8_short_idct4x4llm_c));
+INSTANTIATE_TEST_CASE_P(C, IDCTTest, ::testing::Values(aom_short_idct4x4llm_c));
 #if HAVE_MMX
 INSTANTIATE_TEST_CASE_P(MMX, IDCTTest,
-                        ::testing::Values(vp8_short_idct4x4llm_mmx));
+                        ::testing::Values(aom_short_idct4x4llm_mmx));
 #endif
 #if HAVE_MSA
 INSTANTIATE_TEST_CASE_P(MSA, IDCTTest,
-                        ::testing::Values(vp8_short_idct4x4llm_msa));
+                        ::testing::Values(aom_short_idct4x4llm_msa));
 #endif
 }
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 0cedb12..efa57f7 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -12,15 +12,15 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/blockd.h"
 #include "av1/common/pred_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 namespace {
 
@@ -43,7 +43,7 @@
   int bit_depth;
 };
 
-class VP9IntraPredTest : public ::testing::TestWithParam<IntraPredFunc> {
+class AV1IntraPredTest : public ::testing::TestWithParam<IntraPredFunc> {
  public:
   void RunTest(uint16_t *left_col, uint16_t *above_data, uint16_t *dst,
                uint16_t *ref_dst) {
@@ -114,7 +114,7 @@
   IntraPredFunc params_;
 };
 
-TEST_P(VP9IntraPredTest, IntraPredTests) {
+TEST_P(AV1IntraPredTest, IntraPredTests) {
   // max block size is 32
   DECLARE_ALIGNED(16, uint16_t, left_col[2 * 32]);
   DECLARE_ALIGNED(16, uint16_t, above_data[2 * 32 + 32]);
@@ -124,88 +124,88 @@
 }
 
 #if HAVE_SSE2
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
-    SSE2_TO_C_8, VP9IntraPredTest,
-    ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
-                                    &vpx_highbd_dc_predictor_32x32_c, 32, 8),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
-                                    &vpx_highbd_tm_predictor_16x16_c, 16, 8),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
-                                    &vpx_highbd_tm_predictor_32x32_c, 32, 8),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
-                                    &vpx_highbd_dc_predictor_4x4_c, 4, 8),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
-                                    &vpx_highbd_dc_predictor_8x8_c, 8, 8),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
-                                    &vpx_highbd_dc_predictor_16x16_c, 16, 8),
-                      IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
-                                    &vpx_highbd_v_predictor_4x4_c, 4, 8),
-                      IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
-                                    &vpx_highbd_v_predictor_8x8_c, 8, 8),
-                      IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
-                                    &vpx_highbd_v_predictor_16x16_c, 16, 8),
-                      IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
-                                    &vpx_highbd_v_predictor_32x32_c, 32, 8),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
-                                    &vpx_highbd_tm_predictor_4x4_c, 4, 8),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
-                                    &vpx_highbd_tm_predictor_8x8_c, 8, 8)));
+    SSE2_TO_C_8, AV1IntraPredTest,
+    ::testing::Values(IntraPredFunc(&aom_highbd_dc_predictor_32x32_sse2,
+                                    &aom_highbd_dc_predictor_32x32_c, 32, 8),
+                      IntraPredFunc(&aom_highbd_tm_predictor_16x16_sse2,
+                                    &aom_highbd_tm_predictor_16x16_c, 16, 8),
+                      IntraPredFunc(&aom_highbd_tm_predictor_32x32_sse2,
+                                    &aom_highbd_tm_predictor_32x32_c, 32, 8),
+                      IntraPredFunc(&aom_highbd_dc_predictor_4x4_sse2,
+                                    &aom_highbd_dc_predictor_4x4_c, 4, 8),
+                      IntraPredFunc(&aom_highbd_dc_predictor_8x8_sse2,
+                                    &aom_highbd_dc_predictor_8x8_c, 8, 8),
+                      IntraPredFunc(&aom_highbd_dc_predictor_16x16_sse2,
+                                    &aom_highbd_dc_predictor_16x16_c, 16, 8),
+                      IntraPredFunc(&aom_highbd_v_predictor_4x4_sse2,
+                                    &aom_highbd_v_predictor_4x4_c, 4, 8),
+                      IntraPredFunc(&aom_highbd_v_predictor_8x8_sse2,
+                                    &aom_highbd_v_predictor_8x8_c, 8, 8),
+                      IntraPredFunc(&aom_highbd_v_predictor_16x16_sse2,
+                                    &aom_highbd_v_predictor_16x16_c, 16, 8),
+                      IntraPredFunc(&aom_highbd_v_predictor_32x32_sse2,
+                                    &aom_highbd_v_predictor_32x32_c, 32, 8),
+                      IntraPredFunc(&aom_highbd_tm_predictor_4x4_sse2,
+                                    &aom_highbd_tm_predictor_4x4_c, 4, 8),
+                      IntraPredFunc(&aom_highbd_tm_predictor_8x8_sse2,
+                                    &aom_highbd_tm_predictor_8x8_c, 8, 8)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2_TO_C_10, VP9IntraPredTest,
-    ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
-                                    &vpx_highbd_dc_predictor_32x32_c, 32, 10),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
-                                    &vpx_highbd_tm_predictor_16x16_c, 16, 10),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
-                                    &vpx_highbd_tm_predictor_32x32_c, 32, 10),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
-                                    &vpx_highbd_dc_predictor_4x4_c, 4, 10),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
-                                    &vpx_highbd_dc_predictor_8x8_c, 8, 10),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
-                                    &vpx_highbd_dc_predictor_16x16_c, 16, 10),
-                      IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
-                                    &vpx_highbd_v_predictor_4x4_c, 4, 10),
-                      IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
-                                    &vpx_highbd_v_predictor_8x8_c, 8, 10),
-                      IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
-                                    &vpx_highbd_v_predictor_16x16_c, 16, 10),
-                      IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
-                                    &vpx_highbd_v_predictor_32x32_c, 32, 10),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
-                                    &vpx_highbd_tm_predictor_4x4_c, 4, 10),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
-                                    &vpx_highbd_tm_predictor_8x8_c, 8, 10)));
+    SSE2_TO_C_10, AV1IntraPredTest,
+    ::testing::Values(IntraPredFunc(&aom_highbd_dc_predictor_32x32_sse2,
+                                    &aom_highbd_dc_predictor_32x32_c, 32, 10),
+                      IntraPredFunc(&aom_highbd_tm_predictor_16x16_sse2,
+                                    &aom_highbd_tm_predictor_16x16_c, 16, 10),
+                      IntraPredFunc(&aom_highbd_tm_predictor_32x32_sse2,
+                                    &aom_highbd_tm_predictor_32x32_c, 32, 10),
+                      IntraPredFunc(&aom_highbd_dc_predictor_4x4_sse2,
+                                    &aom_highbd_dc_predictor_4x4_c, 4, 10),
+                      IntraPredFunc(&aom_highbd_dc_predictor_8x8_sse2,
+                                    &aom_highbd_dc_predictor_8x8_c, 8, 10),
+                      IntraPredFunc(&aom_highbd_dc_predictor_16x16_sse2,
+                                    &aom_highbd_dc_predictor_16x16_c, 16, 10),
+                      IntraPredFunc(&aom_highbd_v_predictor_4x4_sse2,
+                                    &aom_highbd_v_predictor_4x4_c, 4, 10),
+                      IntraPredFunc(&aom_highbd_v_predictor_8x8_sse2,
+                                    &aom_highbd_v_predictor_8x8_c, 8, 10),
+                      IntraPredFunc(&aom_highbd_v_predictor_16x16_sse2,
+                                    &aom_highbd_v_predictor_16x16_c, 16, 10),
+                      IntraPredFunc(&aom_highbd_v_predictor_32x32_sse2,
+                                    &aom_highbd_v_predictor_32x32_c, 32, 10),
+                      IntraPredFunc(&aom_highbd_tm_predictor_4x4_sse2,
+                                    &aom_highbd_tm_predictor_4x4_c, 4, 10),
+                      IntraPredFunc(&aom_highbd_tm_predictor_8x8_sse2,
+                                    &aom_highbd_tm_predictor_8x8_c, 8, 10)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2_TO_C_12, VP9IntraPredTest,
-    ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
-                                    &vpx_highbd_dc_predictor_32x32_c, 32, 12),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
-                                    &vpx_highbd_tm_predictor_16x16_c, 16, 12),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
-                                    &vpx_highbd_tm_predictor_32x32_c, 32, 12),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
-                                    &vpx_highbd_dc_predictor_4x4_c, 4, 12),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
-                                    &vpx_highbd_dc_predictor_8x8_c, 8, 12),
-                      IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
-                                    &vpx_highbd_dc_predictor_16x16_c, 16, 12),
-                      IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
-                                    &vpx_highbd_v_predictor_4x4_c, 4, 12),
-                      IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
-                                    &vpx_highbd_v_predictor_8x8_c, 8, 12),
-                      IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
-                                    &vpx_highbd_v_predictor_16x16_c, 16, 12),
-                      IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
-                                    &vpx_highbd_v_predictor_32x32_c, 32, 12),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
-                                    &vpx_highbd_tm_predictor_4x4_c, 4, 12),
-                      IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
-                                    &vpx_highbd_tm_predictor_8x8_c, 8, 12)));
+    SSE2_TO_C_12, AV1IntraPredTest,
+    ::testing::Values(IntraPredFunc(&aom_highbd_dc_predictor_32x32_sse2,
+                                    &aom_highbd_dc_predictor_32x32_c, 32, 12),
+                      IntraPredFunc(&aom_highbd_tm_predictor_16x16_sse2,
+                                    &aom_highbd_tm_predictor_16x16_c, 16, 12),
+                      IntraPredFunc(&aom_highbd_tm_predictor_32x32_sse2,
+                                    &aom_highbd_tm_predictor_32x32_c, 32, 12),
+                      IntraPredFunc(&aom_highbd_dc_predictor_4x4_sse2,
+                                    &aom_highbd_dc_predictor_4x4_c, 4, 12),
+                      IntraPredFunc(&aom_highbd_dc_predictor_8x8_sse2,
+                                    &aom_highbd_dc_predictor_8x8_c, 8, 12),
+                      IntraPredFunc(&aom_highbd_dc_predictor_16x16_sse2,
+                                    &aom_highbd_dc_predictor_16x16_c, 16, 12),
+                      IntraPredFunc(&aom_highbd_v_predictor_4x4_sse2,
+                                    &aom_highbd_v_predictor_4x4_c, 4, 12),
+                      IntraPredFunc(&aom_highbd_v_predictor_8x8_sse2,
+                                    &aom_highbd_v_predictor_8x8_c, 8, 12),
+                      IntraPredFunc(&aom_highbd_v_predictor_16x16_sse2,
+                                    &aom_highbd_v_predictor_16x16_c, 16, 12),
+                      IntraPredFunc(&aom_highbd_v_predictor_32x32_sse2,
+                                    &aom_highbd_v_predictor_32x32_c, 32, 12),
+                      IntraPredFunc(&aom_highbd_tm_predictor_4x4_sse2,
+                                    &aom_highbd_tm_predictor_4x4_c, 4, 12),
+                      IntraPredFunc(&aom_highbd_tm_predictor_8x8_sse2,
+                                    &aom_highbd_tm_predictor_8x8_c, 8, 12)));
 
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 }  // namespace
diff --git a/test/level_test.cc b/test/level_test.cc
index 0a12668..3c46903 100644
--- a/test/level_test.cc
+++ b/test/level_test.cc
@@ -29,10 +29,10 @@
     SetMode(encoding_mode_);
     if (encoding_mode_ != ::libaom_test::kRealTime) {
       cfg_.g_lag_in_frames = 25;
-      cfg_.rc_end_usage = VPX_VBR;
+      cfg_.rc_end_usage = AOM_VBR;
     } else {
       cfg_.g_lag_in_frames = 0;
-      cfg_.rc_end_usage = VPX_CBR;
+      cfg_.rc_end_usage = AOM_CBR;
     }
     cfg_.rc_2pass_vbr_minsection_pct = 5;
     cfg_.rc_2pass_vbr_maxsection_pct = 2000;
@@ -44,17 +44,17 @@
   virtual void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                                   ::libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
-      encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
-      encoder->Control(VP9E_SET_TARGET_LEVEL, target_level_);
-      encoder->Control(VP9E_SET_MIN_GF_INTERVAL, min_gf_internal_);
+      encoder->Control(AOME_SET_CPUUSED, cpu_used_);
+      encoder->Control(AV1E_SET_TARGET_LEVEL, target_level_);
+      encoder->Control(AV1E_SET_MIN_GF_INTERVAL, min_gf_internal_);
       if (encoding_mode_ != ::libaom_test::kRealTime) {
-        encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-        encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
-        encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
-        encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+        encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+        encoder->Control(AOME_SET_ARNR_MAXFRAMES, 7);
+        encoder->Control(AOME_SET_ARNR_STRENGTH, 5);
+        encoder->Control(AOME_SET_ARNR_TYPE, 3);
       }
     }
-    encoder->Control(VP9E_GET_LEVEL, &level_);
+    encoder->Control(AV1E_GET_LEVEL, &level_);
     ASSERT_LE(level_, 51);
     ASSERT_GE(level_, 0);
   }
@@ -90,26 +90,26 @@
 
 TEST_P(LevelTest, TestTargetLevelApi) {
   ::libaom_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, 1);
-  static const vpx_codec_iface_t *codec = &vpx_codec_vp9_cx_algo;
-  vpx_codec_ctx_t enc;
-  vpx_codec_enc_cfg_t cfg;
-  EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_config_default(codec, &cfg, 0));
-  EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_init(&enc, codec, &cfg, 0));
+  static const aom_codec_iface_t *codec = &aom_codec_av1_cx_algo;
+  aom_codec_ctx_t enc;
+  aom_codec_enc_cfg_t cfg;
+  EXPECT_EQ(AOM_CODEC_OK, aom_codec_enc_config_default(codec, &cfg, 0));
+  EXPECT_EQ(AOM_CODEC_OK, aom_codec_enc_init(&enc, codec, &cfg, 0));
   for (int level = 0; level <= 256; ++level) {
     if (level == 10 || level == 11 || level == 20 || level == 21 ||
         level == 30 || level == 31 || level == 40 || level == 41 ||
         level == 50 || level == 51 || level == 52 || level == 60 ||
         level == 61 || level == 62 || level == 0 || level == 255)
-      EXPECT_EQ(VPX_CODEC_OK,
-                vpx_codec_control(&enc, VP9E_SET_TARGET_LEVEL, level));
+      EXPECT_EQ(AOM_CODEC_OK,
+                aom_codec_control(&enc, AV1E_SET_TARGET_LEVEL, level));
     else
-      EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-                vpx_codec_control(&enc, VP9E_SET_TARGET_LEVEL, level));
+      EXPECT_EQ(AOM_CODEC_INVALID_PARAM,
+                aom_codec_control(&enc, AV1E_SET_TARGET_LEVEL, level));
   }
-  EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&enc));
+  EXPECT_EQ(AOM_CODEC_OK, aom_codec_destroy(&enc));
 }
 
-VP9_INSTANTIATE_TEST_CASE(LevelTest,
+AV1_INSTANTIATE_TEST_CASE(LevelTest,
                           ::testing::Values(::libaom_test::kTwoPassGood,
                                             ::libaom_test::kOnePassGood),
                           ::testing::Range(0, 9));
diff --git a/test/lossless_test.cc b/test/lossless_test.cc
index 3989e93..8a94248 100644
--- a/test/lossless_test.cc
+++ b/test/lossless_test.cc
@@ -10,7 +10,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "test/codec_factory.h"
 #include "test/encode_test_driver.h"
 #include "test/i420_video_source.h"
@@ -42,7 +42,7 @@
       // Only call Control if quantizer > 0 to verify that using quantizer
       // alone will activate lossless
       if (cfg_.rc_max_quantizer > 0 || cfg_.rc_min_quantizer > 0) {
-        encoder->Control(VP9E_SET_LOSSLESS, 1);
+        encoder->Control(AV1E_SET_LOSSLESS, 1);
       }
     }
   }
@@ -52,7 +52,7 @@
     nframes_ = 0;
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     if (pkt->data.psnr.psnr[0] < psnr_) psnr_ = pkt->data.psnr.psnr[0];
   }
 
@@ -65,14 +65,14 @@
 };
 
 TEST_P(LosslessTestLarge, TestLossLessEncoding) {
-  const vpx_rational timebase = { 33333333, 1000000000 };
+  const aom_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
   cfg_.rc_target_bitrate = 2000;
   cfg_.g_lag_in_frames = 25;
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 0;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   // intentionally changed the dimension for better testing coverage
   libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
@@ -92,7 +92,7 @@
   cfg_.rc_min_quantizer = 0;
   cfg_.rc_max_quantizer = 0;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   const double psnr_lossless = GetMinPsnr();
@@ -100,7 +100,7 @@
 }
 
 TEST_P(LosslessTestLarge, TestLossLessEncodingCtrl) {
-  const vpx_rational timebase = { 33333333, 1000000000 };
+  const aom_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
   cfg_.rc_target_bitrate = 2000;
   cfg_.g_lag_in_frames = 25;
@@ -109,7 +109,7 @@
   cfg_.rc_min_quantizer = 10;
   cfg_.rc_max_quantizer = 20;
 
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
 
   libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
                                      timebase.den, timebase.num, 0, 5);
@@ -118,7 +118,7 @@
   EXPECT_GE(psnr_lossless, kMaxPsnr);
 }
 
-VP10_INSTANTIATE_TEST_CASE(LosslessTestLarge,
-                           ::testing::Values(::libaom_test::kOnePassGood,
-                                             ::libaom_test::kTwoPassGood));
+AV1_INSTANTIATE_TEST_CASE(LosslessTestLarge,
+                          ::testing::Values(::libaom_test::kOnePassGood,
+                                            ::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index 3088b4a..b187574 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -14,15 +14,15 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/entropy.h"
 #include "av1/common/loopfilter.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 using libaom_test::ACMRandom;
 
@@ -34,7 +34,7 @@
 
 const int number_of_iterations = 10000;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh, int bd);
 typedef void (*dual_loop_op_t)(uint16_t *s, int p, const uint8_t *blimit0,
@@ -48,7 +48,7 @@
                                const uint8_t *limit0, const uint8_t *thresh0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 typedef std::tr1::tuple<loop_op_t, loop_op_t, int> loop8_param_t;
 typedef std::tr1::tuple<dual_loop_op_t, dual_loop_op_t, int> dualloop8_param_t;
@@ -94,14 +94,14 @@
 TEST_P(Loop8Test6Param, OperationCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -149,7 +149,7 @@
       }
       ref_s[j] = s[j];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, bd);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
@@ -157,7 +157,7 @@
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -176,18 +176,18 @@
 TEST_P(Loop8Test6Param, ValueCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
 
-  // NOTE: The code in vp9_loopfilter.c:update_sharpness computes mblim as a
+  // NOTE: The code in av1_loopfilter.c:update_sharpness computes mblim as a
   // function of sharpness_lvl and the loopfilter lvl as:
   // block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));
   // ...
@@ -218,7 +218,7 @@
       s[j] = rnd.Rand16() & mask_;
       ref_s[j] = s[j];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, bd);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
@@ -226,7 +226,7 @@
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -244,14 +244,14 @@
 TEST_P(Loop8Test9Param, OperationCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -311,7 +311,7 @@
       }
       ref_s[j] = s[j];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
                        limit1, thresh1, bd);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
@@ -322,7 +322,7 @@
                        limit1, thresh1);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
                                             thresh0, blimit1, limit1, thresh1));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -340,13 +340,13 @@
 TEST_P(Loop8Test9Param, ValueCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -380,7 +380,7 @@
       s[j] = rnd.Rand16() & mask_;
       ref_s[j] = s[j];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const int32_t bd = bit_depth_;
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
                        limit1, thresh1, bd);
@@ -392,7 +392,7 @@
                        limit1, thresh1);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
                                             thresh0, blimit1, limit1, thresh1));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -410,128 +410,128 @@
 using std::tr1::make_tuple;
 
 #if HAVE_SSE2
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
-    ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                                 &vpx_highbd_lpf_vertical_4_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_8_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_16_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                                 &vpx_highbd_lpf_vertical_8_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                                 &vpx_highbd_lpf_vertical_16_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                                 &vpx_highbd_lpf_vertical_4_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_8_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_16_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                                 &vpx_highbd_lpf_vertical_8_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                                 &vpx_highbd_lpf_vertical_16_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                                 &vpx_highbd_lpf_vertical_4_c, 12),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_c, 12),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_8_c, 12),
-                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                                 &vpx_highbd_lpf_horizontal_edge_16_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                                 &vpx_highbd_lpf_vertical_8_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                                 &vpx_highbd_lpf_vertical_16_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_16_dual_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_16_dual_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_16_dual_c, 12)));
+    ::testing::Values(make_tuple(&aom_highbd_lpf_horizontal_4_sse2,
+                                 &aom_highbd_lpf_horizontal_4_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_4_sse2,
+                                 &aom_highbd_lpf_vertical_4_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_sse2,
+                                 &aom_highbd_lpf_horizontal_8_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_8_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_8_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_16_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_16_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_8_sse2,
+                                 &aom_highbd_lpf_vertical_8_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_16_sse2,
+                                 &aom_highbd_lpf_vertical_16_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_4_sse2,
+                                 &aom_highbd_lpf_horizontal_4_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_4_sse2,
+                                 &aom_highbd_lpf_vertical_4_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_sse2,
+                                 &aom_highbd_lpf_horizontal_8_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_8_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_8_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_16_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_16_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_8_sse2,
+                                 &aom_highbd_lpf_vertical_8_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_16_sse2,
+                                 &aom_highbd_lpf_vertical_16_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_4_sse2,
+                                 &aom_highbd_lpf_horizontal_4_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_4_sse2,
+                                 &aom_highbd_lpf_vertical_4_c, 12),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_sse2,
+                                 &aom_highbd_lpf_horizontal_8_c, 12),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_8_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_8_c, 12),
+                      make_tuple(&aom_highbd_lpf_horizontal_edge_16_sse2,
+                                 &aom_highbd_lpf_horizontal_edge_16_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_8_sse2,
+                                 &aom_highbd_lpf_vertical_8_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_16_sse2,
+                                 &aom_highbd_lpf_vertical_16_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_16_dual_sse2,
+                                 &aom_highbd_lpf_vertical_16_dual_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_16_dual_sse2,
+                                 &aom_highbd_lpf_vertical_16_dual_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_16_dual_sse2,
+                                 &aom_highbd_lpf_vertical_16_dual_c, 12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_sse2, &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_sse2, &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_8_sse2,
-                   &vpx_lpf_horizontal_edge_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_16_sse2,
-                   &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_sse2, &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_sse2, &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_sse2, &vpx_lpf_vertical_16_dual_c,
+        make_tuple(&aom_lpf_horizontal_4_sse2, &aom_lpf_horizontal_4_c, 8),
+        make_tuple(&aom_lpf_horizontal_8_sse2, &aom_lpf_horizontal_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_edge_8_sse2,
+                   &aom_lpf_horizontal_edge_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_edge_16_sse2,
+                   &aom_lpf_horizontal_edge_16_c, 8),
+        make_tuple(&aom_lpf_vertical_4_sse2, &aom_lpf_vertical_4_c, 8),
+        make_tuple(&aom_lpf_vertical_8_sse2, &aom_lpf_vertical_8_c, 8),
+        make_tuple(&aom_lpf_vertical_16_sse2, &aom_lpf_vertical_16_c, 8),
+        make_tuple(&aom_lpf_vertical_16_dual_sse2, &aom_lpf_vertical_16_dual_c,
                    8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif
 
-#if HAVE_AVX2 && (!CONFIG_VP9_HIGHBITDEPTH)
+#if HAVE_AVX2 && (!CONFIG_AOM_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     AVX2, Loop8Test6Param,
-    ::testing::Values(make_tuple(&vpx_lpf_horizontal_edge_8_avx2,
-                                 &vpx_lpf_horizontal_edge_8_c, 8),
-                      make_tuple(&vpx_lpf_horizontal_edge_16_avx2,
-                                 &vpx_lpf_horizontal_edge_16_c, 8)));
+    ::testing::Values(make_tuple(&aom_lpf_horizontal_edge_8_avx2,
+                                 &aom_lpf_horizontal_edge_8_c, 8),
+                      make_tuple(&aom_lpf_horizontal_edge_16_avx2,
+                                 &aom_lpf_horizontal_edge_16_c, 8)));
 #endif
 
 #if HAVE_SSE2
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_dual_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_dual_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_4_dual_c, 8),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_8_dual_c, 8),
-                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_dual_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_dual_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_4_dual_c, 10),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_8_dual_c, 10),
-                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_4_dual_c, 12),
-                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                                 &vpx_highbd_lpf_horizontal_8_dual_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_4_dual_c, 12),
-                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                                 &vpx_highbd_lpf_vertical_8_dual_c, 12)));
+    ::testing::Values(make_tuple(&aom_highbd_lpf_horizontal_4_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_4_dual_sse2,
+                                 &aom_highbd_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&aom_highbd_lpf_vertical_8_dual_sse2,
+                                 &aom_highbd_lpf_vertical_8_dual_c, 8),
+                      make_tuple(&aom_highbd_lpf_horizontal_4_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_4_dual_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_8_dual_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_4_dual_sse2,
+                                 &aom_highbd_lpf_vertical_4_dual_c, 10),
+                      make_tuple(&aom_highbd_lpf_vertical_8_dual_sse2,
+                                 &aom_highbd_lpf_vertical_8_dual_c, 10),
+                      make_tuple(&aom_highbd_lpf_horizontal_4_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_4_dual_c, 12),
+                      make_tuple(&aom_highbd_lpf_horizontal_8_dual_sse2,
+                                 &aom_highbd_lpf_horizontal_8_dual_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_4_dual_sse2,
+                                 &aom_highbd_lpf_vertical_4_dual_c, 12),
+                      make_tuple(&aom_highbd_lpf_vertical_8_dual_sse2,
+                                 &aom_highbd_lpf_vertical_8_dual_c, 12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
-                                 &vpx_lpf_horizontal_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
-                                 &vpx_lpf_horizontal_8_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_4_dual_sse2,
-                                 &vpx_lpf_vertical_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_8_dual_sse2,
-                                 &vpx_lpf_vertical_8_dual_c, 8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    ::testing::Values(make_tuple(&aom_lpf_horizontal_4_dual_sse2,
+                                 &aom_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&aom_lpf_horizontal_8_dual_sse2,
+                                 &aom_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_4_dual_sse2,
+                                 &aom_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_8_dual_sse2,
+                                 &aom_lpf_vertical_8_dual_c, 8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif
 
 #if HAVE_NEON
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // No neon high bitdepth functions.
 #else
 INSTANTIATE_TEST_CASE_P(
@@ -541,83 +541,83 @@
         // Using #if inside the macro is unsupported on MSVS but the tests are
         // not
         // currently built for MSVS with ARM and NEON.
-        make_tuple(&vpx_lpf_horizontal_edge_8_neon,
-                   &vpx_lpf_horizontal_edge_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_16_neon,
-                   &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_neon, &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_neon, &vpx_lpf_vertical_16_dual_c,
+        make_tuple(&aom_lpf_horizontal_edge_8_neon,
+                   &aom_lpf_horizontal_edge_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_edge_16_neon,
+                   &aom_lpf_horizontal_edge_16_c, 8),
+        make_tuple(&aom_lpf_vertical_16_neon, &aom_lpf_vertical_16_c, 8),
+        make_tuple(&aom_lpf_vertical_16_dual_neon, &aom_lpf_vertical_16_dual_c,
                    8),
 #endif  // HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_8_neon, &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_neon, &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_4_neon, &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_neon, &vpx_lpf_vertical_4_c, 8)));
+        make_tuple(&aom_lpf_horizontal_8_neon, &aom_lpf_horizontal_8_c, 8),
+        make_tuple(&aom_lpf_vertical_8_neon, &aom_lpf_vertical_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_4_neon, &aom_lpf_horizontal_4_c, 8),
+        make_tuple(&aom_lpf_vertical_4_neon, &aom_lpf_vertical_4_c, 8)));
 INSTANTIATE_TEST_CASE_P(NEON, Loop8Test9Param,
                         ::testing::Values(
 #if HAVE_NEON_ASM
-                            make_tuple(&vpx_lpf_horizontal_8_dual_neon,
-                                       &vpx_lpf_horizontal_8_dual_c, 8),
-                            make_tuple(&vpx_lpf_vertical_8_dual_neon,
-                                       &vpx_lpf_vertical_8_dual_c, 8),
+                            make_tuple(&aom_lpf_horizontal_8_dual_neon,
+                                       &aom_lpf_horizontal_8_dual_c, 8),
+                            make_tuple(&aom_lpf_vertical_8_dual_neon,
+                                       &aom_lpf_vertical_8_dual_c, 8),
 #endif  // HAVE_NEON_ASM
-                            make_tuple(&vpx_lpf_horizontal_4_dual_neon,
-                                       &vpx_lpf_horizontal_4_dual_c, 8),
-                            make_tuple(&vpx_lpf_vertical_4_dual_neon,
-                                       &vpx_lpf_vertical_4_dual_c, 8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                            make_tuple(&aom_lpf_horizontal_4_dual_neon,
+                                       &aom_lpf_horizontal_4_dual_c, 8),
+                            make_tuple(&aom_lpf_vertical_4_dual_neon,
+                                       &aom_lpf_vertical_4_dual_c, 8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_NEON
 
-#if HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_DSPR2 && !CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     DSPR2, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dspr2, &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dspr2, &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_8, &vpx_lpf_horizontal_edge_8, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_16, &vpx_lpf_horizontal_edge_16, 8),
-        make_tuple(&vpx_lpf_vertical_4_dspr2, &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dspr2, &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dspr2, &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_dspr2, &vpx_lpf_vertical_16_dual_c,
+        make_tuple(&aom_lpf_horizontal_4_dspr2, &aom_lpf_horizontal_4_c, 8),
+        make_tuple(&aom_lpf_horizontal_8_dspr2, &aom_lpf_horizontal_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_edge_8, &aom_lpf_horizontal_edge_8, 8),
+        make_tuple(&aom_lpf_horizontal_edge_16, &aom_lpf_horizontal_edge_16, 8),
+        make_tuple(&aom_lpf_vertical_4_dspr2, &aom_lpf_vertical_4_c, 8),
+        make_tuple(&aom_lpf_vertical_8_dspr2, &aom_lpf_vertical_8_c, 8),
+        make_tuple(&aom_lpf_vertical_16_dspr2, &aom_lpf_vertical_16_c, 8),
+        make_tuple(&aom_lpf_vertical_16_dual_dspr2, &aom_lpf_vertical_16_dual_c,
                    8)));
 
 INSTANTIATE_TEST_CASE_P(
     DSPR2, Loop8Test9Param,
-    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_dspr2,
-                                 &vpx_lpf_horizontal_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_horizontal_8_dual_dspr2,
-                                 &vpx_lpf_horizontal_8_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_4_dual_dspr2,
-                                 &vpx_lpf_vertical_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_8_dual_dspr2,
-                                 &vpx_lpf_vertical_8_dual_c, 8)));
-#endif  // HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
+    ::testing::Values(make_tuple(&aom_lpf_horizontal_4_dual_dspr2,
+                                 &aom_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&aom_lpf_horizontal_8_dual_dspr2,
+                                 &aom_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_4_dual_dspr2,
+                                 &aom_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_8_dual_dspr2,
+                                 &aom_lpf_vertical_8_dual_c, 8)));
+#endif  // HAVE_DSPR2 && !CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
+#if HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_msa, &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_msa, &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_8_msa, &vpx_lpf_horizontal_edge_8_c,
+        make_tuple(&aom_lpf_horizontal_4_msa, &aom_lpf_horizontal_4_c, 8),
+        make_tuple(&aom_lpf_horizontal_8_msa, &aom_lpf_horizontal_8_c, 8),
+        make_tuple(&aom_lpf_horizontal_edge_8_msa, &aom_lpf_horizontal_edge_8_c,
                    8),
-        make_tuple(&vpx_lpf_horizontal_edge_16_msa,
-                   &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_msa, &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_msa, &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_msa, &vpx_lpf_vertical_16_c, 8)));
+        make_tuple(&aom_lpf_horizontal_edge_16_msa,
+                   &aom_lpf_horizontal_edge_16_c, 8),
+        make_tuple(&aom_lpf_vertical_4_msa, &aom_lpf_vertical_4_c, 8),
+        make_tuple(&aom_lpf_vertical_8_msa, &aom_lpf_vertical_8_c, 8),
+        make_tuple(&aom_lpf_vertical_16_msa, &aom_lpf_vertical_16_c, 8)));
 
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test9Param,
-    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_msa,
-                                 &vpx_lpf_horizontal_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_horizontal_8_dual_msa,
-                                 &vpx_lpf_horizontal_8_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_4_dual_msa,
-                                 &vpx_lpf_vertical_4_dual_c, 8),
-                      make_tuple(&vpx_lpf_vertical_8_dual_msa,
-                                 &vpx_lpf_vertical_8_dual_c, 8)));
-#endif  // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
+    ::testing::Values(make_tuple(&aom_lpf_horizontal_4_dual_msa,
+                                 &aom_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&aom_lpf_horizontal_8_dual_msa,
+                                 &aom_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_4_dual_msa,
+                                 &aom_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&aom_lpf_vertical_8_dual_msa,
+                                 &aom_lpf_vertical_8_dual_c, 8)));
+#endif  // HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 
 }  // namespace
diff --git a/test/masked_sad_test.cc b/test/masked_sad_test.cc
index b9af5ad..68ff0be 100644
--- a/test/masked_sad_test.cc
+++ b/test/masked_sad_test.cc
@@ -18,9 +18,9 @@
 #include "test/register_state_check.h"
 #include "test/util.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 using libaom_test::ACMRandom;
 
@@ -81,7 +81,7 @@
       << "First failed at test case " << first_failure;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef unsigned int (*HighbdMaskedSADFunc)(const uint8_t *a, int a_stride,
                                             const uint8_t *b, int b_stride,
                                             const uint8_t *m, int m_stride);
@@ -138,7 +138,7 @@
       << "Error: High BD Masked SAD Test, C output doesn't match SSSE3 output. "
       << "First failed at test case " << first_failure;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 using std::tr1::make_tuple;
 
@@ -147,60 +147,60 @@
     SSSE3_C_COMPARE, MaskedSADTest,
     ::testing::Values(
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_sad128x128_ssse3, &vpx_masked_sad128x128_c),
-        make_tuple(&vpx_masked_sad128x64_ssse3, &vpx_masked_sad128x64_c),
-        make_tuple(&vpx_masked_sad64x128_ssse3, &vpx_masked_sad64x128_c),
+        make_tuple(&aom_masked_sad128x128_ssse3, &aom_masked_sad128x128_c),
+        make_tuple(&aom_masked_sad128x64_ssse3, &aom_masked_sad128x64_c),
+        make_tuple(&aom_masked_sad64x128_ssse3, &aom_masked_sad64x128_c),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_sad64x64_ssse3, &vpx_masked_sad64x64_c),
-        make_tuple(&vpx_masked_sad64x32_ssse3, &vpx_masked_sad64x32_c),
-        make_tuple(&vpx_masked_sad32x64_ssse3, &vpx_masked_sad32x64_c),
-        make_tuple(&vpx_masked_sad32x32_ssse3, &vpx_masked_sad32x32_c),
-        make_tuple(&vpx_masked_sad32x16_ssse3, &vpx_masked_sad32x16_c),
-        make_tuple(&vpx_masked_sad16x32_ssse3, &vpx_masked_sad16x32_c),
-        make_tuple(&vpx_masked_sad16x16_ssse3, &vpx_masked_sad16x16_c),
-        make_tuple(&vpx_masked_sad16x8_ssse3, &vpx_masked_sad16x8_c),
-        make_tuple(&vpx_masked_sad8x16_ssse3, &vpx_masked_sad8x16_c),
-        make_tuple(&vpx_masked_sad8x8_ssse3, &vpx_masked_sad8x8_c),
-        make_tuple(&vpx_masked_sad8x4_ssse3, &vpx_masked_sad8x4_c),
-        make_tuple(&vpx_masked_sad4x8_ssse3, &vpx_masked_sad4x8_c),
-        make_tuple(&vpx_masked_sad4x4_ssse3, &vpx_masked_sad4x4_c)));
-#if CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(&aom_masked_sad64x64_ssse3, &aom_masked_sad64x64_c),
+        make_tuple(&aom_masked_sad64x32_ssse3, &aom_masked_sad64x32_c),
+        make_tuple(&aom_masked_sad32x64_ssse3, &aom_masked_sad32x64_c),
+        make_tuple(&aom_masked_sad32x32_ssse3, &aom_masked_sad32x32_c),
+        make_tuple(&aom_masked_sad32x16_ssse3, &aom_masked_sad32x16_c),
+        make_tuple(&aom_masked_sad16x32_ssse3, &aom_masked_sad16x32_c),
+        make_tuple(&aom_masked_sad16x16_ssse3, &aom_masked_sad16x16_c),
+        make_tuple(&aom_masked_sad16x8_ssse3, &aom_masked_sad16x8_c),
+        make_tuple(&aom_masked_sad8x16_ssse3, &aom_masked_sad8x16_c),
+        make_tuple(&aom_masked_sad8x8_ssse3, &aom_masked_sad8x8_c),
+        make_tuple(&aom_masked_sad8x4_ssse3, &aom_masked_sad8x4_c),
+        make_tuple(&aom_masked_sad4x8_ssse3, &aom_masked_sad4x8_c),
+        make_tuple(&aom_masked_sad4x4_ssse3, &aom_masked_sad4x4_c)));
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSADTest,
                         ::testing::Values(
 #if CONFIG_EXT_PARTITION
-                            make_tuple(&vpx_highbd_masked_sad128x128_ssse3,
-                                       &vpx_highbd_masked_sad128x128_c),
-                            make_tuple(&vpx_highbd_masked_sad128x64_ssse3,
-                                       &vpx_highbd_masked_sad128x64_c),
-                            make_tuple(&vpx_highbd_masked_sad64x128_ssse3,
-                                       &vpx_highbd_masked_sad64x128_c),
+                            make_tuple(&aom_highbd_masked_sad128x128_ssse3,
+                                       &aom_highbd_masked_sad128x128_c),
+                            make_tuple(&aom_highbd_masked_sad128x64_ssse3,
+                                       &aom_highbd_masked_sad128x64_c),
+                            make_tuple(&aom_highbd_masked_sad64x128_ssse3,
+                                       &aom_highbd_masked_sad64x128_c),
 #endif  // CONFIG_EXT_PARTITION
-                            make_tuple(&vpx_highbd_masked_sad64x64_ssse3,
-                                       &vpx_highbd_masked_sad64x64_c),
-                            make_tuple(&vpx_highbd_masked_sad64x32_ssse3,
-                                       &vpx_highbd_masked_sad64x32_c),
-                            make_tuple(&vpx_highbd_masked_sad32x64_ssse3,
-                                       &vpx_highbd_masked_sad32x64_c),
-                            make_tuple(&vpx_highbd_masked_sad32x32_ssse3,
-                                       &vpx_highbd_masked_sad32x32_c),
-                            make_tuple(&vpx_highbd_masked_sad32x16_ssse3,
-                                       &vpx_highbd_masked_sad32x16_c),
-                            make_tuple(&vpx_highbd_masked_sad16x32_ssse3,
-                                       &vpx_highbd_masked_sad16x32_c),
-                            make_tuple(&vpx_highbd_masked_sad16x16_ssse3,
-                                       &vpx_highbd_masked_sad16x16_c),
-                            make_tuple(&vpx_highbd_masked_sad16x8_ssse3,
-                                       &vpx_highbd_masked_sad16x8_c),
-                            make_tuple(&vpx_highbd_masked_sad8x16_ssse3,
-                                       &vpx_highbd_masked_sad8x16_c),
-                            make_tuple(&vpx_highbd_masked_sad8x8_ssse3,
-                                       &vpx_highbd_masked_sad8x8_c),
-                            make_tuple(&vpx_highbd_masked_sad8x4_ssse3,
-                                       &vpx_highbd_masked_sad8x4_c),
-                            make_tuple(&vpx_highbd_masked_sad4x8_ssse3,
-                                       &vpx_highbd_masked_sad4x8_c),
-                            make_tuple(&vpx_highbd_masked_sad4x4_ssse3,
-                                       &vpx_highbd_masked_sad4x4_c)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                            make_tuple(&aom_highbd_masked_sad64x64_ssse3,
+                                       &aom_highbd_masked_sad64x64_c),
+                            make_tuple(&aom_highbd_masked_sad64x32_ssse3,
+                                       &aom_highbd_masked_sad64x32_c),
+                            make_tuple(&aom_highbd_masked_sad32x64_ssse3,
+                                       &aom_highbd_masked_sad32x64_c),
+                            make_tuple(&aom_highbd_masked_sad32x32_ssse3,
+                                       &aom_highbd_masked_sad32x32_c),
+                            make_tuple(&aom_highbd_masked_sad32x16_ssse3,
+                                       &aom_highbd_masked_sad32x16_c),
+                            make_tuple(&aom_highbd_masked_sad16x32_ssse3,
+                                       &aom_highbd_masked_sad16x32_c),
+                            make_tuple(&aom_highbd_masked_sad16x16_ssse3,
+                                       &aom_highbd_masked_sad16x16_c),
+                            make_tuple(&aom_highbd_masked_sad16x8_ssse3,
+                                       &aom_highbd_masked_sad16x8_c),
+                            make_tuple(&aom_highbd_masked_sad8x16_ssse3,
+                                       &aom_highbd_masked_sad8x16_c),
+                            make_tuple(&aom_highbd_masked_sad8x8_ssse3,
+                                       &aom_highbd_masked_sad8x8_c),
+                            make_tuple(&aom_highbd_masked_sad8x4_ssse3,
+                                       &aom_highbd_masked_sad8x4_c),
+                            make_tuple(&aom_highbd_masked_sad4x8_ssse3,
+                                       &aom_highbd_masked_sad4x8_c),
+                            make_tuple(&aom_highbd_masked_sad4x4_ssse3,
+                                       &aom_highbd_masked_sad4x4_c)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSSE3
 }  // namespace
diff --git a/test/masked_variance_test.cc b/test/masked_variance_test.cc
index 36fde93..3e44bf7 100644
--- a/test/masked_variance_test.cc
+++ b/test/masked_variance_test.cc
@@ -18,12 +18,12 @@
 #include "test/register_state_check.h"
 #include "test/util.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_filter.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_filter.h"
+#include "aom_mem/aom_mem.h"
 
 using libaom_test::ACMRandom;
 
@@ -248,8 +248,8 @@
                           << " y_offset = " << first_failure_y;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef std::tr1::tuple<MaskedVarianceFunc, MaskedVarianceFunc, vpx_bit_depth_t>
+#if CONFIG_AOM_HIGHBITDEPTH
+typedef std::tr1::tuple<MaskedVarianceFunc, MaskedVarianceFunc, aom_bit_depth_t>
     HighbdMaskedVarianceParam;
 
 class HighbdMaskedVarianceTest
@@ -267,7 +267,7 @@
  protected:
   MaskedVarianceFunc opt_func_;
   MaskedVarianceFunc ref_func_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
 };
 
 TEST_P(HighbdMaskedVarianceTest, OperationCheck) {
@@ -325,9 +325,9 @@
   int msk_stride = MAX_SB_SIZE;
 
   for (int i = 0; i < 8; ++i) {
-    vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
+    aom_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
                  MAX_SB_SIZE * MAX_SB_SIZE);
-    vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
+    aom_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
                  MAX_SB_SIZE * MAX_SB_SIZE);
     memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE * MAX_SB_SIZE);
 
@@ -349,7 +349,7 @@
 }
 
 typedef std::tr1::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc,
-                        vpx_bit_depth_t> HighbdMaskedSubPixelVarianceParam;
+                        aom_bit_depth_t> HighbdMaskedSubPixelVarianceParam;
 
 class HighbdMaskedSubPixelVarianceTest
     : public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
@@ -366,7 +366,7 @@
  protected:
   MaskedSubPixelVarianceFunc opt_func_;
   MaskedSubPixelVarianceFunc ref_func_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
 };
 
 TEST_P(HighbdMaskedSubPixelVarianceTest, OperationCheck) {
@@ -442,9 +442,9 @@
   for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
     for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
       for (int i = 0; i < 8; ++i) {
-        vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
+        aom_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
                      (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
-        vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
+        aom_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
                      (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
         memset(msk_ptr, (i & 0x4) ? 64 : 0,
                (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
@@ -474,7 +474,7 @@
                           << " x_offset = " << first_failure_x
                           << " y_offset = " << first_failure_y;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 using std::tr1::make_tuple;
 
@@ -483,306 +483,306 @@
     SSSE3_C_COMPARE, MaskedVarianceTest,
     ::testing::Values(
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_variance128x128_ssse3,
-                   &vpx_masked_variance128x128_c),
-        make_tuple(&vpx_masked_variance128x64_ssse3,
-                   &vpx_masked_variance128x64_c),
-        make_tuple(&vpx_masked_variance64x128_ssse3,
-                   &vpx_masked_variance64x128_c),
+        make_tuple(&aom_masked_variance128x128_ssse3,
+                   &aom_masked_variance128x128_c),
+        make_tuple(&aom_masked_variance128x64_ssse3,
+                   &aom_masked_variance128x64_c),
+        make_tuple(&aom_masked_variance64x128_ssse3,
+                   &aom_masked_variance64x128_c),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_variance64x64_ssse3,
-                   &vpx_masked_variance64x64_c),
-        make_tuple(&vpx_masked_variance64x32_ssse3,
-                   &vpx_masked_variance64x32_c),
-        make_tuple(&vpx_masked_variance32x64_ssse3,
-                   &vpx_masked_variance32x64_c),
-        make_tuple(&vpx_masked_variance32x32_ssse3,
-                   &vpx_masked_variance32x32_c),
-        make_tuple(&vpx_masked_variance32x16_ssse3,
-                   &vpx_masked_variance32x16_c),
-        make_tuple(&vpx_masked_variance16x32_ssse3,
-                   &vpx_masked_variance16x32_c),
-        make_tuple(&vpx_masked_variance16x16_ssse3,
-                   &vpx_masked_variance16x16_c),
-        make_tuple(&vpx_masked_variance16x8_ssse3, &vpx_masked_variance16x8_c),
-        make_tuple(&vpx_masked_variance8x16_ssse3, &vpx_masked_variance8x16_c),
-        make_tuple(&vpx_masked_variance8x8_ssse3, &vpx_masked_variance8x8_c),
-        make_tuple(&vpx_masked_variance8x4_ssse3, &vpx_masked_variance8x4_c),
-        make_tuple(&vpx_masked_variance4x8_ssse3, &vpx_masked_variance4x8_c),
-        make_tuple(&vpx_masked_variance4x4_ssse3, &vpx_masked_variance4x4_c)));
+        make_tuple(&aom_masked_variance64x64_ssse3,
+                   &aom_masked_variance64x64_c),
+        make_tuple(&aom_masked_variance64x32_ssse3,
+                   &aom_masked_variance64x32_c),
+        make_tuple(&aom_masked_variance32x64_ssse3,
+                   &aom_masked_variance32x64_c),
+        make_tuple(&aom_masked_variance32x32_ssse3,
+                   &aom_masked_variance32x32_c),
+        make_tuple(&aom_masked_variance32x16_ssse3,
+                   &aom_masked_variance32x16_c),
+        make_tuple(&aom_masked_variance16x32_ssse3,
+                   &aom_masked_variance16x32_c),
+        make_tuple(&aom_masked_variance16x16_ssse3,
+                   &aom_masked_variance16x16_c),
+        make_tuple(&aom_masked_variance16x8_ssse3, &aom_masked_variance16x8_c),
+        make_tuple(&aom_masked_variance8x16_ssse3, &aom_masked_variance8x16_c),
+        make_tuple(&aom_masked_variance8x8_ssse3, &aom_masked_variance8x8_c),
+        make_tuple(&aom_masked_variance8x4_ssse3, &aom_masked_variance8x4_c),
+        make_tuple(&aom_masked_variance4x8_ssse3, &aom_masked_variance4x8_c),
+        make_tuple(&aom_masked_variance4x4_ssse3, &aom_masked_variance4x4_c)));
 
 INSTANTIATE_TEST_CASE_P(
     SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
     ::testing::Values(
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_sub_pixel_variance128x128_ssse3,
-                   &vpx_masked_sub_pixel_variance128x128_c),
-        make_tuple(&vpx_masked_sub_pixel_variance128x64_ssse3,
-                   &vpx_masked_sub_pixel_variance128x64_c),
-        make_tuple(&vpx_masked_sub_pixel_variance64x128_ssse3,
-                   &vpx_masked_sub_pixel_variance64x128_c),
+        make_tuple(&aom_masked_sub_pixel_variance128x128_ssse3,
+                   &aom_masked_sub_pixel_variance128x128_c),
+        make_tuple(&aom_masked_sub_pixel_variance128x64_ssse3,
+                   &aom_masked_sub_pixel_variance128x64_c),
+        make_tuple(&aom_masked_sub_pixel_variance64x128_ssse3,
+                   &aom_masked_sub_pixel_variance64x128_c),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_masked_sub_pixel_variance64x64_ssse3,
-                   &vpx_masked_sub_pixel_variance64x64_c),
-        make_tuple(&vpx_masked_sub_pixel_variance64x32_ssse3,
-                   &vpx_masked_sub_pixel_variance64x32_c),
-        make_tuple(&vpx_masked_sub_pixel_variance32x64_ssse3,
-                   &vpx_masked_sub_pixel_variance32x64_c),
-        make_tuple(&vpx_masked_sub_pixel_variance32x32_ssse3,
-                   &vpx_masked_sub_pixel_variance32x32_c),
-        make_tuple(&vpx_masked_sub_pixel_variance32x16_ssse3,
-                   &vpx_masked_sub_pixel_variance32x16_c),
-        make_tuple(&vpx_masked_sub_pixel_variance16x32_ssse3,
-                   &vpx_masked_sub_pixel_variance16x32_c),
-        make_tuple(&vpx_masked_sub_pixel_variance16x16_ssse3,
-                   &vpx_masked_sub_pixel_variance16x16_c),
-        make_tuple(&vpx_masked_sub_pixel_variance16x8_ssse3,
-                   &vpx_masked_sub_pixel_variance16x8_c),
-        make_tuple(&vpx_masked_sub_pixel_variance8x16_ssse3,
-                   &vpx_masked_sub_pixel_variance8x16_c),
-        make_tuple(&vpx_masked_sub_pixel_variance8x8_ssse3,
-                   &vpx_masked_sub_pixel_variance8x8_c),
-        make_tuple(&vpx_masked_sub_pixel_variance8x4_ssse3,
-                   &vpx_masked_sub_pixel_variance8x4_c),
-        make_tuple(&vpx_masked_sub_pixel_variance4x8_ssse3,
-                   &vpx_masked_sub_pixel_variance4x8_c),
-        make_tuple(&vpx_masked_sub_pixel_variance4x4_ssse3,
-                   &vpx_masked_sub_pixel_variance4x4_c)));
+        make_tuple(&aom_masked_sub_pixel_variance64x64_ssse3,
+                   &aom_masked_sub_pixel_variance64x64_c),
+        make_tuple(&aom_masked_sub_pixel_variance64x32_ssse3,
+                   &aom_masked_sub_pixel_variance64x32_c),
+        make_tuple(&aom_masked_sub_pixel_variance32x64_ssse3,
+                   &aom_masked_sub_pixel_variance32x64_c),
+        make_tuple(&aom_masked_sub_pixel_variance32x32_ssse3,
+                   &aom_masked_sub_pixel_variance32x32_c),
+        make_tuple(&aom_masked_sub_pixel_variance32x16_ssse3,
+                   &aom_masked_sub_pixel_variance32x16_c),
+        make_tuple(&aom_masked_sub_pixel_variance16x32_ssse3,
+                   &aom_masked_sub_pixel_variance16x32_c),
+        make_tuple(&aom_masked_sub_pixel_variance16x16_ssse3,
+                   &aom_masked_sub_pixel_variance16x16_c),
+        make_tuple(&aom_masked_sub_pixel_variance16x8_ssse3,
+                   &aom_masked_sub_pixel_variance16x8_c),
+        make_tuple(&aom_masked_sub_pixel_variance8x16_ssse3,
+                   &aom_masked_sub_pixel_variance8x16_c),
+        make_tuple(&aom_masked_sub_pixel_variance8x8_ssse3,
+                   &aom_masked_sub_pixel_variance8x8_c),
+        make_tuple(&aom_masked_sub_pixel_variance8x4_ssse3,
+                   &aom_masked_sub_pixel_variance8x4_c),
+        make_tuple(&aom_masked_sub_pixel_variance4x8_ssse3,
+                   &aom_masked_sub_pixel_variance4x8_c),
+        make_tuple(&aom_masked_sub_pixel_variance4x4_ssse3,
+                   &aom_masked_sub_pixel_variance4x4_c)));
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSSE3_C_COMPARE, HighbdMaskedVarianceTest,
     ::testing::Values(
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_masked_variance128x128_ssse3,
-                   &vpx_highbd_masked_variance128x128_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance128x64_ssse3,
-                   &vpx_highbd_masked_variance128x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance64x128_ssse3,
-                   &vpx_highbd_masked_variance64x128_c, VPX_BITS_8),
+        make_tuple(&aom_highbd_masked_variance128x128_ssse3,
+                   &aom_highbd_masked_variance128x128_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance128x64_ssse3,
+                   &aom_highbd_masked_variance128x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance64x128_ssse3,
+                   &aom_highbd_masked_variance64x128_c, AOM_BITS_8),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_masked_variance64x64_ssse3,
-                   &vpx_highbd_masked_variance64x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance64x32_ssse3,
-                   &vpx_highbd_masked_variance64x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance32x64_ssse3,
-                   &vpx_highbd_masked_variance32x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance32x32_ssse3,
-                   &vpx_highbd_masked_variance32x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance32x16_ssse3,
-                   &vpx_highbd_masked_variance32x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance16x32_ssse3,
-                   &vpx_highbd_masked_variance16x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance16x16_ssse3,
-                   &vpx_highbd_masked_variance16x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance16x8_ssse3,
-                   &vpx_highbd_masked_variance16x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance8x16_ssse3,
-                   &vpx_highbd_masked_variance8x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance8x8_ssse3,
-                   &vpx_highbd_masked_variance8x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance8x4_ssse3,
-                   &vpx_highbd_masked_variance8x4_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance4x8_ssse3,
-                   &vpx_highbd_masked_variance4x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_variance4x4_ssse3,
-                   &vpx_highbd_masked_variance4x4_c, VPX_BITS_8),
+        make_tuple(&aom_highbd_masked_variance64x64_ssse3,
+                   &aom_highbd_masked_variance64x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance64x32_ssse3,
+                   &aom_highbd_masked_variance64x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance32x64_ssse3,
+                   &aom_highbd_masked_variance32x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance32x32_ssse3,
+                   &aom_highbd_masked_variance32x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance32x16_ssse3,
+                   &aom_highbd_masked_variance32x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance16x32_ssse3,
+                   &aom_highbd_masked_variance16x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance16x16_ssse3,
+                   &aom_highbd_masked_variance16x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance16x8_ssse3,
+                   &aom_highbd_masked_variance16x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance8x16_ssse3,
+                   &aom_highbd_masked_variance8x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance8x8_ssse3,
+                   &aom_highbd_masked_variance8x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance8x4_ssse3,
+                   &aom_highbd_masked_variance8x4_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance4x8_ssse3,
+                   &aom_highbd_masked_variance4x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_variance4x4_ssse3,
+                   &aom_highbd_masked_variance4x4_c, AOM_BITS_8),
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_10_masked_variance128x128_ssse3,
-                   &vpx_highbd_10_masked_variance128x128_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance128x64_ssse3,
-                   &vpx_highbd_10_masked_variance128x64_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance64x128_ssse3,
-                   &vpx_highbd_10_masked_variance64x128_c, VPX_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance128x128_ssse3,
+                   &aom_highbd_10_masked_variance128x128_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance128x64_ssse3,
+                   &aom_highbd_10_masked_variance128x64_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance64x128_ssse3,
+                   &aom_highbd_10_masked_variance64x128_c, AOM_BITS_10),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_10_masked_variance64x64_ssse3,
-                   &vpx_highbd_10_masked_variance64x64_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance64x32_ssse3,
-                   &vpx_highbd_10_masked_variance64x32_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance32x64_ssse3,
-                   &vpx_highbd_10_masked_variance32x64_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance32x32_ssse3,
-                   &vpx_highbd_10_masked_variance32x32_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance32x16_ssse3,
-                   &vpx_highbd_10_masked_variance32x16_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance16x32_ssse3,
-                   &vpx_highbd_10_masked_variance16x32_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance16x16_ssse3,
-                   &vpx_highbd_10_masked_variance16x16_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance16x8_ssse3,
-                   &vpx_highbd_10_masked_variance16x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance8x16_ssse3,
-                   &vpx_highbd_10_masked_variance8x16_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance8x8_ssse3,
-                   &vpx_highbd_10_masked_variance8x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance8x4_ssse3,
-                   &vpx_highbd_10_masked_variance8x4_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance4x8_ssse3,
-                   &vpx_highbd_10_masked_variance4x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_variance4x4_ssse3,
-                   &vpx_highbd_10_masked_variance4x4_c, VPX_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance64x64_ssse3,
+                   &aom_highbd_10_masked_variance64x64_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance64x32_ssse3,
+                   &aom_highbd_10_masked_variance64x32_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance32x64_ssse3,
+                   &aom_highbd_10_masked_variance32x64_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance32x32_ssse3,
+                   &aom_highbd_10_masked_variance32x32_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance32x16_ssse3,
+                   &aom_highbd_10_masked_variance32x16_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance16x32_ssse3,
+                   &aom_highbd_10_masked_variance16x32_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance16x16_ssse3,
+                   &aom_highbd_10_masked_variance16x16_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance16x8_ssse3,
+                   &aom_highbd_10_masked_variance16x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance8x16_ssse3,
+                   &aom_highbd_10_masked_variance8x16_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance8x8_ssse3,
+                   &aom_highbd_10_masked_variance8x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance8x4_ssse3,
+                   &aom_highbd_10_masked_variance8x4_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance4x8_ssse3,
+                   &aom_highbd_10_masked_variance4x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_variance4x4_ssse3,
+                   &aom_highbd_10_masked_variance4x4_c, AOM_BITS_10),
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_12_masked_variance128x128_ssse3,
-                   &vpx_highbd_12_masked_variance128x128_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance128x64_ssse3,
-                   &vpx_highbd_12_masked_variance128x64_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance64x128_ssse3,
-                   &vpx_highbd_12_masked_variance64x128_c, VPX_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance128x128_ssse3,
+                   &aom_highbd_12_masked_variance128x128_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance128x64_ssse3,
+                   &aom_highbd_12_masked_variance128x64_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance64x128_ssse3,
+                   &aom_highbd_12_masked_variance64x128_c, AOM_BITS_12),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_12_masked_variance64x64_ssse3,
-                   &vpx_highbd_12_masked_variance64x64_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance64x32_ssse3,
-                   &vpx_highbd_12_masked_variance64x32_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance32x64_ssse3,
-                   &vpx_highbd_12_masked_variance32x64_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance32x32_ssse3,
-                   &vpx_highbd_12_masked_variance32x32_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance32x16_ssse3,
-                   &vpx_highbd_12_masked_variance32x16_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance16x32_ssse3,
-                   &vpx_highbd_12_masked_variance16x32_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance16x16_ssse3,
-                   &vpx_highbd_12_masked_variance16x16_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance16x8_ssse3,
-                   &vpx_highbd_12_masked_variance16x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance8x16_ssse3,
-                   &vpx_highbd_12_masked_variance8x16_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance8x8_ssse3,
-                   &vpx_highbd_12_masked_variance8x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance8x4_ssse3,
-                   &vpx_highbd_12_masked_variance8x4_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance4x8_ssse3,
-                   &vpx_highbd_12_masked_variance4x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_variance4x4_ssse3,
-                   &vpx_highbd_12_masked_variance4x4_c, VPX_BITS_12)));
+        make_tuple(&aom_highbd_12_masked_variance64x64_ssse3,
+                   &aom_highbd_12_masked_variance64x64_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance64x32_ssse3,
+                   &aom_highbd_12_masked_variance64x32_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance32x64_ssse3,
+                   &aom_highbd_12_masked_variance32x64_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance32x32_ssse3,
+                   &aom_highbd_12_masked_variance32x32_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance32x16_ssse3,
+                   &aom_highbd_12_masked_variance32x16_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance16x32_ssse3,
+                   &aom_highbd_12_masked_variance16x32_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance16x16_ssse3,
+                   &aom_highbd_12_masked_variance16x16_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance16x8_ssse3,
+                   &aom_highbd_12_masked_variance16x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance8x16_ssse3,
+                   &aom_highbd_12_masked_variance8x16_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance8x8_ssse3,
+                   &aom_highbd_12_masked_variance8x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance8x4_ssse3,
+                   &aom_highbd_12_masked_variance8x4_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance4x8_ssse3,
+                   &aom_highbd_12_masked_variance4x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_variance4x4_ssse3,
+                   &aom_highbd_12_masked_variance4x4_c, AOM_BITS_12)));
 
 INSTANTIATE_TEST_CASE_P(
     SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
     ::testing::Values(
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance128x128_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance128x128_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance128x64_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance128x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance64x128_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance64x128_c, VPX_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance128x128_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance128x128_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance128x64_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance128x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance64x128_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance64x128_c, AOM_BITS_8),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance64x64_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance64x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance64x32_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance64x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance32x64_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance32x64_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance32x32_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance32x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance32x16_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance32x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance16x32_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance16x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance16x16_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance16x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance16x8_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance16x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance8x16_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance8x16_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance8x8_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance8x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance8x4_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance8x4_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance4x8_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance4x8_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_masked_sub_pixel_variance4x4_ssse3,
-                   &vpx_highbd_masked_sub_pixel_variance4x4_c, VPX_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance64x64_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance64x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance64x32_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance64x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance32x64_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance32x64_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance32x32_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance32x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance32x16_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance32x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance16x32_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance16x32_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance16x16_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance16x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance16x8_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance16x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance8x16_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance8x16_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance8x8_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance8x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance8x4_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance8x4_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance4x8_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance4x8_c, AOM_BITS_8),
+        make_tuple(&aom_highbd_masked_sub_pixel_variance4x4_ssse3,
+                   &aom_highbd_masked_sub_pixel_variance4x4_c, AOM_BITS_8),
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x128_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance128x128_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x64_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance128x64_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x128_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance64x128_c,
-                   VPX_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x128_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance128x128_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x64_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance128x64_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x128_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance64x128_c,
+                   AOM_BITS_10),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x64_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance64x64_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x32_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance64x32_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x64_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance32x64_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x32_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance32x32_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x16_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance32x16_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x32_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance16x32_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x16_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance16x16_c,
-                   VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x8_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance16x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x16_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance8x16_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x8_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance8x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x4_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance8x4_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x8_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance4x8_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x4_ssse3,
-                   &vpx_highbd_10_masked_sub_pixel_variance4x4_c, VPX_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x64_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance64x64_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x32_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance64x32_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x64_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance32x64_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x32_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance32x32_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x16_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance32x16_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x32_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance16x32_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x16_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance16x16_c,
+                   AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x8_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance16x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x16_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance8x16_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x8_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance8x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x4_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance8x4_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x8_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance4x8_c, AOM_BITS_10),
+        make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x4_ssse3,
+                   &aom_highbd_10_masked_sub_pixel_variance4x4_c, AOM_BITS_10),
 #if CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x128_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance128x128_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x64_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance128x64_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x128_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance64x128_c,
-                   VPX_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x128_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance128x128_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x64_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance128x64_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x128_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance64x128_c,
+                   AOM_BITS_12),
 #endif  // CONFIG_EXT_PARTITION
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x64_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance64x64_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x32_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance64x32_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x64_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance32x64_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x32_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance32x32_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x16_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance32x16_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x32_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance16x32_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x16_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance16x16_c,
-                   VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x8_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance16x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x16_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance8x16_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x8_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance8x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x4_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance8x4_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x8_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance4x8_c, VPX_BITS_12),
-        make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x4_ssse3,
-                   &vpx_highbd_12_masked_sub_pixel_variance4x4_c,
-                   VPX_BITS_12)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x64_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance64x64_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x32_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance64x32_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x64_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance32x64_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x32_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance32x32_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x16_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance32x16_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x32_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance16x32_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x16_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance16x16_c,
+                   AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x8_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance16x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x16_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance8x16_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x8_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance8x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x4_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance8x4_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x8_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance4x8_c, AOM_BITS_12),
+        make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x4_ssse3,
+                   &aom_highbd_12_masked_sub_pixel_variance4x4_c,
+                   AOM_BITS_12)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #endif  // HAVE_SSSE3
 }  // namespace
diff --git a/test/md5_helper.h b/test/md5_helper.h
index b995619..262dd08 100644
--- a/test/md5_helper.h
+++ b/test/md5_helper.h
@@ -12,14 +12,14 @@
 #define TEST_MD5_HELPER_H_
 
 #include "./md5_utils.h"
-#include "aom/vpx_decoder.h"
+#include "aom/aom_decoder.h"
 
 namespace libaom_test {
 class MD5 {
  public:
   MD5() { MD5Init(&md5_); }
 
-  void Add(const vpx_image_t *img) {
+  void Add(const aom_image_t *img) {
     for (int plane = 0; plane < 3; ++plane) {
       const uint8_t *buf = img->planes[plane];
       // Calculate the width and height to do the md5 check. For the chroma
@@ -27,7 +27,7 @@
       // we are shifting by 1 (chroma_shift) we add 1 before doing the shift.
       // This works only for chroma_shift of 0 and 1.
       const int bytes_per_sample =
-          (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
+          (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
       const int h =
           plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
                 : img->d_h;
diff --git a/test/minmax_test.cc b/test/minmax_test.cc
index 026f79a..735f617 100644
--- a/test/minmax_test.cc
+++ b/test/minmax_test.cc
@@ -13,8 +13,8 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 #include "test/acm_random.h"
 #include "test/register_state_check.h"
@@ -115,16 +115,16 @@
   }
 }
 
-INSTANTIATE_TEST_CASE_P(C, MinMaxTest, ::testing::Values(&vpx_minmax_8x8_c));
+INSTANTIATE_TEST_CASE_P(C, MinMaxTest, ::testing::Values(&aom_minmax_8x8_c));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, MinMaxTest,
-                        ::testing::Values(&vpx_minmax_8x8_sse2));
+                        ::testing::Values(&aom_minmax_8x8_sse2));
 #endif
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(NEON, MinMaxTest,
-                        ::testing::Values(&vpx_minmax_8x8_neon));
+                        ::testing::Values(&aom_minmax_8x8_neon));
 #endif
 
 }  // namespace
diff --git a/test/obmc_sad_test.cc b/test/obmc_sad_test.cc
index c47bd26..40a9ea0 100644
--- a/test/obmc_sad_test.cc
+++ b/test/obmc_sad_test.cc
@@ -13,9 +13,9 @@
 #include "test/function_equivalence_test.h"
 #include "test/register_state_check.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 #define MAX_SB_SQUARE (MAX_SB_SIZE * MAX_SB_SIZE)
 
@@ -85,23 +85,23 @@
 #if HAVE_SSE4_1
 const ObmcSadTest::ParamType sse4_functions[] = {
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_obmc_sad128x128_c, vpx_obmc_sad128x128_sse4_1),
-  TestFuncs(vpx_obmc_sad128x64_c, vpx_obmc_sad128x64_sse4_1),
-  TestFuncs(vpx_obmc_sad64x128_c, vpx_obmc_sad64x128_sse4_1),
+  TestFuncs(aom_obmc_sad128x128_c, aom_obmc_sad128x128_sse4_1),
+  TestFuncs(aom_obmc_sad128x64_c, aom_obmc_sad128x64_sse4_1),
+  TestFuncs(aom_obmc_sad64x128_c, aom_obmc_sad64x128_sse4_1),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_obmc_sad64x64_c, vpx_obmc_sad64x64_sse4_1),
-  TestFuncs(vpx_obmc_sad64x32_c, vpx_obmc_sad64x32_sse4_1),
-  TestFuncs(vpx_obmc_sad32x64_c, vpx_obmc_sad32x64_sse4_1),
-  TestFuncs(vpx_obmc_sad32x32_c, vpx_obmc_sad32x32_sse4_1),
-  TestFuncs(vpx_obmc_sad32x16_c, vpx_obmc_sad32x16_sse4_1),
-  TestFuncs(vpx_obmc_sad16x32_c, vpx_obmc_sad16x32_sse4_1),
-  TestFuncs(vpx_obmc_sad16x16_c, vpx_obmc_sad16x16_sse4_1),
-  TestFuncs(vpx_obmc_sad16x8_c, vpx_obmc_sad16x8_sse4_1),
-  TestFuncs(vpx_obmc_sad8x16_c, vpx_obmc_sad8x16_sse4_1),
-  TestFuncs(vpx_obmc_sad8x8_c, vpx_obmc_sad8x8_sse4_1),
-  TestFuncs(vpx_obmc_sad8x4_c, vpx_obmc_sad8x4_sse4_1),
-  TestFuncs(vpx_obmc_sad4x8_c, vpx_obmc_sad4x8_sse4_1),
-  TestFuncs(vpx_obmc_sad4x4_c, vpx_obmc_sad4x4_sse4_1)
+  TestFuncs(aom_obmc_sad64x64_c, aom_obmc_sad64x64_sse4_1),
+  TestFuncs(aom_obmc_sad64x32_c, aom_obmc_sad64x32_sse4_1),
+  TestFuncs(aom_obmc_sad32x64_c, aom_obmc_sad32x64_sse4_1),
+  TestFuncs(aom_obmc_sad32x32_c, aom_obmc_sad32x32_sse4_1),
+  TestFuncs(aom_obmc_sad32x16_c, aom_obmc_sad32x16_sse4_1),
+  TestFuncs(aom_obmc_sad16x32_c, aom_obmc_sad16x32_sse4_1),
+  TestFuncs(aom_obmc_sad16x16_c, aom_obmc_sad16x16_sse4_1),
+  TestFuncs(aom_obmc_sad16x8_c, aom_obmc_sad16x8_sse4_1),
+  TestFuncs(aom_obmc_sad8x16_c, aom_obmc_sad8x16_sse4_1),
+  TestFuncs(aom_obmc_sad8x8_c, aom_obmc_sad8x8_sse4_1),
+  TestFuncs(aom_obmc_sad8x4_c, aom_obmc_sad8x4_sse4_1),
+  TestFuncs(aom_obmc_sad4x8_c, aom_obmc_sad4x8_sse4_1),
+  TestFuncs(aom_obmc_sad4x4_c, aom_obmc_sad4x4_sse4_1)
 };
 
 INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, ObmcSadTest,
@@ -112,7 +112,7 @@
 // High bit-depth
 ////////////////////////////////////////////////////////////////////////////////
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 class ObmcSadHBDTest : public FunctionEquivalenceTest<ObmcSadF> {};
 
 TEST_P(ObmcSadHBDTest, RandomValues) {
@@ -168,27 +168,27 @@
 #if HAVE_SSE4_1
 ObmcSadHBDTest::ParamType sse4_functions_hbd[] = {
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_obmc_sad128x128_c, vpx_highbd_obmc_sad128x128_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad128x64_c, vpx_highbd_obmc_sad128x64_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad64x128_c, vpx_highbd_obmc_sad64x128_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad128x128_c, aom_highbd_obmc_sad128x128_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad128x64_c, aom_highbd_obmc_sad128x64_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad64x128_c, aom_highbd_obmc_sad64x128_sse4_1),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_obmc_sad64x64_c, vpx_highbd_obmc_sad64x64_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad64x32_c, vpx_highbd_obmc_sad64x32_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad32x64_c, vpx_highbd_obmc_sad32x64_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad32x32_c, vpx_highbd_obmc_sad32x32_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad32x16_c, vpx_highbd_obmc_sad32x16_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad16x32_c, vpx_highbd_obmc_sad16x32_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad16x16_c, vpx_highbd_obmc_sad16x16_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad16x8_c, vpx_highbd_obmc_sad16x8_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad8x16_c, vpx_highbd_obmc_sad8x16_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad8x8_c, vpx_highbd_obmc_sad8x8_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad8x4_c, vpx_highbd_obmc_sad8x4_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad4x8_c, vpx_highbd_obmc_sad4x8_sse4_1),
-  TestFuncs(vpx_highbd_obmc_sad4x4_c, vpx_highbd_obmc_sad4x4_sse4_1)
+  TestFuncs(aom_highbd_obmc_sad64x64_c, aom_highbd_obmc_sad64x64_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad64x32_c, aom_highbd_obmc_sad64x32_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad32x64_c, aom_highbd_obmc_sad32x64_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad32x32_c, aom_highbd_obmc_sad32x32_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad32x16_c, aom_highbd_obmc_sad32x16_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad16x32_c, aom_highbd_obmc_sad16x32_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad16x16_c, aom_highbd_obmc_sad16x16_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad16x8_c, aom_highbd_obmc_sad16x8_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad8x16_c, aom_highbd_obmc_sad8x16_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad8x8_c, aom_highbd_obmc_sad8x8_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad8x4_c, aom_highbd_obmc_sad8x4_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad4x8_c, aom_highbd_obmc_sad4x8_sse4_1),
+  TestFuncs(aom_highbd_obmc_sad4x4_c, aom_highbd_obmc_sad4x4_sse4_1)
 };
 
 INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, ObmcSadHBDTest,
                         ::testing::ValuesIn(sse4_functions_hbd));
 #endif  // HAVE_SSE4_1
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/obmc_variance_test.cc b/test/obmc_variance_test.cc
index ff4dd4a..5a6bace 100644
--- a/test/obmc_variance_test.cc
+++ b/test/obmc_variance_test.cc
@@ -14,9 +14,9 @@
 #include "test/function_equivalence_test.h"
 #include "test/register_state_check.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
 
 #define MAX_SB_SQUARE (MAX_SB_SIZE * MAX_SB_SIZE)
 
@@ -94,23 +94,23 @@
 #if HAVE_SSE4_1
 const ObmcVarianceTest::ParamType sse4_functions[] = {
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_obmc_variance128x128_c, vpx_obmc_variance128x128_sse4_1),
-  TestFuncs(vpx_obmc_variance128x64_c, vpx_obmc_variance128x64_sse4_1),
-  TestFuncs(vpx_obmc_variance64x128_c, vpx_obmc_variance64x128_sse4_1),
+  TestFuncs(aom_obmc_variance128x128_c, aom_obmc_variance128x128_sse4_1),
+  TestFuncs(aom_obmc_variance128x64_c, aom_obmc_variance128x64_sse4_1),
+  TestFuncs(aom_obmc_variance64x128_c, aom_obmc_variance64x128_sse4_1),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_obmc_variance64x64_c, vpx_obmc_variance64x64_sse4_1),
-  TestFuncs(vpx_obmc_variance64x32_c, vpx_obmc_variance64x32_sse4_1),
-  TestFuncs(vpx_obmc_variance32x64_c, vpx_obmc_variance32x64_sse4_1),
-  TestFuncs(vpx_obmc_variance32x32_c, vpx_obmc_variance32x32_sse4_1),
-  TestFuncs(vpx_obmc_variance32x16_c, vpx_obmc_variance32x16_sse4_1),
-  TestFuncs(vpx_obmc_variance16x32_c, vpx_obmc_variance16x32_sse4_1),
-  TestFuncs(vpx_obmc_variance16x16_c, vpx_obmc_variance16x16_sse4_1),
-  TestFuncs(vpx_obmc_variance16x8_c, vpx_obmc_variance16x8_sse4_1),
-  TestFuncs(vpx_obmc_variance8x16_c, vpx_obmc_variance8x16_sse4_1),
-  TestFuncs(vpx_obmc_variance8x8_c, vpx_obmc_variance8x8_sse4_1),
-  TestFuncs(vpx_obmc_variance8x4_c, vpx_obmc_variance8x4_sse4_1),
-  TestFuncs(vpx_obmc_variance4x8_c, vpx_obmc_variance4x8_sse4_1),
-  TestFuncs(vpx_obmc_variance4x4_c, vpx_obmc_variance4x4_sse4_1)
+  TestFuncs(aom_obmc_variance64x64_c, aom_obmc_variance64x64_sse4_1),
+  TestFuncs(aom_obmc_variance64x32_c, aom_obmc_variance64x32_sse4_1),
+  TestFuncs(aom_obmc_variance32x64_c, aom_obmc_variance32x64_sse4_1),
+  TestFuncs(aom_obmc_variance32x32_c, aom_obmc_variance32x32_sse4_1),
+  TestFuncs(aom_obmc_variance32x16_c, aom_obmc_variance32x16_sse4_1),
+  TestFuncs(aom_obmc_variance16x32_c, aom_obmc_variance16x32_sse4_1),
+  TestFuncs(aom_obmc_variance16x16_c, aom_obmc_variance16x16_sse4_1),
+  TestFuncs(aom_obmc_variance16x8_c, aom_obmc_variance16x8_sse4_1),
+  TestFuncs(aom_obmc_variance8x16_c, aom_obmc_variance8x16_sse4_1),
+  TestFuncs(aom_obmc_variance8x8_c, aom_obmc_variance8x8_sse4_1),
+  TestFuncs(aom_obmc_variance8x4_c, aom_obmc_variance8x4_sse4_1),
+  TestFuncs(aom_obmc_variance4x8_c, aom_obmc_variance4x8_sse4_1),
+  TestFuncs(aom_obmc_variance4x4_c, aom_obmc_variance4x4_sse4_1)
 };
 
 INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, ObmcVarianceTest,
@@ -121,7 +121,7 @@
 // High bit-depth
 ////////////////////////////////////////////////////////////////////////////////
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 class ObmcVarianceHBDTest : public FunctionEquivalenceTest<ObmcVarF> {};
 
 TEST_P(ObmcVarianceHBDTest, RandomValues) {
@@ -182,111 +182,111 @@
 #if HAVE_SSE4_1
 ObmcVarianceHBDTest::ParamType sse4_functions_hbd[] = {
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_obmc_variance128x128_c,
-            vpx_highbd_obmc_variance128x128_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance128x64_c,
-            vpx_highbd_obmc_variance128x64_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance64x128_c,
-            vpx_highbd_obmc_variance64x128_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance128x128_c,
+            aom_highbd_obmc_variance128x128_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance128x64_c,
+            aom_highbd_obmc_variance128x64_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance64x128_c,
+            aom_highbd_obmc_variance64x128_sse4_1, 8),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_obmc_variance64x64_c,
-            vpx_highbd_obmc_variance64x64_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance64x32_c,
-            vpx_highbd_obmc_variance64x32_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance32x64_c,
-            vpx_highbd_obmc_variance32x64_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance32x32_c,
-            vpx_highbd_obmc_variance32x32_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance32x16_c,
-            vpx_highbd_obmc_variance32x16_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance16x32_c,
-            vpx_highbd_obmc_variance16x32_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance16x16_c,
-            vpx_highbd_obmc_variance16x16_sse4_1, 8),
-  TestFuncs(vpx_highbd_obmc_variance16x8_c, vpx_highbd_obmc_variance16x8_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance64x64_c,
+            aom_highbd_obmc_variance64x64_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance64x32_c,
+            aom_highbd_obmc_variance64x32_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance32x64_c,
+            aom_highbd_obmc_variance32x64_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance32x32_c,
+            aom_highbd_obmc_variance32x32_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance32x16_c,
+            aom_highbd_obmc_variance32x16_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance16x32_c,
+            aom_highbd_obmc_variance16x32_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance16x16_c,
+            aom_highbd_obmc_variance16x16_sse4_1, 8),
+  TestFuncs(aom_highbd_obmc_variance16x8_c, aom_highbd_obmc_variance16x8_sse4_1,
             8),
-  TestFuncs(vpx_highbd_obmc_variance8x16_c, vpx_highbd_obmc_variance8x16_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance8x16_c, aom_highbd_obmc_variance8x16_sse4_1,
             8),
-  TestFuncs(vpx_highbd_obmc_variance8x8_c, vpx_highbd_obmc_variance8x8_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance8x8_c, aom_highbd_obmc_variance8x8_sse4_1,
             8),
-  TestFuncs(vpx_highbd_obmc_variance8x4_c, vpx_highbd_obmc_variance8x4_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance8x4_c, aom_highbd_obmc_variance8x4_sse4_1,
             8),
-  TestFuncs(vpx_highbd_obmc_variance4x8_c, vpx_highbd_obmc_variance4x8_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance4x8_c, aom_highbd_obmc_variance4x8_sse4_1,
             8),
-  TestFuncs(vpx_highbd_obmc_variance4x4_c, vpx_highbd_obmc_variance4x4_sse4_1,
+  TestFuncs(aom_highbd_obmc_variance4x4_c, aom_highbd_obmc_variance4x4_sse4_1,
             8),
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_10_obmc_variance128x128_c,
-            vpx_highbd_10_obmc_variance128x128_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance128x64_c,
-            vpx_highbd_10_obmc_variance128x64_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance64x128_c,
-            vpx_highbd_10_obmc_variance64x128_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance128x128_c,
+            aom_highbd_10_obmc_variance128x128_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance128x64_c,
+            aom_highbd_10_obmc_variance128x64_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance64x128_c,
+            aom_highbd_10_obmc_variance64x128_sse4_1, 10),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_10_obmc_variance64x64_c,
-            vpx_highbd_10_obmc_variance64x64_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance64x32_c,
-            vpx_highbd_10_obmc_variance64x32_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance32x64_c,
-            vpx_highbd_10_obmc_variance32x64_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance32x32_c,
-            vpx_highbd_10_obmc_variance32x32_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance32x16_c,
-            vpx_highbd_10_obmc_variance32x16_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance16x32_c,
-            vpx_highbd_10_obmc_variance16x32_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance16x16_c,
-            vpx_highbd_10_obmc_variance16x16_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance16x8_c,
-            vpx_highbd_10_obmc_variance16x8_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance8x16_c,
-            vpx_highbd_10_obmc_variance8x16_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance8x8_c,
-            vpx_highbd_10_obmc_variance8x8_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance8x4_c,
-            vpx_highbd_10_obmc_variance8x4_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance4x8_c,
-            vpx_highbd_10_obmc_variance4x8_sse4_1, 10),
-  TestFuncs(vpx_highbd_10_obmc_variance4x4_c,
-            vpx_highbd_10_obmc_variance4x4_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance64x64_c,
+            aom_highbd_10_obmc_variance64x64_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance64x32_c,
+            aom_highbd_10_obmc_variance64x32_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance32x64_c,
+            aom_highbd_10_obmc_variance32x64_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance32x32_c,
+            aom_highbd_10_obmc_variance32x32_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance32x16_c,
+            aom_highbd_10_obmc_variance32x16_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance16x32_c,
+            aom_highbd_10_obmc_variance16x32_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance16x16_c,
+            aom_highbd_10_obmc_variance16x16_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance16x8_c,
+            aom_highbd_10_obmc_variance16x8_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance8x16_c,
+            aom_highbd_10_obmc_variance8x16_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance8x8_c,
+            aom_highbd_10_obmc_variance8x8_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance8x4_c,
+            aom_highbd_10_obmc_variance8x4_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance4x8_c,
+            aom_highbd_10_obmc_variance4x8_sse4_1, 10),
+  TestFuncs(aom_highbd_10_obmc_variance4x4_c,
+            aom_highbd_10_obmc_variance4x4_sse4_1, 10),
 #if CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_12_obmc_variance128x128_c,
-            vpx_highbd_12_obmc_variance128x128_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance128x64_c,
-            vpx_highbd_12_obmc_variance128x64_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance64x128_c,
-            vpx_highbd_12_obmc_variance64x128_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance128x128_c,
+            aom_highbd_12_obmc_variance128x128_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance128x64_c,
+            aom_highbd_12_obmc_variance128x64_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance64x128_c,
+            aom_highbd_12_obmc_variance64x128_sse4_1, 12),
 #endif  // CONFIG_EXT_PARTITION
-  TestFuncs(vpx_highbd_12_obmc_variance64x64_c,
-            vpx_highbd_12_obmc_variance64x64_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance64x32_c,
-            vpx_highbd_12_obmc_variance64x32_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance32x64_c,
-            vpx_highbd_12_obmc_variance32x64_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance32x32_c,
-            vpx_highbd_12_obmc_variance32x32_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance32x16_c,
-            vpx_highbd_12_obmc_variance32x16_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance16x32_c,
-            vpx_highbd_12_obmc_variance16x32_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance16x16_c,
-            vpx_highbd_12_obmc_variance16x16_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance16x8_c,
-            vpx_highbd_12_obmc_variance16x8_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance8x16_c,
-            vpx_highbd_12_obmc_variance8x16_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance8x8_c,
-            vpx_highbd_12_obmc_variance8x8_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance8x4_c,
-            vpx_highbd_12_obmc_variance8x4_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance4x8_c,
-            vpx_highbd_12_obmc_variance4x8_sse4_1, 12),
-  TestFuncs(vpx_highbd_12_obmc_variance4x4_c,
-            vpx_highbd_12_obmc_variance4x4_sse4_1, 12)
+  TestFuncs(aom_highbd_12_obmc_variance64x64_c,
+            aom_highbd_12_obmc_variance64x64_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance64x32_c,
+            aom_highbd_12_obmc_variance64x32_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance32x64_c,
+            aom_highbd_12_obmc_variance32x64_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance32x32_c,
+            aom_highbd_12_obmc_variance32x32_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance32x16_c,
+            aom_highbd_12_obmc_variance32x16_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance16x32_c,
+            aom_highbd_12_obmc_variance16x32_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance16x16_c,
+            aom_highbd_12_obmc_variance16x16_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance16x8_c,
+            aom_highbd_12_obmc_variance16x8_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance8x16_c,
+            aom_highbd_12_obmc_variance8x16_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance8x8_c,
+            aom_highbd_12_obmc_variance8x8_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance8x4_c,
+            aom_highbd_12_obmc_variance8x4_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance4x8_c,
+            aom_highbd_12_obmc_variance4x8_sse4_1, 12),
+  TestFuncs(aom_highbd_12_obmc_variance4x4_c,
+            aom_highbd_12_obmc_variance4x4_sse4_1, 12)
 };
 
 INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, ObmcVarianceHBDTest,
                         ::testing::ValuesIn(sse4_functions_hbd));
 #endif  // HAVE_SSE4_1
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index 8d3bd72..f6c8300 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -14,15 +14,15 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
 #include "av1/common/blockd.h"
 #include "av1/common/scan.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 
 using libaom_test::ACMRandom;
 
@@ -101,7 +101,7 @@
       // quantization with maximum allowed step sizes
       test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
       for (int j = 1; j < last_nonzero_; ++j)
-        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+        test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] =
             (output_ref_block[j] / 1828) * 1828;
     }
 
@@ -152,7 +152,7 @@
         max_energy_leftover = 0;
         coef = 0;
       }
-      test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef;
+      test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] = coef;
     }
 
     memcpy(test_coef_block2, test_coef_block1,
@@ -175,82 +175,82 @@
 
 INSTANTIATE_TEST_CASE_P(
     C, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_c, TX_32X32, 34),
-                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_c, TX_32X32, 1),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_c, TX_16X16, 10),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_c, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_c, TX_8X8, 12),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_c, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_c, TX_4X4, 1)));
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_34_add_c, TX_32X32, 34),
+                      make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_1_add_c, TX_32X32, 1),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_10_add_c, TX_16X16, 10),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_1_add_c, TX_16X16, 1),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_12_add_c, TX_8X8, 12),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_1_add_c, TX_8X8, 1),
+                      make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
+                                 &aom_idct4x4_1_add_c, TX_4X4, 1)));
 
-#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
-#endif  // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_1_add_neon, TX_32X32, 1),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_10_add_neon, TX_16X16, 10),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_1_add_neon, TX_16X16, 1),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_12_add_neon, TX_8X8, 12),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_1_add_neon, TX_8X8, 1),
+                      make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
+                                 &aom_idct4x4_1_add_neon, TX_4X4, 1)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34),
-                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1)));
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_34_add_sse2, TX_32X32, 34),
+                      make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_1_add_sse2, TX_32X32, 1),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_10_add_sse2, TX_16X16, 10),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_1_add_sse2, TX_16X16, 1),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_12_add_sse2, TX_8X8, 12),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_1_add_sse2, TX_8X8, 1),
+                      make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
+                                 &aom_idct4x4_1_add_sse2, TX_4X4, 1)));
 #endif
 
-#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_AOM_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSSE3_64, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12)));
+    ::testing::Values(make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_12_add_ssse3, TX_8X8, 12)));
 #endif
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34),
-                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1)));
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_34_add_msa, TX_32X32, 34),
+                      make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
+                                 &aom_idct32x32_1_add_msa, TX_32X32, 1),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_10_add_msa, TX_16X16, 10),
+                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
+                                 &aom_idct16x16_1_add_msa, TX_16X16, 1),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_12_add_msa, TX_8X8, 10),
+                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
+                                 &aom_idct8x8_1_add_msa, TX_8X8, 1),
+                      make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
+                                 &aom_idct4x4_1_add_msa, TX_4X4, 1)));
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 }  // namespace
diff --git a/test/quantize_test.cc b/test/quantize_test.cc
index bfebdc5..f58d862 100644
--- a/test/quantize_test.cc
+++ b/test/quantize_test.cc
@@ -12,7 +12,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
@@ -22,8 +22,8 @@
 #include "vp8/encoder/block.h"
 #include "vp8/encoder/onyx_int.h"
 #include "vp8/encoder/quantize.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 
 namespace {
 #if !CONFIG_AOM_QM
@@ -45,7 +45,7 @@
   virtual ~QuantizeTestBase() {
     vp8_remove_compressor(&vp8_comp_);
     vp8_comp_ = NULL;
-    vpx_free(macroblockd_dst_);
+    aom_free(macroblockd_dst_);
     macroblockd_dst_ = NULL;
     libaom_test::ClearSystemState();
   }
@@ -68,7 +68,7 @@
 
     // Copy macroblockd from the reference to get pre-set-up dequant values.
     macroblockd_dst_ = reinterpret_cast<MACROBLOCKD *>(
-        vpx_memalign(32, sizeof(*macroblockd_dst_)));
+        aom_memalign(32, sizeof(*macroblockd_dst_)));
     memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));
     // Fix block pointers - currently they point to the blocks in the reference
     // structure.
diff --git a/test/realtime_test.cc b/test/realtime_test.cc
index d48f7fb..0c99291 100644
--- a/test/realtime_test.cc
+++ b/test/realtime_test.cc
@@ -35,12 +35,12 @@
   virtual void BeginPassHook(unsigned int /*pass*/) {
     // TODO(tomfinegan): We're changing the pass value here to make sure
     // we get frames when real time mode is combined with |g_pass| set to
-    // VPX_RC_FIRST_PASS. This is necessary because EncoderTest::RunLoop() sets
+    // AOM_RC_FIRST_PASS. This is necessary because EncoderTest::RunLoop() sets
     // the pass value based on the mode passed into EncoderTest::SetMode(),
     // which overrides the one specified in SetUp() above.
-    cfg_.g_pass = VPX_RC_FIRST_PASS;
+    cfg_.g_pass = AOM_RC_FIRST_PASS;
   }
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t * /*pkt*/) {
     frame_packets_++;
   }
 
@@ -55,9 +55,7 @@
   EXPECT_EQ(kFramesToEncode, frame_packets_);
 }
 
-VP8_INSTANTIATE_TEST_CASE(RealtimeTest,
-                          ::testing::Values(::libaom_test::kRealTime));
-VP9_INSTANTIATE_TEST_CASE(RealtimeTest,
+AV1_INSTANTIATE_TEST_CASE(RealtimeTest,
                           ::testing::Values(::libaom_test::kRealTime));
 
 }  // namespace
diff --git a/test/reconintra_predictors_test.cc b/test/reconintra_predictors_test.cc
index cddf3c1..a86a6a5 100644
--- a/test/reconintra_predictors_test.cc
+++ b/test/reconintra_predictors_test.cc
@@ -10,7 +10,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
@@ -32,7 +32,7 @@
 typedef tuple<Predictor, Predictor, int> PredFuncMode;
 typedef tuple<PredFuncMode, int> PredParams;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*HbdPredictor)(uint16_t *dst, ptrdiff_t stride, int bs,
                              const uint16_t *above, const uint16_t *left,
                              int bd);
@@ -57,9 +57,9 @@
 const int MaxTestNum = 100;
 #endif
 
-class VP10IntraPredOptimzTest : public ::testing::TestWithParam<PredParams> {
+class AV1IntraPredOptimzTest : public ::testing::TestWithParam<PredParams> {
  public:
-  virtual ~VP10IntraPredOptimzTest() {}
+  virtual ~AV1IntraPredOptimzTest() {}
   virtual void SetUp() {
     PredFuncMode funcMode = GET_PARAM(0);
     predFuncRef_ = std::tr1::get<0>(funcMode);
@@ -148,11 +148,11 @@
   uint8_t *predRef_;
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HbdIntraPredOptimzTest
+#if CONFIG_AOM_HIGHBITDEPTH
+class AV1HbdIntraPredOptimzTest
     : public ::testing::TestWithParam<HbdPredParams> {
  public:
-  virtual ~VP10HbdIntraPredOptimzTest() {}
+  virtual ~AV1HbdIntraPredOptimzTest() {}
   virtual void SetUp() {
     HbdPredFuncMode funcMode = GET_PARAM(0);
     predFuncRef_ = std::tr1::get<0>(funcMode);
@@ -243,87 +243,87 @@
   uint16_t *pred_;
   uint16_t *predRef_;
 };
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-TEST_P(VP10IntraPredOptimzTest, BitExactCheck) { RunTest(); }
+TEST_P(AV1IntraPredOptimzTest, BitExactCheck) { RunTest(); }
 
 #if PREDICTORS_SPEED_TEST
-TEST_P(VP10IntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
+TEST_P(AV1IntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
 
-TEST_P(VP10IntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
+TEST_P(AV1IntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
-TEST_P(VP10HbdIntraPredOptimzTest, BitExactCheck) { RunTest(); }
+#if CONFIG_AOM_HIGHBITDEPTH
+TEST_P(AV1HbdIntraPredOptimzTest, BitExactCheck) { RunTest(); }
 
 #if PREDICTORS_SPEED_TEST
-TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
+TEST_P(AV1HbdIntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
 
-TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
+TEST_P(AV1HbdIntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
 #endif  // PREDICTORS_SPEED_TEST
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 using std::tr1::make_tuple;
 
 const PredFuncMode kPredFuncMdArray[] = {
-  make_tuple(vp10_dc_filter_predictor_c, vp10_dc_filter_predictor_sse4_1,
+  make_tuple(av1_dc_filter_predictor_c, av1_dc_filter_predictor_sse4_1,
              DC_PRED),
-  make_tuple(vp10_v_filter_predictor_c, vp10_v_filter_predictor_sse4_1, V_PRED),
-  make_tuple(vp10_h_filter_predictor_c, vp10_h_filter_predictor_sse4_1, H_PRED),
-  make_tuple(vp10_d45_filter_predictor_c, vp10_d45_filter_predictor_sse4_1,
+  make_tuple(av1_v_filter_predictor_c, av1_v_filter_predictor_sse4_1, V_PRED),
+  make_tuple(av1_h_filter_predictor_c, av1_h_filter_predictor_sse4_1, H_PRED),
+  make_tuple(av1_d45_filter_predictor_c, av1_d45_filter_predictor_sse4_1,
              D45_PRED),
-  make_tuple(vp10_d135_filter_predictor_c, vp10_d135_filter_predictor_sse4_1,
+  make_tuple(av1_d135_filter_predictor_c, av1_d135_filter_predictor_sse4_1,
              D135_PRED),
-  make_tuple(vp10_d117_filter_predictor_c, vp10_d117_filter_predictor_sse4_1,
+  make_tuple(av1_d117_filter_predictor_c, av1_d117_filter_predictor_sse4_1,
              D117_PRED),
-  make_tuple(vp10_d153_filter_predictor_c, vp10_d153_filter_predictor_sse4_1,
+  make_tuple(av1_d153_filter_predictor_c, av1_d153_filter_predictor_sse4_1,
              D153_PRED),
-  make_tuple(vp10_d207_filter_predictor_c, vp10_d207_filter_predictor_sse4_1,
+  make_tuple(av1_d207_filter_predictor_c, av1_d207_filter_predictor_sse4_1,
              D207_PRED),
-  make_tuple(vp10_d63_filter_predictor_c, vp10_d63_filter_predictor_sse4_1,
+  make_tuple(av1_d63_filter_predictor_c, av1_d63_filter_predictor_sse4_1,
              D63_PRED),
-  make_tuple(vp10_tm_filter_predictor_c, vp10_tm_filter_predictor_sse4_1,
+  make_tuple(av1_tm_filter_predictor_c, av1_tm_filter_predictor_sse4_1,
              TM_PRED),
 };
 
 const int kBlkSize[] = { 4, 8, 16, 32 };
 
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VP10IntraPredOptimzTest,
+    SSE4_1, AV1IntraPredOptimzTest,
     ::testing::Combine(::testing::ValuesIn(kPredFuncMdArray),
                        ::testing::ValuesIn(kBlkSize)));
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const HbdPredFuncMode kHbdPredFuncMdArray[] = {
-  make_tuple(vp10_highbd_dc_filter_predictor_c,
-             vp10_highbd_dc_filter_predictor_sse4_1, DC_PRED),
-  make_tuple(vp10_highbd_v_filter_predictor_c,
-             vp10_highbd_v_filter_predictor_sse4_1, V_PRED),
-  make_tuple(vp10_highbd_h_filter_predictor_c,
-             vp10_highbd_h_filter_predictor_sse4_1, H_PRED),
-  make_tuple(vp10_highbd_d45_filter_predictor_c,
-             vp10_highbd_d45_filter_predictor_sse4_1, D45_PRED),
-  make_tuple(vp10_highbd_d135_filter_predictor_c,
-             vp10_highbd_d135_filter_predictor_sse4_1, D135_PRED),
-  make_tuple(vp10_highbd_d117_filter_predictor_c,
-             vp10_highbd_d117_filter_predictor_sse4_1, D117_PRED),
-  make_tuple(vp10_highbd_d153_filter_predictor_c,
-             vp10_highbd_d153_filter_predictor_sse4_1, D153_PRED),
-  make_tuple(vp10_highbd_d207_filter_predictor_c,
-             vp10_highbd_d207_filter_predictor_sse4_1, D207_PRED),
-  make_tuple(vp10_highbd_d63_filter_predictor_c,
-             vp10_highbd_d63_filter_predictor_sse4_1, D63_PRED),
-  make_tuple(vp10_highbd_tm_filter_predictor_c,
-             vp10_highbd_tm_filter_predictor_sse4_1, TM_PRED),
+  make_tuple(av1_highbd_dc_filter_predictor_c,
+             av1_highbd_dc_filter_predictor_sse4_1, DC_PRED),
+  make_tuple(av1_highbd_v_filter_predictor_c,
+             av1_highbd_v_filter_predictor_sse4_1, V_PRED),
+  make_tuple(av1_highbd_h_filter_predictor_c,
+             av1_highbd_h_filter_predictor_sse4_1, H_PRED),
+  make_tuple(av1_highbd_d45_filter_predictor_c,
+             av1_highbd_d45_filter_predictor_sse4_1, D45_PRED),
+  make_tuple(av1_highbd_d135_filter_predictor_c,
+             av1_highbd_d135_filter_predictor_sse4_1, D135_PRED),
+  make_tuple(av1_highbd_d117_filter_predictor_c,
+             av1_highbd_d117_filter_predictor_sse4_1, D117_PRED),
+  make_tuple(av1_highbd_d153_filter_predictor_c,
+             av1_highbd_d153_filter_predictor_sse4_1, D153_PRED),
+  make_tuple(av1_highbd_d207_filter_predictor_c,
+             av1_highbd_d207_filter_predictor_sse4_1, D207_PRED),
+  make_tuple(av1_highbd_d63_filter_predictor_c,
+             av1_highbd_d63_filter_predictor_sse4_1, D63_PRED),
+  make_tuple(av1_highbd_tm_filter_predictor_c,
+             av1_highbd_tm_filter_predictor_sse4_1, TM_PRED),
 };
 
 const int kBd[] = { 10, 12 };
 
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VP10HbdIntraPredOptimzTest,
+    SSE4_1, AV1HbdIntraPredOptimzTest,
     ::testing::Combine(::testing::ValuesIn(kHbdPredFuncMdArray),
                        ::testing::ValuesIn(kBlkSize),
                        ::testing::ValuesIn(kBd)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 }  // namespace
diff --git a/test/register_state_check.h b/test/register_state_check.h
index 4ec53f4..5c9620d 100644
--- a/test/register_state_check.h
+++ b/test/register_state_check.h
@@ -12,8 +12,8 @@
 #define TEST_REGISTER_STATE_CHECK_H_
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 // ASM_REGISTER_STATE_CHECK(asm_function)
 //   Minimally validates the environment pre & post function execution. This
@@ -88,12 +88,12 @@
 
 }  // namespace libaom_test
 
-#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && \
-    defined(CONFIG_VP10) && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP10
+#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && !CONFIG_SHARED && \
+    HAVE_NEON_ASM && CONFIG_AV1
 
 extern "C" {
 // Save the d8-d15 registers into store.
-void vpx_push_neon(int64_t *store);
+void aom_push_neon(int64_t *store);
 }
 
 namespace libaom_test {
@@ -108,7 +108,7 @@
 
  private:
   static bool StoreRegisters(int64_t store[8]) {
-    vpx_push_neon(store);
+    aom_push_neon(store);
     return true;
   }
 
@@ -116,7 +116,7 @@
   bool Check() const {
     if (!initialized_) return false;
     int64_t post_store[8];
-    vpx_push_neon(post_store);
+    aom_push_neon(post_store);
     for (int i = 0; i < 8; ++i) {
       EXPECT_EQ(pre_store_[i], post_store[i]) << "d" << i + 8
                                               << " has been modified";
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 9b4808b..35253dc 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -36,7 +36,7 @@
   mem[3] = val >> 24;
 }
 
-static void write_ivf_file_header(const vpx_codec_enc_cfg_t *const cfg,
+static void write_ivf_file_header(const aom_codec_enc_cfg_t *const cfg,
                                   int frame_cnt, FILE *const outfile) {
   char header[32];
 
@@ -46,7 +46,7 @@
   header[3] = 'F';
   mem_put_le16(header + 4, 0);                    /* version */
   mem_put_le16(header + 6, 32);                   /* headersize */
-  mem_put_le32(header + 8, 0x30395056);           /* fourcc (vp9) */
+  mem_put_le32(header + 8, 0x30395056);           /* fourcc (av1) */
   mem_put_le16(header + 12, cfg->g_w);            /* width */
   mem_put_le16(header + 14, cfg->g_h);            /* height */
   mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
@@ -63,12 +63,12 @@
   (void)fwrite(header, 1, 4, outfile);
 }
 
-static void write_ivf_frame_header(const vpx_codec_cx_pkt_t *const pkt,
+static void write_ivf_frame_header(const aom_codec_cx_pkt_t *const pkt,
                                    FILE *const outfile) {
   char header[12];
-  vpx_codec_pts_t pts;
+  aom_codec_pts_t pts;
 
-  if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
+  if (pkt->kind != AOM_CODEC_CX_FRAME_PKT) return;
 
   pts = pkt->data.frame.pts;
   mem_put_le32(header, static_cast<unsigned int>(pkt->data.frame.sz));
@@ -83,10 +83,10 @@
 const unsigned int kInitialHeight = 240;
 
 struct FrameInfo {
-  FrameInfo(vpx_codec_pts_t _pts, unsigned int _w, unsigned int _h)
+  FrameInfo(aom_codec_pts_t _pts, unsigned int _w, unsigned int _h)
       : pts(_pts), w(_w), h(_h) {}
 
-  vpx_codec_pts_t pts;
+  aom_codec_pts_t pts;
   unsigned int w;
   unsigned int h;
 };
@@ -231,8 +231,8 @@
     return;
   }
   if (flag_codec == 1) {
-    // Cases that only works for VP9.
-    // For VP9: Swap width and height of original.
+    // Cases that only works for AV1.
+    // For AV1: Swap width and height of original.
     if (frame < 320) {
       *w = initial_h;
       *h = initial_w;
@@ -277,8 +277,8 @@
     SetMode(GET_PARAM(1));
   }
 
-  virtual void DecompressedFrameHook(const vpx_image_t &img,
-                                     vpx_codec_pts_t pts) {
+  virtual void DecompressedFrameHook(const aom_image_t &img,
+                                     aom_codec_pts_t pts) {
     frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
   }
 
@@ -321,7 +321,7 @@
 
   virtual void BeginPassHook(unsigned int /*pass*/) {
 #if WRITE_COMPRESSED_STREAM
-    outfile_ = fopen("vp90-2-05-resize.ivf", "wb");
+    outfile_ = fopen("av10-2-05-resize.ivf", "wb");
 #endif
   }
 
@@ -341,34 +341,34 @@
     if (change_config_) {
       int new_q = 60;
       if (video->frame() == 0) {
-        struct vpx_scaling_mode mode = { VP8E_ONETWO, VP8E_ONETWO };
-        encoder->Control(VP8E_SET_SCALEMODE, &mode);
+        struct aom_scaling_mode mode = { AOME_ONETWO, AOME_ONETWO };
+        encoder->Control(AOME_SET_SCALEMODE, &mode);
       }
       if (video->frame() == 1) {
-        struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
-        encoder->Control(VP8E_SET_SCALEMODE, &mode);
+        struct aom_scaling_mode mode = { AOME_NORMAL, AOME_NORMAL };
+        encoder->Control(AOME_SET_SCALEMODE, &mode);
         cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = new_q;
         encoder->Config(&cfg_);
       }
     } else {
       if (video->frame() == kStepDownFrame) {
-        struct vpx_scaling_mode mode = { VP8E_FOURFIVE, VP8E_THREEFIVE };
-        encoder->Control(VP8E_SET_SCALEMODE, &mode);
+        struct aom_scaling_mode mode = { AOME_FOURFIVE, AOME_THREEFIVE };
+        encoder->Control(AOME_SET_SCALEMODE, &mode);
       }
       if (video->frame() == kStepUpFrame) {
-        struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
-        encoder->Control(VP8E_SET_SCALEMODE, &mode);
+        struct aom_scaling_mode mode = { AOME_NORMAL, AOME_NORMAL };
+        encoder->Control(AOME_SET_SCALEMODE, &mode);
       }
     }
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0];
     EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
   }
 
 #if WRITE_COMPRESSED_STREAM
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     ++out_frames_;
 
     // Write initial file header if first frame.
@@ -391,7 +391,7 @@
 TEST_P(ResizeInternalTest, TestInternalResizeWorks) {
   ::libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
                                        30, 1, 0, 10);
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
   change_config_ = false;
 
   // q picked such that initial keyframe on this clip is ~30dB PSNR
@@ -406,7 +406,7 @@
 
   for (std::vector<FrameInfo>::const_iterator info = frame_info_list_.begin();
        info != frame_info_list_.end(); ++info) {
-    const vpx_codec_pts_t pts = info->pts;
+    const aom_codec_pts_t pts = info->pts;
     if (pts >= kStepDownFrame && pts < kStepUpFrame) {
       ASSERT_EQ(282U, info->w) << "Frame " << pts << " had unexpected width";
       ASSERT_EQ(173U, info->h) << "Frame " << pts << " had unexpected height";
@@ -436,8 +436,8 @@
   virtual void PreEncodeFrameHook(libaom_test::VideoSource *video,
                                   libaom_test::Encoder *encoder) {
     if (video->frame() == 0) {
-      encoder->Control(VP9E_SET_AQ_MODE, 3);
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+      encoder->Control(AV1E_SET_AQ_MODE, 3);
+      encoder->Control(AOME_SET_CPUUSED, set_cpu_used_);
     }
 
     if (change_bitrate_ && video->frame() == 120) {
@@ -453,12 +453,12 @@
     set_cpu_used_ = GET_PARAM(2);
   }
 
-  virtual void DecompressedFrameHook(const vpx_image_t &img,
-                                     vpx_codec_pts_t pts) {
+  virtual void DecompressedFrameHook(const aom_image_t &img,
+                                     aom_codec_pts_t pts) {
     frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
   }
 
-  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
+  virtual void MismatchHook(const aom_image_t *img1, const aom_image_t *img2) {
     double mismatch_psnr = compute_psnr(img1, img2);
     mismatch_psnr_ += mismatch_psnr;
     ++mismatch_nframes_;
@@ -474,8 +474,8 @@
     cfg_.rc_max_quantizer = 56;
     cfg_.rc_undershoot_pct = 50;
     cfg_.rc_overshoot_pct = 50;
-    cfg_.rc_end_usage = VPX_CBR;
-    cfg_.kf_mode = VPX_KF_AUTO;
+    cfg_.rc_end_usage = AOM_CBR;
+    cfg_.kf_mode = AOM_KF_AUTO;
     cfg_.g_lag_in_frames = 0;
     cfg_.kf_min_dist = cfg_.kf_max_dist = 3000;
     // Enable dropped frames.
@@ -550,12 +550,12 @@
     }
   }
 
-#if CONFIG_VP9_DECODER
+#if CONFIG_AV1_DECODER
   // Verify that we get 1 resize down event in this test.
   ASSERT_EQ(1, resize_count) << "Resizing should occur.";
   EXPECT_EQ(static_cast<unsigned int>(0), GetMismatchFrames());
 #else
-  printf("Warning: VP9 decoder unavailable, unable to check resize count!\n");
+  printf("Warning: AV1 decoder unavailable, unable to check resize count!\n");
 #endif
 }
 
@@ -598,19 +598,19 @@
     }
   }
 
-#if CONFIG_VP9_DECODER
+#if CONFIG_AV1_DECODER
   // Verify that we get 2 resize events in this test.
   ASSERT_EQ(resize_count, 2) << "Resizing should occur twice.";
   EXPECT_EQ(static_cast<unsigned int>(0), GetMismatchFrames());
 #else
-  printf("Warning: VP9 decoder unavailable, unable to check resize count!\n");
+  printf("Warning: AV1 decoder unavailable, unable to check resize count!\n");
 #endif
 }
 
-vpx_img_fmt_t CspForFrameNumber(int frame) {
-  if (frame < 10) return VPX_IMG_FMT_I420;
-  if (frame < 20) return VPX_IMG_FMT_I444;
-  return VPX_IMG_FMT_I420;
+aom_img_fmt_t CspForFrameNumber(int frame) {
+  if (frame < 10) return AOM_IMG_FMT_I420;
+  if (frame < 20) return AOM_IMG_FMT_I444;
+  return AOM_IMG_FMT_I420;
 }
 
 class ResizeCspTest : public ResizeTest {
@@ -626,7 +626,7 @@
 
   virtual void BeginPassHook(unsigned int /*pass*/) {
 #if WRITE_COMPRESSED_STREAM
-    outfile_ = fopen("vp91-2-05-cspchape.ivf", "wb");
+    outfile_ = fopen("av11-2-05-cspchape.ivf", "wb");
 #endif
   }
 
@@ -643,25 +643,25 @@
 
   virtual void PreEncodeFrameHook(libaom_test::VideoSource *video,
                                   libaom_test::Encoder *encoder) {
-    if (CspForFrameNumber(video->frame()) != VPX_IMG_FMT_I420 &&
+    if (CspForFrameNumber(video->frame()) != AOM_IMG_FMT_I420 &&
         cfg_.g_profile != 1) {
       cfg_.g_profile = 1;
       encoder->Config(&cfg_);
     }
-    if (CspForFrameNumber(video->frame()) == VPX_IMG_FMT_I420 &&
+    if (CspForFrameNumber(video->frame()) == AOM_IMG_FMT_I420 &&
         cfg_.g_profile != 0) {
       cfg_.g_profile = 0;
       encoder->Config(&cfg_);
     }
   }
 
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void PSNRPktHook(const aom_codec_cx_pkt_t *pkt) {
     if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0];
     EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
   }
 
 #if WRITE_COMPRESSED_STREAM
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     ++out_frames_;
 
     // Write initial file header if first frame.
@@ -699,19 +699,19 @@
 
 TEST_P(ResizeCspTest, TestResizeCspWorks) {
   ResizingCspVideoSource video;
-  init_flags_ = VPX_CODEC_USE_PSNR;
+  init_flags_ = AOM_CODEC_USE_PSNR;
   cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = 48;
   cfg_.g_lag_in_frames = 0;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(ResizeTest,
-                           ::testing::Values(::libaom_test::kRealTime));
-VP10_INSTANTIATE_TEST_CASE(ResizeInternalTest,
-                           ::testing::Values(::libaom_test::kOnePassBest));
-VP10_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
-                           ::testing::Values(::libaom_test::kRealTime),
-                           ::testing::Range(5, 9));
-VP10_INSTANTIATE_TEST_CASE(ResizeCspTest,
-                           ::testing::Values(::libaom_test::kRealTime));
+AV1_INSTANTIATE_TEST_CASE(ResizeTest,
+                          ::testing::Values(::libaom_test::kRealTime));
+AV1_INSTANTIATE_TEST_CASE(ResizeInternalTest,
+                          ::testing::Values(::libaom_test::kOnePassBest));
+AV1_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
+                          ::testing::Values(::libaom_test::kRealTime),
+                          ::testing::Range(5, 9));
+AV1_INSTANTIATE_TEST_CASE(ResizeCspTest,
+                          ::testing::Values(::libaom_test::kRealTime));
 }  // namespace
diff --git a/test/resize_util.sh b/test/resize_util.sh
index 0c5851d..e8993e5 100755
--- a/test/resize_util.sh
+++ b/test/resize_util.sh
@@ -18,7 +18,7 @@
 # Environment check: $YUV_RAW_INPUT is required.
 resize_util_verify_environment() {
   if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
@@ -26,19 +26,19 @@
 # Resizes $YUV_RAW_INPUT using the resize_util example. $1 is the output
 # dimensions that will be passed to resize_util.
 resize_util() {
-  local resizer="${LIBAOM_BIN_PATH}/resize_util${VPX_TEST_EXE_SUFFIX}"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/resize_util.raw"
+  local resizer="${LIBAOM_BIN_PATH}/resize_util${AOM_TEST_EXE_SUFFIX}"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/resize_util.raw"
   local frames_to_resize="10"
   local target_dimensions="$1"
 
   # resize_util is available only when CONFIG_SHARED is disabled.
-  if [ -z "$(vpx_config_option_enabled CONFIG_SHARED)" ]; then
+  if [ -z "$(aom_config_option_enabled CONFIG_SHARED)" ]; then
     if [ ! -x "${resizer}" ]; then
       elog "${resizer} does not exist or is not executable."
       return 1
     fi
 
-    eval "${VPX_TEST_PREFIX}" "${resizer}" "${YUV_RAW_INPUT}" \
+    eval "${AOM_TEST_PREFIX}" "${resizer}" "${YUV_RAW_INPUT}" \
         "${YUV_RAW_INPUT_WIDTH}x${YUV_RAW_INPUT_HEIGHT}" \
         "${target_dimensions}" "${output_file}" ${frames_to_resize} \
         ${devnull}
diff --git a/test/sad_test.cc b/test/sad_test.cc
index 8276cd4..875b40b 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -14,14 +14,14 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
-#include "aom/vpx_codec.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom/aom_codec.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr, int src_stride,
@@ -48,31 +48,31 @@
 
   static void SetUpTestCase() {
     source_data8_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(kDataAlignment, kDataBlockSize));
+        aom_memalign(kDataAlignment, kDataBlockSize));
     reference_data8_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(kDataAlignment, kDataBufferSize));
+        aom_memalign(kDataAlignment, kDataBufferSize));
     second_pred8_ =
-        reinterpret_cast<uint8_t *>(vpx_memalign(kDataAlignment, 128 * 128));
+        reinterpret_cast<uint8_t *>(aom_memalign(kDataAlignment, 128 * 128));
     source_data16_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(kDataAlignment, kDataBlockSize * sizeof(uint16_t)));
+        aom_memalign(kDataAlignment, kDataBlockSize * sizeof(uint16_t)));
     reference_data16_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(kDataAlignment, kDataBufferSize * sizeof(uint16_t)));
+        aom_memalign(kDataAlignment, kDataBufferSize * sizeof(uint16_t)));
     second_pred16_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(kDataAlignment, 128 * 128 * sizeof(uint16_t)));
+        aom_memalign(kDataAlignment, 128 * 128 * sizeof(uint16_t)));
   }
 
   static void TearDownTestCase() {
-    vpx_free(source_data8_);
+    aom_free(source_data8_);
     source_data8_ = NULL;
-    vpx_free(reference_data8_);
+    aom_free(reference_data8_);
     reference_data8_ = NULL;
-    vpx_free(second_pred8_);
+    aom_free(second_pred8_);
     second_pred8_ = NULL;
-    vpx_free(source_data16_);
+    aom_free(source_data16_);
     source_data16_ = NULL;
-    vpx_free(reference_data16_);
+    aom_free(reference_data16_);
     reference_data16_ = NULL;
-    vpx_free(second_pred16_);
+    aom_free(second_pred16_);
     second_pred16_ = NULL;
   }
 
@@ -87,18 +87,18 @@
   virtual void SetUp() {
     if (bd_ == -1) {
       use_high_bit_depth_ = false;
-      bit_depth_ = VPX_BITS_8;
+      bit_depth_ = AOM_BITS_8;
       source_data_ = source_data8_;
       reference_data_ = reference_data8_;
       second_pred_ = second_pred8_;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       use_high_bit_depth_ = true;
-      bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+      bit_depth_ = static_cast<aom_bit_depth_t>(bd_);
       source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
       reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
       second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     mask_ = (1 << bit_depth_) - 1;
     source_stride_ = (width_ + 31) & ~31;
@@ -107,11 +107,11 @@
   }
 
   virtual uint8_t *GetReference(int block_idx) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_high_bit_depth_)
       return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
                                 block_idx * kDataBlockSize);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return reference_data_ + block_idx * kDataBlockSize;
   }
 
@@ -121,21 +121,21 @@
     unsigned int sad = 0;
     const uint8_t *const reference8 = GetReference(block_idx);
     const uint8_t *const source8 = source_data_;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint16_t *const reference16 =
         CONVERT_TO_SHORTPTR(GetReference(block_idx));
     const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           sad += abs(source8[h * source_stride_ + w] -
                      reference8[h * reference_stride_ + w]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           sad += abs(source16[h * source_stride_ + w] -
                      reference16[h * reference_stride_ + w]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -150,12 +150,12 @@
     const uint8_t *const reference8 = GetReference(block_idx);
     const uint8_t *const source8 = source_data_;
     const uint8_t *const second_pred8 = second_pred_;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint16_t *const reference16 =
         CONVERT_TO_SHORTPTR(GetReference(block_idx));
     const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
     const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
@@ -163,13 +163,13 @@
                           reference8[h * reference_stride_ + w];
           const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source8[h * source_stride_ + w] - comp_pred);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           const int tmp = second_pred16[h * width_ + w] +
                           reference16[h * reference_stride_ + w];
           const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source16[h * source_stride_ + w] - comp_pred);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -178,17 +178,17 @@
 
   void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
     uint8_t *data8 = data;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           data16[h * stride + w] = fill_constant;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -196,24 +196,24 @@
 
   void FillRandom(uint8_t *data, int stride) {
     uint8_t *data8 = data;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           data8[h * stride + w] = rnd_.Rand8();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           data16[h * stride + w] = rnd_.Rand16() & mask_;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
   }
 
   int width_, height_, mask_, bd_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   static uint8_t *source_data_;
   static uint8_t *reference_data_;
   static uint8_t *second_pred_;
@@ -472,236 +472,236 @@
 //------------------------------------------------------------------------------
 // C functions
 const SadMxNParam c_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128_c, -1),
-  make_tuple(128, 64, &vpx_sad128x64_c, -1),
-  make_tuple(64, 128, &vpx_sad64x128_c, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64_c, -1),
-  make_tuple(64, 32, &vpx_sad64x32_c, -1),
-  make_tuple(32, 64, &vpx_sad32x64_c, -1),
-  make_tuple(32, 32, &vpx_sad32x32_c, -1),
-  make_tuple(32, 16, &vpx_sad32x16_c, -1),
-  make_tuple(16, 32, &vpx_sad16x32_c, -1),
-  make_tuple(16, 16, &vpx_sad16x16_c, -1),
-  make_tuple(16, 8, &vpx_sad16x8_c, -1),
-  make_tuple(8, 16, &vpx_sad8x16_c, -1),
-  make_tuple(8, 8, &vpx_sad8x8_c, -1),
-  make_tuple(8, 4, &vpx_sad8x4_c, -1),
-  make_tuple(4, 8, &vpx_sad4x8_c, -1),
-  make_tuple(4, 4, &vpx_sad4x4_c, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_c, 8),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_c, 8),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 8),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 8),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 8),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_c, 10),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_c, 10),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 10),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 10),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_c, 12),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_c, 12),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 12),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 12),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128_c, -1),
+  make_tuple(128, 64, &aom_sad128x64_c, -1),
+  make_tuple(64, 128, &aom_sad64x128_c, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64_c, -1),
+  make_tuple(64, 32, &aom_sad64x32_c, -1),
+  make_tuple(32, 64, &aom_sad32x64_c, -1),
+  make_tuple(32, 32, &aom_sad32x32_c, -1),
+  make_tuple(32, 16, &aom_sad32x16_c, -1),
+  make_tuple(16, 32, &aom_sad16x32_c, -1),
+  make_tuple(16, 16, &aom_sad16x16_c, -1),
+  make_tuple(16, 8, &aom_sad16x8_c, -1),
+  make_tuple(8, 16, &aom_sad8x16_c, -1),
+  make_tuple(8, 8, &aom_sad8x8_c, -1),
+  make_tuple(8, 4, &aom_sad8x4_c, -1),
+  make_tuple(4, 8, &aom_sad4x8_c, -1),
+  make_tuple(4, 4, &aom_sad4x4_c, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_c, 8),
+  make_tuple(128, 64, &aom_highbd_sad128x64_c, 8),
+  make_tuple(64, 128, &aom_highbd_sad64x128_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_c, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32_c, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64_c, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32_c, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16_c, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32_c, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16_c, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8_c, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16_c, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8_c, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4_c, 8),
+  make_tuple(4, 8, &aom_highbd_sad4x8_c, 8),
+  make_tuple(4, 4, &aom_highbd_sad4x4_c, 8),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_c, 10),
+  make_tuple(128, 64, &aom_highbd_sad128x64_c, 10),
+  make_tuple(64, 128, &aom_highbd_sad64x128_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_c, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32_c, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64_c, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32_c, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16_c, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32_c, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16_c, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8_c, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16_c, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8_c, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4_c, 10),
+  make_tuple(4, 8, &aom_highbd_sad4x8_c, 10),
+  make_tuple(4, 4, &aom_highbd_sad4x4_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_c, 12),
+  make_tuple(128, 64, &aom_highbd_sad128x64_c, 12),
+  make_tuple(64, 128, &aom_highbd_sad64x128_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_c, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32_c, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64_c, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32_c, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16_c, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32_c, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16_c, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8_c, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16_c, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8_c, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4_c, 12),
+  make_tuple(4, 8, &aom_highbd_sad4x8_c, 12),
+  make_tuple(4, 4, &aom_highbd_sad4x4_c, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
 
 const SadMxNAvgParam avg_c_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128_avg_c, -1),
-  make_tuple(128, 64, &vpx_sad128x64_avg_c, -1),
-  make_tuple(64, 128, &vpx_sad64x128_avg_c, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64_avg_c, -1),
-  make_tuple(64, 32, &vpx_sad64x32_avg_c, -1),
-  make_tuple(32, 64, &vpx_sad32x64_avg_c, -1),
-  make_tuple(32, 32, &vpx_sad32x32_avg_c, -1),
-  make_tuple(32, 16, &vpx_sad32x16_avg_c, -1),
-  make_tuple(16, 32, &vpx_sad16x32_avg_c, -1),
-  make_tuple(16, 16, &vpx_sad16x16_avg_c, -1),
-  make_tuple(16, 8, &vpx_sad16x8_avg_c, -1),
-  make_tuple(8, 16, &vpx_sad8x16_avg_c, -1),
-  make_tuple(8, 8, &vpx_sad8x8_avg_c, -1),
-  make_tuple(8, 4, &vpx_sad8x4_avg_c, -1),
-  make_tuple(4, 8, &vpx_sad4x8_avg_c, -1),
-  make_tuple(4, 4, &vpx_sad4x4_avg_c, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_avg_c, 8),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_avg_c, 8),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_avg_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 8),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 8),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 8),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_avg_c, 10),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_avg_c, 10),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_avg_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 10),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 10),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128_avg_c, 12),
-  make_tuple(128, 64, &vpx_highbd_sad128x64_avg_c, 12),
-  make_tuple(64, 128, &vpx_highbd_sad64x128_avg_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 12),
-  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 12),
-  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128_avg_c, -1),
+  make_tuple(128, 64, &aom_sad128x64_avg_c, -1),
+  make_tuple(64, 128, &aom_sad64x128_avg_c, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64_avg_c, -1),
+  make_tuple(64, 32, &aom_sad64x32_avg_c, -1),
+  make_tuple(32, 64, &aom_sad32x64_avg_c, -1),
+  make_tuple(32, 32, &aom_sad32x32_avg_c, -1),
+  make_tuple(32, 16, &aom_sad32x16_avg_c, -1),
+  make_tuple(16, 32, &aom_sad16x32_avg_c, -1),
+  make_tuple(16, 16, &aom_sad16x16_avg_c, -1),
+  make_tuple(16, 8, &aom_sad16x8_avg_c, -1),
+  make_tuple(8, 16, &aom_sad8x16_avg_c, -1),
+  make_tuple(8, 8, &aom_sad8x8_avg_c, -1),
+  make_tuple(8, 4, &aom_sad8x4_avg_c, -1),
+  make_tuple(4, 8, &aom_sad4x8_avg_c, -1),
+  make_tuple(4, 4, &aom_sad4x4_avg_c, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_avg_c, 8),
+  make_tuple(128, 64, &aom_highbd_sad128x64_avg_c, 8),
+  make_tuple(64, 128, &aom_highbd_sad64x128_avg_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_c, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_c, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_c, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_c, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_c, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_c, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_c, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_c, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_c, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_c, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_c, 8),
+  make_tuple(4, 8, &aom_highbd_sad4x8_avg_c, 8),
+  make_tuple(4, 4, &aom_highbd_sad4x4_avg_c, 8),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_avg_c, 10),
+  make_tuple(128, 64, &aom_highbd_sad128x64_avg_c, 10),
+  make_tuple(64, 128, &aom_highbd_sad64x128_avg_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_c, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_c, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_c, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_c, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_c, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_c, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_c, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_c, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_c, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_c, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_c, 10),
+  make_tuple(4, 8, &aom_highbd_sad4x8_avg_c, 10),
+  make_tuple(4, 4, &aom_highbd_sad4x4_avg_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128_avg_c, 12),
+  make_tuple(128, 64, &aom_highbd_sad128x64_avg_c, 12),
+  make_tuple(64, 128, &aom_highbd_sad64x128_avg_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_c, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_c, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_c, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_c, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_c, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_c, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_c, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_c, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_c, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_c, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_c, 12),
+  make_tuple(4, 8, &aom_highbd_sad4x8_avg_c, 12),
+  make_tuple(4, 4, &aom_highbd_sad4x4_avg_c, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
 
 const SadMxNx4Param x4d_c_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128x4d_c, -1),
-  make_tuple(128, 64, &vpx_sad128x64x4d_c, -1),
-  make_tuple(64, 128, &vpx_sad64x128x4d_c, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64x4d_c, -1),
-  make_tuple(64, 32, &vpx_sad64x32x4d_c, -1),
-  make_tuple(32, 64, &vpx_sad32x64x4d_c, -1),
-  make_tuple(32, 32, &vpx_sad32x32x4d_c, -1),
-  make_tuple(32, 16, &vpx_sad32x16x4d_c, -1),
-  make_tuple(16, 32, &vpx_sad16x32x4d_c, -1),
-  make_tuple(16, 16, &vpx_sad16x16x4d_c, -1),
-  make_tuple(16, 8, &vpx_sad16x8x4d_c, -1),
-  make_tuple(8, 16, &vpx_sad8x16x4d_c, -1),
-  make_tuple(8, 8, &vpx_sad8x8x4d_c, -1),
-  make_tuple(8, 4, &vpx_sad8x4x4d_c, -1),
-  make_tuple(4, 8, &vpx_sad4x8x4d_c, -1),
-  make_tuple(4, 4, &vpx_sad4x4x4d_c, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128x4d_c, 8),
-  make_tuple(128, 64, &vpx_highbd_sad128x64x4d_c, 8),
-  make_tuple(64, 128, &vpx_highbd_sad64x128x4d_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 8),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 8),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 8),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128x4d_c, 10),
-  make_tuple(128, 64, &vpx_highbd_sad128x64x4d_c, 10),
-  make_tuple(64, 128, &vpx_highbd_sad64x128x4d_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 10),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 10),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_highbd_sad128x128x4d_c, 12),
-  make_tuple(128, 64, &vpx_highbd_sad128x64x4d_c, 12),
-  make_tuple(64, 128, &vpx_highbd_sad64x128x4d_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 12),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 12),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128x4d_c, -1),
+  make_tuple(128, 64, &aom_sad128x64x4d_c, -1),
+  make_tuple(64, 128, &aom_sad64x128x4d_c, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64x4d_c, -1),
+  make_tuple(64, 32, &aom_sad64x32x4d_c, -1),
+  make_tuple(32, 64, &aom_sad32x64x4d_c, -1),
+  make_tuple(32, 32, &aom_sad32x32x4d_c, -1),
+  make_tuple(32, 16, &aom_sad32x16x4d_c, -1),
+  make_tuple(16, 32, &aom_sad16x32x4d_c, -1),
+  make_tuple(16, 16, &aom_sad16x16x4d_c, -1),
+  make_tuple(16, 8, &aom_sad16x8x4d_c, -1),
+  make_tuple(8, 16, &aom_sad8x16x4d_c, -1),
+  make_tuple(8, 8, &aom_sad8x8x4d_c, -1),
+  make_tuple(8, 4, &aom_sad8x4x4d_c, -1),
+  make_tuple(4, 8, &aom_sad4x8x4d_c, -1),
+  make_tuple(4, 4, &aom_sad4x4x4d_c, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128x4d_c, 8),
+  make_tuple(128, 64, &aom_highbd_sad128x64x4d_c, 8),
+  make_tuple(64, 128, &aom_highbd_sad64x128x4d_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_c, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_c, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_c, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_c, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_c, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_c, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_c, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_c, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_c, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_c, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_c, 8),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_c, 8),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_c, 8),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128x4d_c, 10),
+  make_tuple(128, 64, &aom_highbd_sad128x64x4d_c, 10),
+  make_tuple(64, 128, &aom_highbd_sad64x128x4d_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_c, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_c, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_c, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_c, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_c, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_c, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_c, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_c, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_c, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_c, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_c, 10),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_c, 10),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_highbd_sad128x128x4d_c, 12),
+  make_tuple(128, 64, &aom_highbd_sad128x64x4d_c, 12),
+  make_tuple(64, 128, &aom_highbd_sad64x128x4d_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_c, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_c, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_c, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_c, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_c, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_c, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_c, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_c, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_c, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_c, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_c, 12),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_c, 12),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_c, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
 
@@ -709,27 +709,27 @@
 // ARM functions
 #if HAVE_MEDIA
 const SadMxNParam media_tests[] = {
-  make_tuple(16, 16, &vpx_sad16x16_media, -1),
+  make_tuple(16, 16, &aom_sad16x16_media, -1),
 };
 INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
 #endif  // HAVE_MEDIA
 
 #if HAVE_NEON
 const SadMxNParam neon_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64_neon, -1),
-  make_tuple(32, 32, &vpx_sad32x32_neon, -1),
-  make_tuple(16, 16, &vpx_sad16x16_neon, -1),
-  make_tuple(16, 8, &vpx_sad16x8_neon, -1),
-  make_tuple(8, 16, &vpx_sad8x16_neon, -1),
-  make_tuple(8, 8, &vpx_sad8x8_neon, -1),
-  make_tuple(4, 4, &vpx_sad4x4_neon, -1),
+  make_tuple(64, 64, &aom_sad64x64_neon, -1),
+  make_tuple(32, 32, &aom_sad32x32_neon, -1),
+  make_tuple(16, 16, &aom_sad16x16_neon, -1),
+  make_tuple(16, 8, &aom_sad16x8_neon, -1),
+  make_tuple(8, 16, &aom_sad8x16_neon, -1),
+  make_tuple(8, 8, &aom_sad8x8_neon, -1),
+  make_tuple(4, 4, &aom_sad4x4_neon, -1),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
 
 const SadMxNx4Param x4d_neon_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64x4d_neon, -1),
-  make_tuple(32, 32, &vpx_sad32x32x4d_neon, -1),
-  make_tuple(16, 16, &vpx_sad16x16x4d_neon, -1),
+  make_tuple(64, 64, &aom_sad64x64x4d_neon, -1),
+  make_tuple(32, 32, &aom_sad32x32x4d_neon, -1),
+  make_tuple(16, 16, &aom_sad16x16x4d_neon, -1),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
 #endif  // HAVE_NEON
@@ -738,179 +738,179 @@
 // x86 functions
 #if HAVE_SSE2
 const SadMxNParam sse2_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128_sse2, -1),
-  make_tuple(128, 64, &vpx_sad128x64_sse2, -1),
-  make_tuple(64, 128, &vpx_sad64x128_sse2, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64_sse2, -1),
-  make_tuple(64, 32, &vpx_sad64x32_sse2, -1),
-  make_tuple(32, 64, &vpx_sad32x64_sse2, -1),
-  make_tuple(32, 32, &vpx_sad32x32_sse2, -1),
-  make_tuple(32, 16, &vpx_sad32x16_sse2, -1),
-  make_tuple(16, 32, &vpx_sad16x32_sse2, -1),
-  make_tuple(16, 16, &vpx_sad16x16_sse2, -1),
-  make_tuple(16, 8, &vpx_sad16x8_sse2, -1),
-  make_tuple(8, 16, &vpx_sad8x16_sse2, -1),
-  make_tuple(8, 8, &vpx_sad8x8_sse2, -1),
-  make_tuple(8, 4, &vpx_sad8x4_sse2, -1),
-  make_tuple(4, 8, &vpx_sad4x8_sse2, -1),
-  make_tuple(4, 4, &vpx_sad4x4_sse2, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 8),
-  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 10),
-  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128_sse2, -1),
+  make_tuple(128, 64, &aom_sad128x64_sse2, -1),
+  make_tuple(64, 128, &aom_sad64x128_sse2, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64_sse2, -1),
+  make_tuple(64, 32, &aom_sad64x32_sse2, -1),
+  make_tuple(32, 64, &aom_sad32x64_sse2, -1),
+  make_tuple(32, 32, &aom_sad32x32_sse2, -1),
+  make_tuple(32, 16, &aom_sad32x16_sse2, -1),
+  make_tuple(16, 32, &aom_sad16x32_sse2, -1),
+  make_tuple(16, 16, &aom_sad16x16_sse2, -1),
+  make_tuple(16, 8, &aom_sad16x8_sse2, -1),
+  make_tuple(8, 16, &aom_sad8x16_sse2, -1),
+  make_tuple(8, 8, &aom_sad8x8_sse2, -1),
+  make_tuple(8, 4, &aom_sad8x4_sse2, -1),
+  make_tuple(4, 8, &aom_sad4x8_sse2, -1),
+  make_tuple(4, 4, &aom_sad4x4_sse2, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+  make_tuple(64, 64, &aom_highbd_sad64x64_sse2, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32_sse2, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64_sse2, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32_sse2, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16_sse2, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32_sse2, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16_sse2, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8_sse2, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16_sse2, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8_sse2, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4_sse2, 8),
+  make_tuple(64, 64, &aom_highbd_sad64x64_sse2, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32_sse2, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64_sse2, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32_sse2, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16_sse2, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32_sse2, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16_sse2, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8_sse2, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16_sse2, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8_sse2, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4_sse2, 10),
+  make_tuple(64, 64, &aom_highbd_sad64x64_sse2, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32_sse2, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64_sse2, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32_sse2, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16_sse2, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32_sse2, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16_sse2, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8_sse2, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16_sse2, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8_sse2, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4_sse2, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
 
 const SadMxNAvgParam avg_sse2_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128_avg_sse2, -1),
-  make_tuple(128, 64, &vpx_sad128x64_avg_sse2, -1),
-  make_tuple(64, 128, &vpx_sad64x128_avg_sse2, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64_avg_sse2, -1),
-  make_tuple(64, 32, &vpx_sad64x32_avg_sse2, -1),
-  make_tuple(32, 64, &vpx_sad32x64_avg_sse2, -1),
-  make_tuple(32, 32, &vpx_sad32x32_avg_sse2, -1),
-  make_tuple(32, 16, &vpx_sad32x16_avg_sse2, -1),
-  make_tuple(16, 32, &vpx_sad16x32_avg_sse2, -1),
-  make_tuple(16, 16, &vpx_sad16x16_avg_sse2, -1),
-  make_tuple(16, 8, &vpx_sad16x8_avg_sse2, -1),
-  make_tuple(8, 16, &vpx_sad8x16_avg_sse2, -1),
-  make_tuple(8, 8, &vpx_sad8x8_avg_sse2, -1),
-  make_tuple(8, 4, &vpx_sad8x4_avg_sse2, -1),
-  make_tuple(4, 8, &vpx_sad4x8_avg_sse2, -1),
-  make_tuple(4, 4, &vpx_sad4x4_avg_sse2, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 8),
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 10),
-  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128_avg_sse2, -1),
+  make_tuple(128, 64, &aom_sad128x64_avg_sse2, -1),
+  make_tuple(64, 128, &aom_sad64x128_avg_sse2, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64_avg_sse2, -1),
+  make_tuple(64, 32, &aom_sad64x32_avg_sse2, -1),
+  make_tuple(32, 64, &aom_sad32x64_avg_sse2, -1),
+  make_tuple(32, 32, &aom_sad32x32_avg_sse2, -1),
+  make_tuple(32, 16, &aom_sad32x16_avg_sse2, -1),
+  make_tuple(16, 32, &aom_sad16x32_avg_sse2, -1),
+  make_tuple(16, 16, &aom_sad16x16_avg_sse2, -1),
+  make_tuple(16, 8, &aom_sad16x8_avg_sse2, -1),
+  make_tuple(8, 16, &aom_sad8x16_avg_sse2, -1),
+  make_tuple(8, 8, &aom_sad8x8_avg_sse2, -1),
+  make_tuple(8, 4, &aom_sad8x4_avg_sse2, -1),
+  make_tuple(4, 8, &aom_sad4x8_avg_sse2, -1),
+  make_tuple(4, 4, &aom_sad4x4_avg_sse2, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_sse2, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_sse2, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_sse2, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_sse2, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_sse2, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_sse2, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_sse2, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_sse2, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_sse2, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_sse2, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_sse2, 8),
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_sse2, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_sse2, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_sse2, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_sse2, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_sse2, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_sse2, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_sse2, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_sse2, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_sse2, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_sse2, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_sse2, 10),
+  make_tuple(64, 64, &aom_highbd_sad64x64_avg_sse2, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32_avg_sse2, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64_avg_sse2, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32_avg_sse2, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16_avg_sse2, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32_avg_sse2, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16_avg_sse2, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8_avg_sse2, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16_avg_sse2, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8_avg_sse2, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4_avg_sse2, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
 
 const SadMxNx4Param x4d_sse2_tests[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(128, 128, &vpx_sad128x128x4d_sse2, -1),
-  make_tuple(128, 64, &vpx_sad128x64x4d_sse2, -1),
-  make_tuple(64, 128, &vpx_sad64x128x4d_sse2, -1),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(64, 64, &vpx_sad64x64x4d_sse2, -1),
-  make_tuple(64, 32, &vpx_sad64x32x4d_sse2, -1),
-  make_tuple(32, 64, &vpx_sad32x64x4d_sse2, -1),
-  make_tuple(32, 32, &vpx_sad32x32x4d_sse2, -1),
-  make_tuple(32, 16, &vpx_sad32x16x4d_sse2, -1),
-  make_tuple(16, 32, &vpx_sad16x32x4d_sse2, -1),
-  make_tuple(16, 16, &vpx_sad16x16x4d_sse2, -1),
-  make_tuple(16, 8, &vpx_sad16x8x4d_sse2, -1),
-  make_tuple(8, 16, &vpx_sad8x16x4d_sse2, -1),
-  make_tuple(8, 8, &vpx_sad8x8x4d_sse2, -1),
-  make_tuple(8, 4, &vpx_sad8x4x4d_sse2, -1),
-  make_tuple(4, 8, &vpx_sad4x8x4d_sse2, -1),
-  make_tuple(4, 4, &vpx_sad4x4x4d_sse2, -1),
-#if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 8),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 8),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 8),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 8),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 8),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 8),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 8),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 8),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 8),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 8),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 8),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 8),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 8),
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 10),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 10),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 10),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 10),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 10),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 10),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 10),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 10),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 10),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 10),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 10),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 10),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 10),
-  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 12),
-  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 12),
-  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 12),
-  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 12),
-  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 12),
-  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 12),
-  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 12),
-  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 12),
-  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 12),
-  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 12),
-  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 12),
-  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 12),
-  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 12),
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(128, 128, &aom_sad128x128x4d_sse2, -1),
+  make_tuple(128, 64, &aom_sad128x64x4d_sse2, -1),
+  make_tuple(64, 128, &aom_sad64x128x4d_sse2, -1),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(64, 64, &aom_sad64x64x4d_sse2, -1),
+  make_tuple(64, 32, &aom_sad64x32x4d_sse2, -1),
+  make_tuple(32, 64, &aom_sad32x64x4d_sse2, -1),
+  make_tuple(32, 32, &aom_sad32x32x4d_sse2, -1),
+  make_tuple(32, 16, &aom_sad32x16x4d_sse2, -1),
+  make_tuple(16, 32, &aom_sad16x32x4d_sse2, -1),
+  make_tuple(16, 16, &aom_sad16x16x4d_sse2, -1),
+  make_tuple(16, 8, &aom_sad16x8x4d_sse2, -1),
+  make_tuple(8, 16, &aom_sad8x16x4d_sse2, -1),
+  make_tuple(8, 8, &aom_sad8x8x4d_sse2, -1),
+  make_tuple(8, 4, &aom_sad8x4x4d_sse2, -1),
+  make_tuple(4, 8, &aom_sad4x8x4d_sse2, -1),
+  make_tuple(4, 4, &aom_sad4x4x4d_sse2, -1),
+#if CONFIG_AOM_HIGHBITDEPTH
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_sse2, 8),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_sse2, 8),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_sse2, 8),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_sse2, 8),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_sse2, 8),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_sse2, 8),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_sse2, 8),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_sse2, 8),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_sse2, 8),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_sse2, 8),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_sse2, 8),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_sse2, 8),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_sse2, 8),
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_sse2, 10),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_sse2, 10),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_sse2, 10),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_sse2, 10),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_sse2, 10),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_sse2, 10),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_sse2, 10),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_sse2, 10),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_sse2, 10),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_sse2, 10),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_sse2, 10),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_sse2, 10),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_sse2, 10),
+  make_tuple(64, 64, &aom_highbd_sad64x64x4d_sse2, 12),
+  make_tuple(64, 32, &aom_highbd_sad64x32x4d_sse2, 12),
+  make_tuple(32, 64, &aom_highbd_sad32x64x4d_sse2, 12),
+  make_tuple(32, 32, &aom_highbd_sad32x32x4d_sse2, 12),
+  make_tuple(32, 16, &aom_highbd_sad32x16x4d_sse2, 12),
+  make_tuple(16, 32, &aom_highbd_sad16x32x4d_sse2, 12),
+  make_tuple(16, 16, &aom_highbd_sad16x16x4d_sse2, 12),
+  make_tuple(16, 8, &aom_highbd_sad16x8x4d_sse2, 12),
+  make_tuple(8, 16, &aom_highbd_sad8x16x4d_sse2, 12),
+  make_tuple(8, 8, &aom_highbd_sad8x8x4d_sse2, 12),
+  make_tuple(8, 4, &aom_highbd_sad8x4x4d_sse2, 12),
+  make_tuple(4, 8, &aom_highbd_sad4x8x4d_sse2, 12),
+  make_tuple(4, 4, &aom_highbd_sad4x4x4d_sse2, 12),
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
 #endif  // HAVE_SSE2
@@ -929,26 +929,26 @@
 
 #if HAVE_AVX2
 const SadMxNParam avx2_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64_avx2, -1),
-  make_tuple(64, 32, &vpx_sad64x32_avx2, -1),
-  make_tuple(32, 64, &vpx_sad32x64_avx2, -1),
-  make_tuple(32, 32, &vpx_sad32x32_avx2, -1),
-  make_tuple(32, 16, &vpx_sad32x16_avx2, -1),
+  make_tuple(64, 64, &aom_sad64x64_avx2, -1),
+  make_tuple(64, 32, &aom_sad64x32_avx2, -1),
+  make_tuple(32, 64, &aom_sad32x64_avx2, -1),
+  make_tuple(32, 32, &aom_sad32x32_avx2, -1),
+  make_tuple(32, 16, &aom_sad32x16_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
 
 const SadMxNAvgParam avg_avx2_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64_avg_avx2, -1),
-  make_tuple(64, 32, &vpx_sad64x32_avg_avx2, -1),
-  make_tuple(32, 64, &vpx_sad32x64_avg_avx2, -1),
-  make_tuple(32, 32, &vpx_sad32x32_avg_avx2, -1),
-  make_tuple(32, 16, &vpx_sad32x16_avg_avx2, -1),
+  make_tuple(64, 64, &aom_sad64x64_avg_avx2, -1),
+  make_tuple(64, 32, &aom_sad64x32_avg_avx2, -1),
+  make_tuple(32, 64, &aom_sad32x64_avg_avx2, -1),
+  make_tuple(32, 32, &aom_sad32x32_avg_avx2, -1),
+  make_tuple(32, 16, &aom_sad32x16_avg_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
 
 const SadMxNx4Param x4d_avx2_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64x4d_avx2, -1),
-  make_tuple(32, 32, &vpx_sad32x32x4d_avx2, -1),
+  make_tuple(64, 64, &aom_sad64x64x4d_avx2, -1),
+  make_tuple(32, 32, &aom_sad32x32x4d_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
 #endif  // HAVE_AVX2
@@ -957,53 +957,53 @@
 // MIPS functions
 #if HAVE_MSA
 const SadMxNParam msa_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64_msa, -1),
-  make_tuple(64, 32, &vpx_sad64x32_msa, -1),
-  make_tuple(32, 64, &vpx_sad32x64_msa, -1),
-  make_tuple(32, 32, &vpx_sad32x32_msa, -1),
-  make_tuple(32, 16, &vpx_sad32x16_msa, -1),
-  make_tuple(16, 32, &vpx_sad16x32_msa, -1),
-  make_tuple(16, 16, &vpx_sad16x16_msa, -1),
-  make_tuple(16, 8, &vpx_sad16x8_msa, -1),
-  make_tuple(8, 16, &vpx_sad8x16_msa, -1),
-  make_tuple(8, 8, &vpx_sad8x8_msa, -1),
-  make_tuple(8, 4, &vpx_sad8x4_msa, -1),
-  make_tuple(4, 8, &vpx_sad4x8_msa, -1),
-  make_tuple(4, 4, &vpx_sad4x4_msa, -1),
+  make_tuple(64, 64, &aom_sad64x64_msa, -1),
+  make_tuple(64, 32, &aom_sad64x32_msa, -1),
+  make_tuple(32, 64, &aom_sad32x64_msa, -1),
+  make_tuple(32, 32, &aom_sad32x32_msa, -1),
+  make_tuple(32, 16, &aom_sad32x16_msa, -1),
+  make_tuple(16, 32, &aom_sad16x32_msa, -1),
+  make_tuple(16, 16, &aom_sad16x16_msa, -1),
+  make_tuple(16, 8, &aom_sad16x8_msa, -1),
+  make_tuple(8, 16, &aom_sad8x16_msa, -1),
+  make_tuple(8, 8, &aom_sad8x8_msa, -1),
+  make_tuple(8, 4, &aom_sad8x4_msa, -1),
+  make_tuple(4, 8, &aom_sad4x8_msa, -1),
+  make_tuple(4, 4, &aom_sad4x4_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
 
 const SadMxNAvgParam avg_msa_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64_avg_msa, -1),
-  make_tuple(64, 32, &vpx_sad64x32_avg_msa, -1),
-  make_tuple(32, 64, &vpx_sad32x64_avg_msa, -1),
-  make_tuple(32, 32, &vpx_sad32x32_avg_msa, -1),
-  make_tuple(32, 16, &vpx_sad32x16_avg_msa, -1),
-  make_tuple(16, 32, &vpx_sad16x32_avg_msa, -1),
-  make_tuple(16, 16, &vpx_sad16x16_avg_msa, -1),
-  make_tuple(16, 8, &vpx_sad16x8_avg_msa, -1),
-  make_tuple(8, 16, &vpx_sad8x16_avg_msa, -1),
-  make_tuple(8, 8, &vpx_sad8x8_avg_msa, -1),
-  make_tuple(8, 4, &vpx_sad8x4_avg_msa, -1),
-  make_tuple(4, 8, &vpx_sad4x8_avg_msa, -1),
-  make_tuple(4, 4, &vpx_sad4x4_avg_msa, -1),
+  make_tuple(64, 64, &aom_sad64x64_avg_msa, -1),
+  make_tuple(64, 32, &aom_sad64x32_avg_msa, -1),
+  make_tuple(32, 64, &aom_sad32x64_avg_msa, -1),
+  make_tuple(32, 32, &aom_sad32x32_avg_msa, -1),
+  make_tuple(32, 16, &aom_sad32x16_avg_msa, -1),
+  make_tuple(16, 32, &aom_sad16x32_avg_msa, -1),
+  make_tuple(16, 16, &aom_sad16x16_avg_msa, -1),
+  make_tuple(16, 8, &aom_sad16x8_avg_msa, -1),
+  make_tuple(8, 16, &aom_sad8x16_avg_msa, -1),
+  make_tuple(8, 8, &aom_sad8x8_avg_msa, -1),
+  make_tuple(8, 4, &aom_sad8x4_avg_msa, -1),
+  make_tuple(4, 8, &aom_sad4x8_avg_msa, -1),
+  make_tuple(4, 4, &aom_sad4x4_avg_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
 
 const SadMxNx4Param x4d_msa_tests[] = {
-  make_tuple(64, 64, &vpx_sad64x64x4d_msa, -1),
-  make_tuple(64, 32, &vpx_sad64x32x4d_msa, -1),
-  make_tuple(32, 64, &vpx_sad32x64x4d_msa, -1),
-  make_tuple(32, 32, &vpx_sad32x32x4d_msa, -1),
-  make_tuple(32, 16, &vpx_sad32x16x4d_msa, -1),
-  make_tuple(16, 32, &vpx_sad16x32x4d_msa, -1),
-  make_tuple(16, 16, &vpx_sad16x16x4d_msa, -1),
-  make_tuple(16, 8, &vpx_sad16x8x4d_msa, -1),
-  make_tuple(8, 16, &vpx_sad8x16x4d_msa, -1),
-  make_tuple(8, 8, &vpx_sad8x8x4d_msa, -1),
-  make_tuple(8, 4, &vpx_sad8x4x4d_msa, -1),
-  make_tuple(4, 8, &vpx_sad4x8x4d_msa, -1),
-  make_tuple(4, 4, &vpx_sad4x4x4d_msa, -1),
+  make_tuple(64, 64, &aom_sad64x64x4d_msa, -1),
+  make_tuple(64, 32, &aom_sad64x32x4d_msa, -1),
+  make_tuple(32, 64, &aom_sad32x64x4d_msa, -1),
+  make_tuple(32, 32, &aom_sad32x32x4d_msa, -1),
+  make_tuple(32, 16, &aom_sad32x16x4d_msa, -1),
+  make_tuple(16, 32, &aom_sad16x32x4d_msa, -1),
+  make_tuple(16, 16, &aom_sad16x16x4d_msa, -1),
+  make_tuple(16, 8, &aom_sad16x8x4d_msa, -1),
+  make_tuple(8, 16, &aom_sad8x16x4d_msa, -1),
+  make_tuple(8, 8, &aom_sad8x8x4d_msa, -1),
+  make_tuple(8, 4, &aom_sad8x4x4d_msa, -1),
+  make_tuple(4, 8, &aom_sad4x8x4d_msa, -1),
+  make_tuple(4, 4, &aom_sad4x4x4d_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
 #endif  // HAVE_MSA
diff --git a/test/set_maps.sh b/test/set_maps.sh
index b50453e..174dc4a 100755
--- a/test/set_maps.sh
+++ b/test/set_maps.sh
@@ -19,10 +19,10 @@
 # $LIBAOM_BIN_PATH.
 set_maps_verify_environment() {
   if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
-  if [ -z "$(vpx_tool_path set_maps)" ]; then
+  if [ -z "$(aom_tool_path set_maps)" ]; then
     elog "set_maps not found. It must exist in LIBAOM_BIN_PATH or its parent."
     return 1
   fi
@@ -30,30 +30,30 @@
 
 # Runs set_maps using the codec specified by $1.
 set_maps() {
-  local encoder="$(vpx_tool_path set_maps)"
+  local encoder="$(aom_tool_path set_maps)"
   local codec="$1"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/set_maps_${codec}.ivf"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/set_maps_${codec}.ivf"
 
-  eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
+  eval "${AOM_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
       "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" \
       ${devnull}
 
   [ -e "${output_file}" ] || return 1
 }
 
-set_maps_vp8() {
-  if [ "$(vp8_encode_available)" = "yes" ]; then
-    set_maps vp8 || return 1
+set_maps_aom() {
+  if [ "$(aom_encode_available)" = "yes" ]; then
+    set_maps aom || return 1
   fi
 }
 
-set_maps_vp9() {
-  if [ "$(vp9_encode_available)" = "yes" ]; then
-    set_maps vp9 || return 1
+set_maps_av1() {
+  if [ "$(av1_encode_available)" = "yes" ]; then
+    set_maps av1 || return 1
   fi
 }
 
-set_maps_tests="set_maps_vp8
-                set_maps_vp9"
+set_maps_tests="set_maps_aom
+                set_maps_av1"
 
 run_tests set_maps_verify_environment "${set_maps_tests}"
diff --git a/test/simple_decoder.sh b/test/simple_decoder.sh
index 0955c92..08f8fb0 100755
--- a/test/simple_decoder.sh
+++ b/test/simple_decoder.sh
@@ -16,10 +16,10 @@
 . $(dirname $0)/tools_common.sh
 
 # Environment check: Make sure input is available:
-#   $VP8_IVF_FILE and $VP9_IVF_FILE are required.
+#   $AOM_IVF_FILE and $AV1_IVF_FILE are required.
 simple_decoder_verify_environment() {
-  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${VP9_IVF_FILE}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+  if [ ! -e "${AOM_IVF_FILE}" ] || [ ! -e "${AV1_IVF_FILE}" ]; then
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
@@ -27,35 +27,35 @@
 # Runs simple_decoder using $1 as input file. $2 is the codec name, and is used
 # solely to name the output file.
 simple_decoder() {
-  local decoder="${LIBAOM_BIN_PATH}/simple_decoder${VPX_TEST_EXE_SUFFIX}"
+  local decoder="${LIBAOM_BIN_PATH}/simple_decoder${AOM_TEST_EXE_SUFFIX}"
   local input_file="$1"
   local codec="$2"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/simple_decoder_${codec}.raw"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/simple_decoder_${codec}.raw"
 
   if [ ! -x "${decoder}" ]; then
     elog "${decoder} does not exist or is not executable."
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
+  eval "${AOM_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
       ${devnull}
 
   [ -e "${output_file}" ] || return 1
 }
 
-simple_decoder_vp8() {
-  if [ "$(vp8_decode_available)" = "yes" ]; then
-    simple_decoder "${VP8_IVF_FILE}" vp8 || return 1
+simple_decoder_aom() {
+  if [ "$(aom_decode_available)" = "yes" ]; then
+    simple_decoder "${AOM_IVF_FILE}" aom || return 1
   fi
 }
 
-simple_decoder_vp9() {
-  if [ "$(vp9_decode_available)" = "yes" ]; then
-    simple_decoder "${VP9_IVF_FILE}" vp9 || return 1
+simple_decoder_av1() {
+  if [ "$(av1_decode_available)" = "yes" ]; then
+    simple_decoder "${AV1_IVF_FILE}" av1 || return 1
   fi
 }
 
-simple_decoder_tests="simple_decoder_vp8
-                      simple_decoder_vp9"
+simple_decoder_tests="simple_decoder_aom
+                      simple_decoder_av1"
 
 run_tests simple_decoder_verify_environment "${simple_decoder_tests}"
diff --git a/test/simple_encoder.sh b/test/simple_encoder.sh
index e0da0ec..25208c9 100755
--- a/test/simple_encoder.sh
+++ b/test/simple_encoder.sh
@@ -18,42 +18,45 @@
 # Environment check: $YUV_RAW_INPUT is required.
 simple_encoder_verify_environment() {
   if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
 
 # Runs simple_encoder using the codec specified by $1 with a frame limit of 100.
 simple_encoder() {
-  local encoder="${LIBAOM_BIN_PATH}/simple_encoder${VPX_TEST_EXE_SUFFIX}"
+  local encoder="${LIBAOM_BIN_PATH}/simple_encoder${AOM_TEST_EXE_SUFFIX}"
   local codec="$1"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/simple_encoder_${codec}.ivf"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/simple_encoder_${codec}.ivf"
 
   if [ ! -x "${encoder}" ]; then
     elog "${encoder} does not exist or is not executable."
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
+  eval "${AOM_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
       "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" 9999 0 100 \
       ${devnull}
 
   [ -e "${output_file}" ] || return 1
 }
 
-simple_encoder_vp8() {
-  if [ "$(vp8_encode_available)" = "yes" ]; then
-    simple_encoder vp8 || return 1
+simple_encoder_aom() {
+  if [ "$(aom_encode_available)" = "yes" ]; then
+    simple_encoder aom || return 1
   fi
 }
 
-simple_encoder_vp9() {
-  if [ "$(vp9_encode_available)" = "yes" ]; then
-    simple_encoder vp9 || return 1
+# TODO(tomfinegan): Add a frame limit param to simple_encoder and enable this
+# test. AV1 is just too slow right now: This test takes 4m30s+ on a fast
+# machine.
+DISABLED_simple_encoder_av1() {
+  if [ "$(av1_encode_available)" = "yes" ]; then
+    simple_encoder av1 || return 1
   fi
 }
 
-simple_encoder_tests="simple_encoder_vp8
-                      simple_encoder_vp9"
+simple_encoder_tests="simple_encoder_aom
+                      DISABLED_simple_encoder_av1"
 
 run_tests simple_encoder_verify_environment "${simple_encoder_tests}"
diff --git a/test/subtract_test.cc b/test/subtract_test.cc
index a6ba557..e1bf16c 100644
--- a/test/subtract_test.cc
+++ b/test/subtract_test.cc
@@ -10,16 +10,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
-#if CONFIG_VP10
+#if CONFIG_AV1
 #include "av1/common/blockd.h"
 #endif
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #define USE_SPEED_TEST (0)
@@ -31,14 +31,14 @@
 
 namespace {
 
-class VP9SubtractBlockTest : public ::testing::TestWithParam<SubtractFunc> {
+class AV1SubtractBlockTest : public ::testing::TestWithParam<SubtractFunc> {
  public:
   virtual void TearDown() { libaom_test::ClearSystemState(); }
 };
 
 using libaom_test::ACMRandom;
 
-TEST_P(VP9SubtractBlockTest, SimpleSubtract) {
+TEST_P(AV1SubtractBlockTest, SimpleSubtract) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
 
   // FIXME(rbultje) split in its own file
@@ -47,11 +47,11 @@
     const int block_width = 4 * num_4x4_blocks_wide_lookup[bsize];
     const int block_height = 4 * num_4x4_blocks_high_lookup[bsize];
     int16_t *diff = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(*diff) * block_width * block_height * 2));
+        aom_memalign(16, sizeof(*diff) * block_width * block_height * 2));
     uint8_t *pred = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, block_width * block_height * 2));
+        aom_memalign(16, block_width * block_height * 2));
     uint8_t *src = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, block_width * block_height * 2));
+        aom_memalign(16, block_width * block_height * 2));
 
     for (int n = 0; n < 100; n++) {
       for (int r = 0; r < block_height; ++r) {
@@ -84,26 +84,26 @@
         }
       }
     }
-    vpx_free(diff);
-    vpx_free(pred);
-    vpx_free(src);
+    aom_free(diff);
+    aom_free(pred);
+    aom_free(src);
   }
 }
 
-INSTANTIATE_TEST_CASE_P(C, VP9SubtractBlockTest,
-                        ::testing::Values(vpx_subtract_block_c));
+INSTANTIATE_TEST_CASE_P(C, AV1SubtractBlockTest,
+                        ::testing::Values(aom_subtract_block_c));
 
 #if HAVE_SSE2
-INSTANTIATE_TEST_CASE_P(SSE2, VP9SubtractBlockTest,
-                        ::testing::Values(vpx_subtract_block_sse2));
+INSTANTIATE_TEST_CASE_P(SSE2, AV1SubtractBlockTest,
+                        ::testing::Values(aom_subtract_block_sse2));
 #endif
 #if HAVE_NEON
-INSTANTIATE_TEST_CASE_P(NEON, VP9SubtractBlockTest,
-                        ::testing::Values(vpx_subtract_block_neon));
+INSTANTIATE_TEST_CASE_P(NEON, AV1SubtractBlockTest,
+                        ::testing::Values(aom_subtract_block_neon));
 #endif
 #if HAVE_MSA
-INSTANTIATE_TEST_CASE_P(MSA, VP9SubtractBlockTest,
-                        ::testing::Values(vpx_subtract_block_msa));
+INSTANTIATE_TEST_CASE_P(MSA, AV1SubtractBlockTest,
+                        ::testing::Values(aom_subtract_block_msa));
 #endif
 
 typedef void (*HBDSubtractFunc)(int rows, int cols, int16_t *diff_ptr,
@@ -118,13 +118,13 @@
 // <width, height, bit_dpeth, subtract>
 typedef tuple<int, int, int, HBDSubtractFunc> Params;
 
-#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HBDSubtractBlockTest : public ::testing::TestWithParam<Params> {
+#if CONFIG_AOM_HIGHBITDEPTH
+class AV1HBDSubtractBlockTest : public ::testing::TestWithParam<Params> {
  public:
   virtual void SetUp() {
     block_width_ = GET_PARAM(0);
     block_height_ = GET_PARAM(1);
-    bit_depth_ = static_cast<vpx_bit_depth_t>(GET_PARAM(2));
+    bit_depth_ = static_cast<aom_bit_depth_t>(GET_PARAM(2));
     func_ = GET_PARAM(3);
 
     rnd_.Reset(ACMRandom::DeterministicSeed());
@@ -132,17 +132,17 @@
     const size_t max_width = 128;
     const size_t max_block_size = max_width * max_width;
     src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, max_block_size * sizeof(uint16_t))));
+        aom_memalign(16, max_block_size * sizeof(uint16_t))));
     pred_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, max_block_size * sizeof(uint16_t))));
+        aom_memalign(16, max_block_size * sizeof(uint16_t))));
     diff_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, max_block_size * sizeof(int16_t)));
+        aom_memalign(16, max_block_size * sizeof(int16_t)));
   }
 
   virtual void TearDown() {
-    vpx_free(CONVERT_TO_SHORTPTR(src_));
-    vpx_free(CONVERT_TO_SHORTPTR(pred_));
-    vpx_free(diff_);
+    aom_free(CONVERT_TO_SHORTPTR(src_));
+    aom_free(CONVERT_TO_SHORTPTR(pred_));
+    aom_free(diff_);
   }
 
  protected:
@@ -153,14 +153,14 @@
   ACMRandom rnd_;
   int block_height_;
   int block_width_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   HBDSubtractFunc func_;
   uint8_t *src_;
   uint8_t *pred_;
   int16_t *diff_;
 };
 
-void VP10HBDSubtractBlockTest::RunForSpeed() {
+void AV1HBDSubtractBlockTest::RunForSpeed() {
   const int test_num = 200000;
   const int max_width = 128;
   const int max_block_size = max_width * max_width;
@@ -178,7 +178,7 @@
   }
 }
 
-void VP10HBDSubtractBlockTest::CheckResult() {
+void AV1HBDSubtractBlockTest::CheckResult() {
   const int test_num = 100;
   const int max_width = 128;
   const int max_block_size = max_width * max_width;
@@ -205,47 +205,47 @@
   }
 }
 
-TEST_P(VP10HBDSubtractBlockTest, CheckResult) { CheckResult(); }
+TEST_P(AV1HBDSubtractBlockTest, CheckResult) { CheckResult(); }
 
 #if USE_SPEED_TEST
-TEST_P(VP10HBDSubtractBlockTest, CheckSpeed) { RunForSpeed(); }
+TEST_P(AV1HBDSubtractBlockTest, CheckSpeed) { RunForSpeed(); }
 #endif  // USE_SPEED_TEST
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VP10HBDSubtractBlockTest,
-    ::testing::Values(make_tuple(4, 4, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(4, 4, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(4, 8, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(4, 8, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(8, 4, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(8, 4, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(8, 8, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(8, 8, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(8, 16, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(8, 16, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(16, 8, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(16, 8, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(16, 16, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(16, 16, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(16, 32, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(16, 32, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(32, 16, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(32, 16, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(32, 32, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(32, 32, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(32, 64, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(32, 64, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(64, 32, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(64, 32, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(64, 64, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(64, 64, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(64, 128, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(64, 128, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(128, 64, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(128, 64, 12, vpx_highbd_subtract_block_c),
-                      make_tuple(128, 128, 12, vpx_highbd_subtract_block_sse2),
-                      make_tuple(128, 128, 12, vpx_highbd_subtract_block_c)));
+    SSE2, AV1HBDSubtractBlockTest,
+    ::testing::Values(make_tuple(4, 4, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(4, 4, 12, aom_highbd_subtract_block_c),
+                      make_tuple(4, 8, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(4, 8, 12, aom_highbd_subtract_block_c),
+                      make_tuple(8, 4, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(8, 4, 12, aom_highbd_subtract_block_c),
+                      make_tuple(8, 8, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(8, 8, 12, aom_highbd_subtract_block_c),
+                      make_tuple(8, 16, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(8, 16, 12, aom_highbd_subtract_block_c),
+                      make_tuple(16, 8, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(16, 8, 12, aom_highbd_subtract_block_c),
+                      make_tuple(16, 16, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(16, 16, 12, aom_highbd_subtract_block_c),
+                      make_tuple(16, 32, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(16, 32, 12, aom_highbd_subtract_block_c),
+                      make_tuple(32, 16, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(32, 16, 12, aom_highbd_subtract_block_c),
+                      make_tuple(32, 32, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(32, 32, 12, aom_highbd_subtract_block_c),
+                      make_tuple(32, 64, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(32, 64, 12, aom_highbd_subtract_block_c),
+                      make_tuple(64, 32, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(64, 32, 12, aom_highbd_subtract_block_c),
+                      make_tuple(64, 64, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(64, 64, 12, aom_highbd_subtract_block_c),
+                      make_tuple(64, 128, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(64, 128, 12, aom_highbd_subtract_block_c),
+                      make_tuple(128, 64, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(128, 64, 12, aom_highbd_subtract_block_c),
+                      make_tuple(128, 128, 12, aom_highbd_subtract_block_sse2),
+                      make_tuple(128, 128, 12, aom_highbd_subtract_block_c)));
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/sum_squares_test.cc b/test/sum_squares_test.cc
index c4ddf57..d651072 100644
--- a/test/sum_squares_test.cc
+++ b/test/sum_squares_test.cc
@@ -14,8 +14,8 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
@@ -120,8 +120,8 @@
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, SumSquaresTest,
-    ::testing::Values(TestFuncs(&vpx_sum_squares_2d_i16_c,
-                                &vpx_sum_squares_2d_i16_sse2)));
+    ::testing::Values(TestFuncs(&aom_sum_squares_2d_i16_c,
+                                &aom_sum_squares_2d_i16_sse2)));
 
 #endif  // HAVE_SSE2
 
@@ -180,7 +180,7 @@
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, SumSquares1DTest,
                         ::testing::Values(TestFuncs1D(
-                            vpx_sum_squares_i16_c, vpx_sum_squares_i16_sse2)));
+                            aom_sum_squares_i16_c, aom_sum_squares_i16_sse2)));
 
 #endif  // HAVE_SSE2
 }  // namespace
diff --git a/test/superframe_test.cc b/test/superframe_test.cc
index 2f7132a..0eae40a 100644
--- a/test/superframe_test.cc
+++ b/test/superframe_test.cc
@@ -40,7 +40,7 @@
     SetMode(mode);
     sf_count_ = 0;
     sf_count_max_ = INT_MAX;
-    is_vp10_style_superframe_ = syntax;
+    is_av1_style_superframe_ = syntax;
     n_tile_cols_ = std::tr1::get<kTileCols>(input);
     n_tile_rows_ = std::tr1::get<kTileRows>(input);
   }
@@ -50,23 +50,22 @@
   virtual void PreEncodeFrameHook(libaom_test::VideoSource *video,
                                   libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
-      encoder->Control(VP8E_SET_CPUUSED, 2);
-      encoder->Control(VP9E_SET_TILE_COLUMNS, n_tile_cols_);
-      encoder->Control(VP9E_SET_TILE_ROWS, n_tile_rows_);
+      encoder->Control(AOME_SET_ENABLEAUTOALTREF, 1);
+      encoder->Control(AOME_SET_CPUUSED, 2);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, n_tile_cols_);
+      encoder->Control(AV1E_SET_TILE_ROWS, n_tile_rows_);
     }
   }
 
-  virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
-      const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return pkt;
+  virtual const aom_codec_cx_pkt_t *MutateEncoderOutputHook(
+      const aom_codec_cx_pkt_t *pkt) {
+    if (pkt->kind != AOM_CODEC_CX_FRAME_PKT) return pkt;
 
     const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
     const uint8_t marker = buffer[pkt->data.frame.sz - 1];
     const int frames = (marker & 0x7) + 1;
     const int mag = ((marker >> 3) & 3) + 1;
-    const unsigned int index_sz =
-        2 + mag * (frames - is_vp10_style_superframe_);
+    const unsigned int index_sz = 2 + mag * (frames - is_av1_style_superframe_);
     if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
         buffer[pkt->data.frame.sz - index_sz] == marker) {
       // frame is a superframe. strip off the index.
@@ -88,12 +87,12 @@
     return pkt;
   }
 
-  int is_vp10_style_superframe_;
+  int is_av1_style_superframe_;
   int sf_count_;
   int sf_count_max_;
-  vpx_codec_cx_pkt_t modified_pkt_;
+  aom_codec_cx_pkt_t modified_pkt_;
   uint8_t *modified_buf_;
-  vpx_codec_pts_t last_sf_pts_;
+  aom_codec_pts_t last_sf_pts_;
 
  private:
   int n_tile_cols_;
@@ -126,7 +125,7 @@
 const int tile_col_values[] = { 1, 2, 32 };
 #endif
 const int tile_row_values[] = { 1, 2, 32 };
-VP10_INSTANTIATE_TEST_CASE(
+AV1_INSTANTIATE_TEST_CASE(
     SuperframeTest,
     ::testing::Combine(::testing::Values(::libaom_test::kTwoPassGood),
                        ::testing::Values(1),
@@ -134,7 +133,7 @@
                        ::testing::ValuesIn(tile_row_values)));
 #else
 #if !CONFIG_ANS
-VP10_INSTANTIATE_TEST_CASE(
+AV1_INSTANTIATE_TEST_CASE(
     SuperframeTest,
     ::testing::Combine(::testing::Values(::libaom_test::kTwoPassGood),
                        ::testing::Values(1), ::testing::Values(0),
diff --git a/test/test-data.mk b/test/test-data.mk
index fd33627..3936024 100644
--- a/test/test-data.mk
+++ b/test/test-data.mk
@@ -18,99 +18,49 @@
 LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_444.y4m
 LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_440.yuv
 
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += desktop_credits.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += rush_hour_444.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += screendata.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += desktop_credits.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_1280_720_30.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += rush_hour_444.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += screendata.y4m
 
 ifeq ($(CONFIG_DECODE_PERF_TESTS),yes)
 # Encode / Decode test
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.yuv
-# BBB VP9 streams
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_426x240_tile_1x1_180kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_640x360_tile_1x2_337kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_854x480_tile_1x2_651kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1280x720_tile_1x4_1310kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x1_2581kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_2586kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm
-# Sintel VP9 streams
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_426x182_tile_1x1_171kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_640x272_tile_1x2_318kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_854x364_tile_1x2_621kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1280x546_tile_1x4_1257kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm
-# TOS VP9 streams
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_426x178_tile_1x1_181kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_640x266_tile_1x2_336kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_656kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_fpm_546kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_1306kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_fpm_952kbps.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_1280_720_30.yuv
+# BBB AV1 streams
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_426x240_tile_1x1_180kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_640x360_tile_1x2_337kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_854x480_tile_1x2_651kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_1280x720_tile_1x4_1310kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_1920x1080_tile_1x1_2581kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_1920x1080_tile_1x4_2586kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm
+# Sintel AV1 streams
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-sintel_426x182_tile_1x1_171kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-sintel_640x272_tile_1x2_318kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-sintel_854x364_tile_1x2_621kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-sintel_1280x546_tile_1x4_1257kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm
+# TOS AV1 streams
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_426x178_tile_1x1_181kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_640x266_tile_1x2_336kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_854x356_tile_1x2_656kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_854x356_tile_1x2_fpm_546kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_1280x534_tile_1x4_1306kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_1280x534_tile_1x4_fpm_952kbps.webm
+LIBAOM_TEST_DATA-$(CONFIG_AV1_DECODER) += av10-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm
 endif  # CONFIG_DECODE_PERF_TESTS
 
 ifeq ($(CONFIG_ENCODE_PERF_TESTS),yes)
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += desktop_640_360_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += kirland_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += macmarcomoving_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += macmarcostationary_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += tacomanarrows_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += tacomasmallcameramovement_640_480_30.yuv
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += thaloundeskmtg_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += desktop_640_360_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += kirland_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += macmarcomoving_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += macmarcostationary_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_1280_720_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += tacomanarrows_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += tacomasmallcameramovement_640_480_30.yuv
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += thaloundeskmtg_640_480_30.yuv
 endif  # CONFIG_ENCODE_PERF_TESTS
 
 # sort and remove duplicates
 LIBAOM_TEST_DATA-yes := $(sort $(LIBAOM_TEST_DATA-yes))
-
-# VP9 dynamic resizing test (decoder)
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_3-4.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_1-2.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_1-2.webm.md5
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_3-4.webm
-LIBAOM_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_3-4.webm.md5
diff --git a/test/test-data.sha1 b/test/test-data.sha1
index c958444..ac4e506 100644
--- a/test/test-data.sha1
+++ b/test/test-data.sha1
@@ -1,19 +1,19 @@
 d5dfb0151c9051f8c85999255645d7a23916d3c0 *hantro_collage_w352h288.yuv
 b87815bf86020c592ccc7a846ba2e28ec8043902 *hantro_odd.yuv
-76024eb753cdac6a5e5703aaea189d35c3c30ac7 *invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf
-7448d8798a4380162d4b56f9b452e2f6f9e24e7a *invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf.res
-83f50908c8dc0ef8760595447a2ff7727489542e *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res
-c123d1f9f02fb4143abb5e271916e3a3080de8f6 *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res
-fe346136b9b8c1e6f6084cc106485706915795e4 *invalid-vp90-01-v3.webm
-5d9474c0309b7ca09a182d888f73b37a8fe1362c *invalid-vp90-01-v3.webm.res
-d78e2fceba5ac942246503ec8366f879c4775ca5 *invalid-vp90-02-v2.webm
-8e2eff4af87d2b561cce2365713269e301457ef3 *invalid-vp90-02-v2.webm.res
-df1a1453feb3c00d7d89746c7003b4163523bff3 *invalid-vp90-03-v3.webm
-4935c62becc68c13642a03db1e6d3e2331c1c612 *invalid-vp90-03-v3.webm.res
-d637297561dd904eb2c97a9015deeb31c4a1e8d2 *invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm
-3a204bdbeaa3c6458b77bcebb8366d107267f55d *invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm.res
+76024eb753cdac6a5e5703aaea189d35c3c30ac7 *invalid-av10-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf
+7448d8798a4380162d4b56f9b452e2f6f9e24e7a *invalid-av10-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf.res
+83f50908c8dc0ef8760595447a2ff7727489542e *invalid-av10-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf
+456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-av10-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res
+c123d1f9f02fb4143abb5e271916e3a3080de8f6 *invalid-av10-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf
+456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-av10-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res
+fe346136b9b8c1e6f6084cc106485706915795e4 *invalid-av10-01-v3.webm
+5d9474c0309b7ca09a182d888f73b37a8fe1362c *invalid-av10-01-v3.webm.res
+d78e2fceba5ac942246503ec8366f879c4775ca5 *invalid-av10-02-v2.webm
+8e2eff4af87d2b561cce2365713269e301457ef3 *invalid-av10-02-v2.webm.res
+df1a1453feb3c00d7d89746c7003b4163523bff3 *invalid-av10-03-v3.webm
+4935c62becc68c13642a03db1e6d3e2331c1c612 *invalid-av10-03-v3.webm.res
+d637297561dd904eb2c97a9015deeb31c4a1e8d2 *invalid-av10-2-08-tile_1x4_frame_parallel_all_key.webm
+3a204bdbeaa3c6458b77bcebb8366d107267f55d *invalid-av10-2-08-tile_1x4_frame_parallel_all_key.webm.res
 a432f96ff0a787268e2f94a8092ab161a18d1b06 *park_joy_90p_10_420.y4m
 0b194cc312c3a2e84d156a221b0a5eb615dfddc5 *park_joy_90p_10_422.y4m
 ff0e0a21dc2adc95b8c1b37902713700655ced17 *park_joy_90p_10_444.y4m
@@ -38,30 +38,30 @@
 9a70e8b7d14fba9234d0e51dce876635413ce444 *thaloundeskmtg_640_480_30.yuv
 e7d315dbf4f3928779e0dc624311196d44491d32 *niklas_1280_720_30.yuv
 717da707afcaa1f692ff1946f291054eb75a4f06 *screendata.y4m
-b7c1296630cdf1a7ef493d15ff4f9eb2999202f6 *invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf.res
-359e138dfb66863828397b77000ea7a83c844d02 *invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf
-bbd33de01c17b165b4ce00308e8a19a942023ab8 *invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf.res
-fac89b5735be8a86b0dc05159f996a5c3208ae32 *invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf.res
-4506dfdcdf8ee4250924b075a0dcf1f070f72e5a *invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf
-bcdedaf168ac225575468fda77502d2dc9fd5baa *invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf.res
-b03c408cf23158638da18dbc3323b99a1635c68a *invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf.res
-5e67e24e7f53fd189e565513cef8519b1bd6c712 *invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf
-741158f67c0d9d23726624d06bdc482ad368afc9 *invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf.res
-8b1f7bf7e86c0976d277f60e8fcd9539e75a079a *invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf
-9c6bdf048fb2e66f07d4b4db5b32e6f303bd6109 *invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf.res
-552e372e9b78127389fb06b34545df2cec15ba6d *invalid-vp91-2-mixedrefcsp-444to420.ivf
-a61774cf03fc584bd9f0904fc145253bb8ea6c4c *invalid-vp91-2-mixedrefcsp-444to420.ivf.res
-812d05a64a0d83c1b504d0519927ddc5a2cdb273 *invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
-1e472baaf5f6113459f0399a38a5a5e68d17799d *invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
-efd5a51d175cfdacd169ed23477729dc558030dc *invalid-vp90-2-07-frame_parallel-1.webm
-9f912712ec418be69adb910e2ca886a63c4cec08 *invalid-vp90-2-07-frame_parallel-2.webm
-445f5a53ca9555341852997ccdd480a51540bd14 *invalid-vp90-2-07-frame_parallel-3.webm
-d18c90709a0d03c82beadf10898b27d88fff719c *invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf
-d06285d109ecbaef63b0cbcc44d70a129186f51c *invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf.res
-e60d859b0ef2b331b21740cf6cb83fabe469b079 *invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf
-0ae808dca4d3c1152a9576e14830b6faa39f1b4a *invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf.res
+b7c1296630cdf1a7ef493d15ff4f9eb2999202f6 *invalid-av10-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-av10-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf.res
+359e138dfb66863828397b77000ea7a83c844d02 *invalid-av10-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf
+bbd33de01c17b165b4ce00308e8a19a942023ab8 *invalid-av10-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf.res
+fac89b5735be8a86b0dc05159f996a5c3208ae32 *invalid-av10-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-av10-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf.res
+4506dfdcdf8ee4250924b075a0dcf1f070f72e5a *invalid-av10-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf
+bcdedaf168ac225575468fda77502d2dc9fd5baa *invalid-av10-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf.res
+b03c408cf23158638da18dbc3323b99a1635c68a *invalid-av10-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-av10-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf.res
+5e67e24e7f53fd189e565513cef8519b1bd6c712 *invalid-av10-2-05-resize.ivf.s59293_r01-05_b6-.ivf
+741158f67c0d9d23726624d06bdc482ad368afc9 *invalid-av10-2-05-resize.ivf.s59293_r01-05_b6-.ivf.res
+8b1f7bf7e86c0976d277f60e8fcd9539e75a079a *invalid-av10-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf
+9c6bdf048fb2e66f07d4b4db5b32e6f303bd6109 *invalid-av10-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf.res
+552e372e9b78127389fb06b34545df2cec15ba6d *invalid-av11-2-mixedrefcsp-444to420.ivf
+a61774cf03fc584bd9f0904fc145253bb8ea6c4c *invalid-av11-2-mixedrefcsp-444to420.ivf.res
+812d05a64a0d83c1b504d0519927ddc5a2cdb273 *invalid-av10-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
+1e472baaf5f6113459f0399a38a5a5e68d17799d *invalid-av10-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
+efd5a51d175cfdacd169ed23477729dc558030dc *invalid-av10-2-07-frame_parallel-1.webm
+9f912712ec418be69adb910e2ca886a63c4cec08 *invalid-av10-2-07-frame_parallel-2.webm
+445f5a53ca9555341852997ccdd480a51540bd14 *invalid-av10-2-07-frame_parallel-3.webm
+d18c90709a0d03c82beadf10898b27d88fff719c *invalid-av10-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf
+d06285d109ecbaef63b0cbcc44d70a129186f51c *invalid-av10-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf.res
+e60d859b0ef2b331b21740cf6cb83fabe469b079 *invalid-av10-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf
+0ae808dca4d3c1152a9576e14830b6faa39f1b4a *invalid-av10-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf.res
 9cfc855459e7549fd015c79e8eca512b2f2cb7e3 *niklas_1280_720_30.y4m
 5b5763b388b1b52a81bb82b39f7ec25c4bd3d0e1 *desktop_credits.y4m
diff --git a/test/test.mk b/test/test.mk
index d7f2391..ba41e26 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -4,7 +4,7 @@
 LIBAOM_TEST_SRCS-yes += md5_helper.h
 LIBAOM_TEST_SRCS-yes += register_state_check.h
 LIBAOM_TEST_SRCS-yes += test.mk
-LIBAOM_TEST_SRCS-yes += test_libvpx.cc
+LIBAOM_TEST_SRCS-yes += test_libaom.cc
 LIBAOM_TEST_SRCS-yes += util.h
 LIBAOM_TEST_SRCS-yes += video_source.h
 LIBAOM_TEST_SRCS-yes += transform_test_base.h
@@ -29,14 +29,14 @@
 LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS)    += y4m_video_source.h
 LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS)    += yuv_video_source.h
 
-#LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += level_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_refresh_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += borders_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += cpu_speed_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += frame_size_tests.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += lossless_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += ethread_test.cc
+#LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += level_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_refresh_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += borders_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += cpu_speed_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += frame_size_tests.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += lossless_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += ethread_test.cc
 
 LIBAOM_TEST_SRCS-yes                   += decode_test_driver.cc
 LIBAOM_TEST_SRCS-yes                   += decode_test_driver.h
@@ -64,20 +64,20 @@
 
 LIBAOM_TEST_SRCS-$(CONFIG_DECODERS)    += decode_api_test.cc
 
-# Currently we only support decoder perf tests for vp9. Also they read from WebM
+# Currently we only support decoder perf tests for av1. Also they read from WebM
 # files, so WebM IO is required.
-ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_VP10_DECODER)$(CONFIG_WEBM_IO), \
+ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_AV1_DECODER)$(CONFIG_WEBM_IO), \
       yesyesyes)
 LIBAOM_TEST_SRCS-yes                   += decode_perf_test.cc
 endif
 
-# encode perf tests are vp9 only
-ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_VP10_ENCODER), yesyes)
+# encode perf tests are av1 only
+ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_AV1_ENCODER), yesyes)
 LIBAOM_TEST_SRCS-yes += encode_perf_test.cc
 endif
 
 ## Multi-codec / unconditional black box tests.
-ifeq ($(findstring yes,$(CONFIG_VP10_ENCODER)),yes)
+ifeq ($(findstring yes,$(CONFIG_AV1_ENCODER)),yes)
 LIBAOM_TEST_SRCS-yes += active_map_refresh_test.cc
 LIBAOM_TEST_SRCS-yes += active_map_test.cc
 LIBAOM_TEST_SRCS-yes += end_to_end_test.cc
@@ -91,11 +91,11 @@
 ##
 ifeq ($(CONFIG_SHARED),)
 
-## VP10
-ifeq ($(CONFIG_VP10),yes)
+## AV1
+ifeq ($(CONFIG_AV1),yes)
 
 # These tests require both the encoder and decoder to be built.
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_DECODER),yesyes)
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_DECODER),yesyes)
 # IDCT test currently depends on FDCT function
 LIBAOM_TEST_SRCS-yes                   += idct8x8_test.cc
 LIBAOM_TEST_SRCS-yes                   += partial_idct_test.cc
@@ -109,41 +109,41 @@
 #LIBAOM_TEST_SRCS-yes                   += convolve_test.cc
 LIBAOM_TEST_SRCS-yes                   += lpf_8_test.cc
 LIBAOM_TEST_SRCS-yes                   += intrapred_test.cc
-#LIBAOM_TEST_SRCS-$(CONFIG_VP10_DECODER) += vp9_thread_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct16x16_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct32x32_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct4x4_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct8x8_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += hadamard_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += minmax_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += variance_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += error_block_test.cc
-#LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp9_quantize_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += subtract_test.cc
+#LIBAOM_TEST_SRCS-$(CONFIG_AV1_DECODER) += av1_thread_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct16x16_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct32x32_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct4x4_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct8x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += hadamard_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += minmax_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += variance_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += error_block_test.cc
+#LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_quantize_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += subtract_test.cc
 
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_TEMPORAL_DENOISING),yesyes)
-LIBAOM_TEST_SRCS-$(HAVE_SSE2) += denoiser_sse2_test.cc
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_TEMPORAL_DENOISING),yesyes)
+#LIBAOM_TEST_SRCS-$(HAVE_SSE2) += denoiser_sse2_test.cc
 endif
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += arf_freq_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += arf_freq_test.cc
 
 
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_inv_txfm_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_dct_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_fht4x4_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_fht8x8_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_fht16x16_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_ANS)          += vp10_ans_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_EXT_TILE)     += vp10_ext_tile_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_inv_txfm_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_dct_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht4x4_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x16_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_ANS)          += av1_ans_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_EXT_TILE)     += av1_ext_tile_test.cc
 
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += sum_squares_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += subtract_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += blend_a64_mask_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += blend_a64_mask_1d_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += sum_squares_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += subtract_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += blend_a64_mask_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += blend_a64_mask_1d_test.cc
 
 ifeq ($(CONFIG_EXT_INTER),yes)
 LIBAOM_TEST_SRCS-$(HAVE_SSSE3) += masked_variance_test.cc
 LIBAOM_TEST_SRCS-$(HAVE_SSSE3) += masked_sad_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_wedge_utils_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_wedge_utils_test.cc
 endif
 
 ifeq ($(CONFIG_EXT_INTRA),yes)
@@ -151,33 +151,33 @@
 endif
 
 ifeq ($(CONFIG_OBMC),yes)
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += obmc_sad_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += obmc_variance_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += obmc_sad_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += obmc_variance_test.cc
 endif
 
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-LIBAOM_TEST_SRCS-$(HAVE_SSE4_1) += vp10_quantize_test.cc
-LIBAOM_TEST_SRCS-$(HAVE_SSE4_1) += vp10_highbd_iht_test.cc
-endif # CONFIG_VP9_HIGHBITDEPTH
-endif # VP10
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+LIBAOM_TEST_SRCS-$(HAVE_SSE4_1) += av1_quantize_test.cc
+LIBAOM_TEST_SRCS-$(HAVE_SSE4_1) += av1_highbd_iht_test.cc
+endif # CONFIG_AOM_HIGHBITDEPTH
+endif # AV1
 
 ## Multi-codec / unconditional whitebox tests.
 
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
 LIBAOM_TEST_SRCS-yes += avg_test.cc
 endif
 ifeq ($(CONFIG_INTERNAL_STATS),yes)
-LIBAOM_TEST_SRCS-$(CONFIG_VP9_HIGHBITDEPTH) += hbd_metrics_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AOM_HIGHBITDEPTH) += hbd_metrics_test.cc
 endif
 LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_txfm_test.h
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_txfm_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_fwd_txfm1d_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_inv_txfm1d_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_fwd_txfm2d_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_inv_txfm2d_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_convolve_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10) += vp10_convolve_optimz_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_txfm_test.h
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_txfm_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_fwd_txfm1d_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_inv_txfm1d_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_fwd_txfm2d_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_inv_txfm2d_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_convolve_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_convolve_optimz_test.cc
 
 TEST_INTRA_PRED_SPEED_SRCS-yes := test_intra_pred_speed.cc
 TEST_INTRA_PRED_SPEED_SRCS-yes += ../md5_utils.h ../md5_utils.c
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 41bc4ec..5f08dec 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -7,36 +7,36 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-//  Test and time VPX intra-predictor functions
+//  Test and time AOM intra-predictor functions
 
 #include <stdio.h>
 #include <string.h>
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/md5_helper.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
-#include "aom_ports/vpx_timer.h"
+#include "aom_ports/aom_timer.h"
 
 // -----------------------------------------------------------------------------
 
 namespace {
 
-typedef void (*VpxPredFunc)(uint8_t *dst, ptrdiff_t y_stride,
+typedef void (*AvxPredFunc)(uint8_t *dst, ptrdiff_t y_stride,
                             const uint8_t *above, const uint8_t *left);
 
-const int kNumVp9IntraPredFuncs = 13;
-const char *kVp9IntraPredNames[kNumVp9IntraPredFuncs] = {
+const int kNumAv1IntraPredFuncs = 13;
+const char *kAv1IntraPredNames[kNumAv1IntraPredFuncs] = {
   "DC_PRED",   "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED",
   "H_PRED",    "D45_PRED",     "D135_PRED",   "D117_PRED",   "D153_PRED",
   "D207_PRED", "D63_PRED",     "TM_PRED"
 };
 
-void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs,
+void TestIntraPred(const char name[], AvxPredFunc const *pred_funcs,
                    const char *const pred_func_names[], int num_funcs,
                    const char *const signatures[], int block_size,
                    int num_pixels_per_test) {
@@ -62,15 +62,15 @@
   for (int k = 0; k < num_funcs; ++k) {
     if (pred_funcs[k] == NULL) continue;
     memcpy(src, ref_src, sizeof(src));
-    vpx_usec_timer timer;
-    vpx_usec_timer_start(&timer);
+    aom_usec_timer timer;
+    aom_usec_timer_start(&timer);
     for (int num_tests = 0; num_tests < kNumTests; ++num_tests) {
       pred_funcs[k](src, kBPS, above, left);
     }
     libaom_test::ClearSystemState();
-    vpx_usec_timer_mark(&timer);
+    aom_usec_timer_mark(&timer);
     const int elapsed_time =
-        static_cast<int>(vpx_usec_timer_elapsed(&timer) / 1000);
+        static_cast<int>(aom_usec_timer_elapsed(&timer) / 1000);
     libaom_test::MD5 md5;
     md5.Add(src, sizeof(src));
     printf("Mode %s[%12s]: %5d ms     MD5: %s\n", name, pred_func_names[k],
@@ -79,9 +79,9 @@
   }
 }
 
-void TestIntraPred4(VpxPredFunc const *pred_funcs) {
-  static const int kNumVp9IntraFuncs = 13;
-  static const char *const kSignatures[kNumVp9IntraFuncs] = {
+void TestIntraPred4(AvxPredFunc const *pred_funcs) {
+  static const int kNumAv1IntraFuncs = 13;
+  static const char *const kSignatures[kNumAv1IntraFuncs] = {
     "4334156168b34ab599d9b5b30f522fe9", "bc4649d5ba47c7ff178d92e475960fb0",
     "8d316e5933326dcac24e1064794b5d12", "a27270fed024eafd762c95de85f4da51",
     "c33dff000d4256c2b8f3bf9e9bab14d2", "44d8cddc2ad8f79b8ed3306051722b4f",
@@ -90,13 +90,13 @@
     "c56d5e8c729e46825f46dd5d3b5d508a", "c0889e2039bcf7bcb5d2f33cdca69adc",
     "309a618577b27c648f9c5ee45252bc8f",
   };
-  TestIntraPred("Intra4", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
-                kSignatures, 4, 4 * 4 * kNumVp9IntraFuncs);
+  TestIntraPred("Intra4", pred_funcs, kAv1IntraPredNames, kNumAv1IntraFuncs,
+                kSignatures, 4, 4 * 4 * kNumAv1IntraFuncs);
 }
 
-void TestIntraPred8(VpxPredFunc const *pred_funcs) {
-  static const int kNumVp9IntraFuncs = 13;
-  static const char *const kSignatures[kNumVp9IntraFuncs] = {
+void TestIntraPred8(AvxPredFunc const *pred_funcs) {
+  static const int kNumAv1IntraFuncs = 13;
+  static const char *const kSignatures[kNumAv1IntraFuncs] = {
     "7694ddeeefed887faf9d339d18850928", "7d726b1213591b99f736be6dec65065b",
     "19c5711281357a485591aaf9c96c0a67", "ba6b66877a089e71cd938e3b8c40caac",
     "802440c93317e0f8ba93fab02ef74265", "9e09a47a15deb0b9d8372824f9805080",
@@ -105,13 +105,13 @@
     "08323400005a297f16d7e57e7fe1eaac", "95f7bfc262329a5849eda66d8f7c68ce",
     "815b75c8e0d91cc1ae766dc5d3e445a3",
   };
-  TestIntraPred("Intra8", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
-                kSignatures, 8, 8 * 8 * kNumVp9IntraFuncs);
+  TestIntraPred("Intra8", pred_funcs, kAv1IntraPredNames, kNumAv1IntraFuncs,
+                kSignatures, 8, 8 * 8 * kNumAv1IntraFuncs);
 }
 
-void TestIntraPred16(VpxPredFunc const *pred_funcs) {
-  static const int kNumVp9IntraFuncs = 13;
-  static const char *const kSignatures[kNumVp9IntraFuncs] = {
+void TestIntraPred16(AvxPredFunc const *pred_funcs) {
+  static const int kNumAv1IntraFuncs = 13;
+  static const char *const kSignatures[kNumAv1IntraFuncs] = {
     "b40dbb555d5d16a043dc361e6694fe53", "fb08118cee3b6405d64c1fd68be878c6",
     "6c190f341475c837cc38c2e566b64875", "db5c34ccbe2c7f595d9b08b0dc2c698c",
     "a62cbfd153a1f0b9fed13e62b8408a7a", "143df5b4c89335e281103f610f5052e4",
@@ -120,13 +120,13 @@
     "b9f69fa6532b372c545397dcb78ef311", "a8fe1c70432f09d0c20c67bdb6432c4d",
     "b8a41aa968ec108af447af4217cba91b",
   };
-  TestIntraPred("Intra16", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
-                kSignatures, 16, 16 * 16 * kNumVp9IntraFuncs);
+  TestIntraPred("Intra16", pred_funcs, kAv1IntraPredNames, kNumAv1IntraFuncs,
+                kSignatures, 16, 16 * 16 * kNumAv1IntraFuncs);
 }
 
-void TestIntraPred32(VpxPredFunc const *pred_funcs) {
-  static const int kNumVp9IntraFuncs = 13;
-  static const char *const kSignatures[kNumVp9IntraFuncs] = {
+void TestIntraPred32(AvxPredFunc const *pred_funcs) {
+  static const int kNumAv1IntraFuncs = 13;
+  static const char *const kSignatures[kNumAv1IntraFuncs] = {
     "558541656d84f9ae7896db655826febe", "b3587a1f9a01495fa38c8cd3c8e2a1bf",
     "4c6501e64f25aacc55a2a16c7e8f0255", "b3b01379ba08916ef6b1b35f7d9ad51c",
     "0f1eb38b6cbddb3d496199ef9f329071", "911c06efb9ed1c3b4c104b232b55812f",
@@ -135,8 +135,8 @@
     "ed012a4a5da71f36c2393023184a0e59", "f162b51ed618d28b936974cff4391da5",
     "9e1370c6d42e08d357d9612c93a71cfc",
   };
-  TestIntraPred("Intra32", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
-                kSignatures, 32, 32 * 32 * kNumVp9IntraFuncs);
+  TestIntraPred("Intra32", pred_funcs, kAv1IntraPredNames, kNumAv1IntraFuncs,
+                kSignatures, 32, 32 * 32 * kNumAv1IntraFuncs);
 }
 
 }  // namespace
@@ -146,201 +146,201 @@
 #define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h,   \
                         d45, d135, d117, d153, d207, d63, tm)                 \
   TEST(arch, test_func) {                                                     \
-    static const VpxPredFunc vpx_intra_pred[] = {                             \
+    static const AvxPredFunc aom_intra_pred[] = {                             \
       dc, dc_left, dc_top, dc_128, v, h, d45, d135, d117, d153, d207, d63, tm \
     };                                                                        \
-    test_func(vpx_intra_pred);                                                \
+    test_func(aom_intra_pred);                                                \
   }
 
 // -----------------------------------------------------------------------------
 // 4x4
 
-INTRA_PRED_TEST(C, TestIntraPred4, vpx_dc_predictor_4x4_c,
-                vpx_dc_left_predictor_4x4_c, vpx_dc_top_predictor_4x4_c,
-                vpx_dc_128_predictor_4x4_c, vpx_v_predictor_4x4_c,
-                vpx_h_predictor_4x4_c, vpx_d45_predictor_4x4_c,
-                vpx_d135_predictor_4x4_c, vpx_d117_predictor_4x4_c,
-                vpx_d153_predictor_4x4_c, vpx_d207_predictor_4x4_c,
-                vpx_d63_predictor_4x4_c, vpx_tm_predictor_4x4_c)
+INTRA_PRED_TEST(C, TestIntraPred4, aom_dc_predictor_4x4_c,
+                aom_dc_left_predictor_4x4_c, aom_dc_top_predictor_4x4_c,
+                aom_dc_128_predictor_4x4_c, aom_v_predictor_4x4_c,
+                aom_h_predictor_4x4_c, aom_d45_predictor_4x4_c,
+                aom_d135_predictor_4x4_c, aom_d117_predictor_4x4_c,
+                aom_d153_predictor_4x4_c, aom_d207_predictor_4x4_c,
+                aom_d63_predictor_4x4_c, aom_tm_predictor_4x4_c)
 
 #if HAVE_SSE2
-INTRA_PRED_TEST(SSE2, TestIntraPred4, vpx_dc_predictor_4x4_sse2,
-                vpx_dc_left_predictor_4x4_sse2, vpx_dc_top_predictor_4x4_sse2,
-                vpx_dc_128_predictor_4x4_sse2, vpx_v_predictor_4x4_sse2,
-                vpx_h_predictor_4x4_sse2, vpx_d45_predictor_4x4_sse2, NULL,
-                NULL, NULL, vpx_d207_predictor_4x4_sse2, NULL,
-                vpx_tm_predictor_4x4_sse2)
+INTRA_PRED_TEST(SSE2, TestIntraPred4, aom_dc_predictor_4x4_sse2,
+                aom_dc_left_predictor_4x4_sse2, aom_dc_top_predictor_4x4_sse2,
+                aom_dc_128_predictor_4x4_sse2, aom_v_predictor_4x4_sse2,
+                aom_h_predictor_4x4_sse2, aom_d45_predictor_4x4_sse2, NULL,
+                NULL, NULL, aom_d207_predictor_4x4_sse2, NULL,
+                aom_tm_predictor_4x4_sse2)
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
 INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-                NULL, NULL, vpx_d153_predictor_4x4_ssse3, NULL,
-                vpx_d63_predictor_4x4_ssse3, NULL)
+                NULL, NULL, aom_d153_predictor_4x4_ssse3, NULL,
+                aom_d63_predictor_4x4_ssse3, NULL)
 #endif  // HAVE_SSSE3
 
 #if HAVE_DSPR2
-INTRA_PRED_TEST(DSPR2, TestIntraPred4, vpx_dc_predictor_4x4_dspr2, NULL, NULL,
-                NULL, NULL, vpx_h_predictor_4x4_dspr2, NULL, NULL, NULL, NULL,
-                NULL, NULL, vpx_tm_predictor_4x4_dspr2)
+INTRA_PRED_TEST(DSPR2, TestIntraPred4, aom_dc_predictor_4x4_dspr2, NULL, NULL,
+                NULL, NULL, aom_h_predictor_4x4_dspr2, NULL, NULL, NULL, NULL,
+                NULL, NULL, aom_tm_predictor_4x4_dspr2)
 #endif  // HAVE_DSPR2
 
 #if HAVE_NEON
-INTRA_PRED_TEST(NEON, TestIntraPred4, vpx_dc_predictor_4x4_neon,
-                vpx_dc_left_predictor_4x4_neon, vpx_dc_top_predictor_4x4_neon,
-                vpx_dc_128_predictor_4x4_neon, vpx_v_predictor_4x4_neon,
-                vpx_h_predictor_4x4_neon, vpx_d45_predictor_4x4_neon,
-                vpx_d135_predictor_4x4_neon, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_4x4_neon)
+INTRA_PRED_TEST(NEON, TestIntraPred4, aom_dc_predictor_4x4_neon,
+                aom_dc_left_predictor_4x4_neon, aom_dc_top_predictor_4x4_neon,
+                aom_dc_128_predictor_4x4_neon, aom_v_predictor_4x4_neon,
+                aom_h_predictor_4x4_neon, aom_d45_predictor_4x4_neon,
+                aom_d135_predictor_4x4_neon, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_4x4_neon)
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
-INTRA_PRED_TEST(MSA, TestIntraPred4, vpx_dc_predictor_4x4_msa,
-                vpx_dc_left_predictor_4x4_msa, vpx_dc_top_predictor_4x4_msa,
-                vpx_dc_128_predictor_4x4_msa, vpx_v_predictor_4x4_msa,
-                vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_4x4_msa)
+INTRA_PRED_TEST(MSA, TestIntraPred4, aom_dc_predictor_4x4_msa,
+                aom_dc_left_predictor_4x4_msa, aom_dc_top_predictor_4x4_msa,
+                aom_dc_128_predictor_4x4_msa, aom_v_predictor_4x4_msa,
+                aom_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_4x4_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
 // 8x8
 
-INTRA_PRED_TEST(C, TestIntraPred8, vpx_dc_predictor_8x8_c,
-                vpx_dc_left_predictor_8x8_c, vpx_dc_top_predictor_8x8_c,
-                vpx_dc_128_predictor_8x8_c, vpx_v_predictor_8x8_c,
-                vpx_h_predictor_8x8_c, vpx_d45_predictor_8x8_c,
-                vpx_d135_predictor_8x8_c, vpx_d117_predictor_8x8_c,
-                vpx_d153_predictor_8x8_c, vpx_d207_predictor_8x8_c,
-                vpx_d63_predictor_8x8_c, vpx_tm_predictor_8x8_c)
+INTRA_PRED_TEST(C, TestIntraPred8, aom_dc_predictor_8x8_c,
+                aom_dc_left_predictor_8x8_c, aom_dc_top_predictor_8x8_c,
+                aom_dc_128_predictor_8x8_c, aom_v_predictor_8x8_c,
+                aom_h_predictor_8x8_c, aom_d45_predictor_8x8_c,
+                aom_d135_predictor_8x8_c, aom_d117_predictor_8x8_c,
+                aom_d153_predictor_8x8_c, aom_d207_predictor_8x8_c,
+                aom_d63_predictor_8x8_c, aom_tm_predictor_8x8_c)
 
 #if HAVE_SSE2
-INTRA_PRED_TEST(SSE2, TestIntraPred8, vpx_dc_predictor_8x8_sse2,
-                vpx_dc_left_predictor_8x8_sse2, vpx_dc_top_predictor_8x8_sse2,
-                vpx_dc_128_predictor_8x8_sse2, vpx_v_predictor_8x8_sse2,
-                vpx_h_predictor_8x8_sse2, vpx_d45_predictor_8x8_sse2, NULL,
-                NULL, NULL, NULL, NULL, vpx_tm_predictor_8x8_sse2)
+INTRA_PRED_TEST(SSE2, TestIntraPred8, aom_dc_predictor_8x8_sse2,
+                aom_dc_left_predictor_8x8_sse2, aom_dc_top_predictor_8x8_sse2,
+                aom_dc_128_predictor_8x8_sse2, aom_v_predictor_8x8_sse2,
+                aom_h_predictor_8x8_sse2, aom_d45_predictor_8x8_sse2, NULL,
+                NULL, NULL, NULL, NULL, aom_tm_predictor_8x8_sse2)
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
 INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-                NULL, NULL, vpx_d153_predictor_8x8_ssse3,
-                vpx_d207_predictor_8x8_ssse3, vpx_d63_predictor_8x8_ssse3, NULL)
+                NULL, NULL, aom_d153_predictor_8x8_ssse3,
+                aom_d207_predictor_8x8_ssse3, aom_d63_predictor_8x8_ssse3, NULL)
 #endif  // HAVE_SSSE3
 
 #if HAVE_DSPR2
-INTRA_PRED_TEST(DSPR2, TestIntraPred8, vpx_dc_predictor_8x8_dspr2, NULL, NULL,
-                NULL, NULL, vpx_h_predictor_8x8_dspr2, NULL, NULL, NULL, NULL,
-                NULL, NULL, vpx_tm_predictor_8x8_dspr2)
+INTRA_PRED_TEST(DSPR2, TestIntraPred8, aom_dc_predictor_8x8_dspr2, NULL, NULL,
+                NULL, NULL, aom_h_predictor_8x8_dspr2, NULL, NULL, NULL, NULL,
+                NULL, NULL, aom_tm_predictor_8x8_dspr2)
 #endif  // HAVE_DSPR2
 
 #if HAVE_NEON
-INTRA_PRED_TEST(NEON, TestIntraPred8, vpx_dc_predictor_8x8_neon,
-                vpx_dc_left_predictor_8x8_neon, vpx_dc_top_predictor_8x8_neon,
-                vpx_dc_128_predictor_8x8_neon, vpx_v_predictor_8x8_neon,
-                vpx_h_predictor_8x8_neon, vpx_d45_predictor_8x8_neon, NULL,
-                NULL, NULL, NULL, NULL, vpx_tm_predictor_8x8_neon)
+INTRA_PRED_TEST(NEON, TestIntraPred8, aom_dc_predictor_8x8_neon,
+                aom_dc_left_predictor_8x8_neon, aom_dc_top_predictor_8x8_neon,
+                aom_dc_128_predictor_8x8_neon, aom_v_predictor_8x8_neon,
+                aom_h_predictor_8x8_neon, aom_d45_predictor_8x8_neon, NULL,
+                NULL, NULL, NULL, NULL, aom_tm_predictor_8x8_neon)
 
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
-INTRA_PRED_TEST(MSA, TestIntraPred8, vpx_dc_predictor_8x8_msa,
-                vpx_dc_left_predictor_8x8_msa, vpx_dc_top_predictor_8x8_msa,
-                vpx_dc_128_predictor_8x8_msa, vpx_v_predictor_8x8_msa,
-                vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_8x8_msa)
+INTRA_PRED_TEST(MSA, TestIntraPred8, aom_dc_predictor_8x8_msa,
+                aom_dc_left_predictor_8x8_msa, aom_dc_top_predictor_8x8_msa,
+                aom_dc_128_predictor_8x8_msa, aom_v_predictor_8x8_msa,
+                aom_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_8x8_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
 // 16x16
 
-INTRA_PRED_TEST(C, TestIntraPred16, vpx_dc_predictor_16x16_c,
-                vpx_dc_left_predictor_16x16_c, vpx_dc_top_predictor_16x16_c,
-                vpx_dc_128_predictor_16x16_c, vpx_v_predictor_16x16_c,
-                vpx_h_predictor_16x16_c, vpx_d45_predictor_16x16_c,
-                vpx_d135_predictor_16x16_c, vpx_d117_predictor_16x16_c,
-                vpx_d153_predictor_16x16_c, vpx_d207_predictor_16x16_c,
-                vpx_d63_predictor_16x16_c, vpx_tm_predictor_16x16_c)
+INTRA_PRED_TEST(C, TestIntraPred16, aom_dc_predictor_16x16_c,
+                aom_dc_left_predictor_16x16_c, aom_dc_top_predictor_16x16_c,
+                aom_dc_128_predictor_16x16_c, aom_v_predictor_16x16_c,
+                aom_h_predictor_16x16_c, aom_d45_predictor_16x16_c,
+                aom_d135_predictor_16x16_c, aom_d117_predictor_16x16_c,
+                aom_d153_predictor_16x16_c, aom_d207_predictor_16x16_c,
+                aom_d63_predictor_16x16_c, aom_tm_predictor_16x16_c)
 
 #if HAVE_SSE2
-INTRA_PRED_TEST(SSE2, TestIntraPred16, vpx_dc_predictor_16x16_sse2,
-                vpx_dc_left_predictor_16x16_sse2,
-                vpx_dc_top_predictor_16x16_sse2,
-                vpx_dc_128_predictor_16x16_sse2, vpx_v_predictor_16x16_sse2,
-                vpx_h_predictor_16x16_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_16x16_sse2)
+INTRA_PRED_TEST(SSE2, TestIntraPred16, aom_dc_predictor_16x16_sse2,
+                aom_dc_left_predictor_16x16_sse2,
+                aom_dc_top_predictor_16x16_sse2,
+                aom_dc_128_predictor_16x16_sse2, aom_v_predictor_16x16_sse2,
+                aom_h_predictor_16x16_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_16x16_sse2)
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
 INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_d45_predictor_16x16_ssse3, NULL, NULL,
-                vpx_d153_predictor_16x16_ssse3, vpx_d207_predictor_16x16_ssse3,
-                vpx_d63_predictor_16x16_ssse3, NULL)
+                aom_d45_predictor_16x16_ssse3, NULL, NULL,
+                aom_d153_predictor_16x16_ssse3, aom_d207_predictor_16x16_ssse3,
+                aom_d63_predictor_16x16_ssse3, NULL)
 #endif  // HAVE_SSSE3
 
 #if HAVE_DSPR2
-INTRA_PRED_TEST(DSPR2, TestIntraPred16, vpx_dc_predictor_16x16_dspr2, NULL,
-                NULL, NULL, NULL, vpx_h_predictor_16x16_dspr2, NULL, NULL, NULL,
+INTRA_PRED_TEST(DSPR2, TestIntraPred16, aom_dc_predictor_16x16_dspr2, NULL,
+                NULL, NULL, NULL, aom_h_predictor_16x16_dspr2, NULL, NULL, NULL,
                 NULL, NULL, NULL, NULL)
 #endif  // HAVE_DSPR2
 
 #if HAVE_NEON
-INTRA_PRED_TEST(NEON, TestIntraPred16, vpx_dc_predictor_16x16_neon,
-                vpx_dc_left_predictor_16x16_neon,
-                vpx_dc_top_predictor_16x16_neon,
-                vpx_dc_128_predictor_16x16_neon, vpx_v_predictor_16x16_neon,
-                vpx_h_predictor_16x16_neon, vpx_d45_predictor_16x16_neon, NULL,
-                NULL, NULL, NULL, NULL, vpx_tm_predictor_16x16_neon)
+INTRA_PRED_TEST(NEON, TestIntraPred16, aom_dc_predictor_16x16_neon,
+                aom_dc_left_predictor_16x16_neon,
+                aom_dc_top_predictor_16x16_neon,
+                aom_dc_128_predictor_16x16_neon, aom_v_predictor_16x16_neon,
+                aom_h_predictor_16x16_neon, aom_d45_predictor_16x16_neon, NULL,
+                NULL, NULL, NULL, NULL, aom_tm_predictor_16x16_neon)
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
-INTRA_PRED_TEST(MSA, TestIntraPred16, vpx_dc_predictor_16x16_msa,
-                vpx_dc_left_predictor_16x16_msa, vpx_dc_top_predictor_16x16_msa,
-                vpx_dc_128_predictor_16x16_msa, vpx_v_predictor_16x16_msa,
-                vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_16x16_msa)
+INTRA_PRED_TEST(MSA, TestIntraPred16, aom_dc_predictor_16x16_msa,
+                aom_dc_left_predictor_16x16_msa, aom_dc_top_predictor_16x16_msa,
+                aom_dc_128_predictor_16x16_msa, aom_v_predictor_16x16_msa,
+                aom_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_16x16_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
 // 32x32
 
-INTRA_PRED_TEST(C, TestIntraPred32, vpx_dc_predictor_32x32_c,
-                vpx_dc_left_predictor_32x32_c, vpx_dc_top_predictor_32x32_c,
-                vpx_dc_128_predictor_32x32_c, vpx_v_predictor_32x32_c,
-                vpx_h_predictor_32x32_c, vpx_d45_predictor_32x32_c,
-                vpx_d135_predictor_32x32_c, vpx_d117_predictor_32x32_c,
-                vpx_d153_predictor_32x32_c, vpx_d207_predictor_32x32_c,
-                vpx_d63_predictor_32x32_c, vpx_tm_predictor_32x32_c)
+INTRA_PRED_TEST(C, TestIntraPred32, aom_dc_predictor_32x32_c,
+                aom_dc_left_predictor_32x32_c, aom_dc_top_predictor_32x32_c,
+                aom_dc_128_predictor_32x32_c, aom_v_predictor_32x32_c,
+                aom_h_predictor_32x32_c, aom_d45_predictor_32x32_c,
+                aom_d135_predictor_32x32_c, aom_d117_predictor_32x32_c,
+                aom_d153_predictor_32x32_c, aom_d207_predictor_32x32_c,
+                aom_d63_predictor_32x32_c, aom_tm_predictor_32x32_c)
 
 #if HAVE_SSE2
-INTRA_PRED_TEST(SSE2, TestIntraPred32, vpx_dc_predictor_32x32_sse2,
-                vpx_dc_left_predictor_32x32_sse2,
-                vpx_dc_top_predictor_32x32_sse2,
-                vpx_dc_128_predictor_32x32_sse2, vpx_v_predictor_32x32_sse2,
-                vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_32x32_sse2)
+INTRA_PRED_TEST(SSE2, TestIntraPred32, aom_dc_predictor_32x32_sse2,
+                aom_dc_left_predictor_32x32_sse2,
+                aom_dc_top_predictor_32x32_sse2,
+                aom_dc_128_predictor_32x32_sse2, aom_v_predictor_32x32_sse2,
+                aom_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_32x32_sse2)
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
 INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_d45_predictor_32x32_ssse3, NULL, NULL,
-                vpx_d153_predictor_32x32_ssse3, vpx_d207_predictor_32x32_ssse3,
-                vpx_d63_predictor_32x32_ssse3, NULL)
+                aom_d45_predictor_32x32_ssse3, NULL, NULL,
+                aom_d153_predictor_32x32_ssse3, aom_d207_predictor_32x32_ssse3,
+                aom_d63_predictor_32x32_ssse3, NULL)
 #endif  // HAVE_SSSE3
 
 #if HAVE_NEON
-INTRA_PRED_TEST(NEON, TestIntraPred32, vpx_dc_predictor_32x32_neon,
-                vpx_dc_left_predictor_32x32_neon,
-                vpx_dc_top_predictor_32x32_neon,
-                vpx_dc_128_predictor_32x32_neon, vpx_v_predictor_32x32_neon,
-                vpx_h_predictor_32x32_neon, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_32x32_neon)
+INTRA_PRED_TEST(NEON, TestIntraPred32, aom_dc_predictor_32x32_neon,
+                aom_dc_left_predictor_32x32_neon,
+                aom_dc_top_predictor_32x32_neon,
+                aom_dc_128_predictor_32x32_neon, aom_v_predictor_32x32_neon,
+                aom_h_predictor_32x32_neon, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_32x32_neon)
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
-INTRA_PRED_TEST(MSA, TestIntraPred32, vpx_dc_predictor_32x32_msa,
-                vpx_dc_left_predictor_32x32_msa, vpx_dc_top_predictor_32x32_msa,
-                vpx_dc_128_predictor_32x32_msa, vpx_v_predictor_32x32_msa,
-                vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, NULL,
-                vpx_tm_predictor_32x32_msa)
+INTRA_PRED_TEST(MSA, TestIntraPred32, aom_dc_predictor_32x32_msa,
+                aom_dc_left_predictor_32x32_msa, aom_dc_top_predictor_32x32_msa,
+                aom_dc_128_predictor_32x32_msa, aom_v_predictor_32x32_msa,
+                aom_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                aom_tm_predictor_32x32_msa)
 #endif  // HAVE_MSA
 
-#include "test/test_libvpx.cc"
+#include "test/test_libaom.cc"
diff --git a/test/test_libvpx.cc b/test/test_libaom.cc
similarity index 88%
rename from test/test_libvpx.cc
rename to test/test_libaom.cc
index 3f650ab..c8ea59b 100644
--- a/test/test_libvpx.cc
+++ b/test/test_libaom.cc
@@ -11,16 +11,16 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #if ARCH_X86 || ARCH_X86_64
 #include "aom_ports/x86.h"
 #endif
 extern "C" {
-#if CONFIG_VP10
-extern void vp10_rtcd();
-#endif  // CONFIG_VP10
-extern void vpx_dsp_rtcd();
-extern void vpx_scale_rtcd();
+#if CONFIG_AV1
+extern void av1_rtcd();
+#endif  // CONFIG_AV1
+extern void aom_dsp_rtcd();
+extern void aom_scale_rtcd();
 }
 
 #if ARCH_X86 || ARCH_X86_64
@@ -54,11 +54,11 @@
 // Shared library builds don't support whitebox tests
 // that exercise internal symbols.
 
-#if CONFIG_VP10
-  vp10_rtcd();
-#endif  // CONFIG_VP10
-  vpx_dsp_rtcd();
-  vpx_scale_rtcd();
+#if CONFIG_AV1
+  av1_rtcd();
+#endif  // CONFIG_AV1
+  aom_dsp_rtcd();
+  aom_scale_rtcd();
 #endif  // !CONFIG_SHARED
 
   return RUN_ALL_TESTS();
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index 4cb50c3..cda69cd 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -17,7 +17,7 @@
 #include "test/i420_video_source.h"
 #include "test/util.h"
 #include "test/md5_helper.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 namespace {
 class TileIndependenceTest
@@ -27,21 +27,21 @@
   TileIndependenceTest()
       : EncoderTest(GET_PARAM(0)), md5_fw_order_(), md5_inv_order_(),
         n_tile_cols_(GET_PARAM(1)), n_tile_rows_(GET_PARAM(2)) {
-    init_flags_ = VPX_CODEC_USE_PSNR;
-    vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+    init_flags_ = AOM_CODEC_USE_PSNR;
+    aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
     cfg.w = 704;
     cfg.h = 144;
     cfg.threads = 1;
     fw_dec_ = codec_->CreateDecoder(cfg, 0);
     inv_dec_ = codec_->CreateDecoder(cfg, 0);
-    inv_dec_->Control(VP9_INVERT_TILE_DECODE_ORDER, 1);
+    inv_dec_->Control(AV1_INVERT_TILE_DECODE_ORDER, 1);
 
-#if CONFIG_VP10 && CONFIG_EXT_TILE
-    if (fw_dec_->IsVP10() && inv_dec_->IsVP10()) {
-      fw_dec_->Control(VP10_SET_DECODE_TILE_ROW, -1);
-      fw_dec_->Control(VP10_SET_DECODE_TILE_COL, -1);
-      inv_dec_->Control(VP10_SET_DECODE_TILE_ROW, -1);
-      inv_dec_->Control(VP10_SET_DECODE_TILE_COL, -1);
+#if CONFIG_AV1 && CONFIG_EXT_TILE
+    if (fw_dec_->IsAV1() && inv_dec_->IsAV1()) {
+      fw_dec_->Control(AV1_SET_DECODE_TILE_ROW, -1);
+      fw_dec_->Control(AV1_SET_DECODE_TILE_COL, -1);
+      inv_dec_->Control(AV1_SET_DECODE_TILE_ROW, -1);
+      inv_dec_->Control(AV1_SET_DECODE_TILE_COL, -1);
     }
 #endif
   }
@@ -59,40 +59,40 @@
   virtual void PreEncodeFrameHook(libaom_test::VideoSource *video,
                                   libaom_test::Encoder *encoder) {
     if (video->frame() == 1) {
-      encoder->Control(VP9E_SET_TILE_COLUMNS, n_tile_cols_);
-      encoder->Control(VP9E_SET_TILE_ROWS, n_tile_rows_);
+      encoder->Control(AV1E_SET_TILE_COLUMNS, n_tile_cols_);
+      encoder->Control(AV1E_SET_TILE_ROWS, n_tile_rows_);
       SetCpuUsed(encoder);
     }
   }
 
   virtual void SetCpuUsed(libaom_test::Encoder *encoder) {
     static const int kCpuUsed = 3;
-    encoder->Control(VP8E_SET_CPUUSED, kCpuUsed);
+    encoder->Control(AOME_SET_CPUUSED, kCpuUsed);
   }
 
-  void UpdateMD5(::libaom_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
+  void UpdateMD5(::libaom_test::Decoder *dec, const aom_codec_cx_pkt_t *pkt,
                  ::libaom_test::MD5 *md5) {
-    const vpx_codec_err_t res = dec->DecodeFrame(
+    const aom_codec_err_t res = dec->DecodeFrame(
         reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
-    if (res != VPX_CODEC_OK) {
+    if (res != AOM_CODEC_OK) {
       abort_ = true;
-      ASSERT_EQ(VPX_CODEC_OK, res);
+      ASSERT_EQ(AOM_CODEC_OK, res);
     }
-    const vpx_image_t *img = dec->GetDxData().Next();
+    const aom_image_t *img = dec->GetDxData().Next();
     md5->Add(img);
   }
 
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+  virtual void FramePktHook(const aom_codec_cx_pkt_t *pkt) {
     UpdateMD5(fw_dec_, pkt, &md5_fw_order_);
     UpdateMD5(inv_dec_, pkt, &md5_inv_order_);
   }
 
   void DoTest() {
-    const vpx_rational timebase = { 33333333, 1000000000 };
+    const aom_rational timebase = { 33333333, 1000000000 };
     cfg_.g_timebase = timebase;
     cfg_.rc_target_bitrate = 500;
     cfg_.g_lag_in_frames = 12;
-    cfg_.rc_end_usage = VPX_VBR;
+    cfg_.rc_end_usage = AOM_VBR;
 
     libaom_test::I420VideoSource video("hantro_collage_w352h288.yuv", 704, 576,
                                        timebase.den, timebase.num, 0, 5);
@@ -119,22 +119,22 @@
 class TileIndependenceTestLarge : public TileIndependenceTest {
   virtual void SetCpuUsed(libaom_test::Encoder *encoder) {
     static const int kCpuUsed = 0;
-    encoder->Control(VP8E_SET_CPUUSED, kCpuUsed);
+    encoder->Control(AOME_SET_CPUUSED, kCpuUsed);
   }
 };
 
 TEST_P(TileIndependenceTestLarge, MD5Match) { DoTest(); }
 
 #if CONFIG_EXT_TILE
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(1, 2, 32),
-                           ::testing::Values(1, 2, 32));
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge,
-                           ::testing::Values(1, 2, 32),
-                           ::testing::Values(1, 2, 32));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(1, 2, 32),
+                          ::testing::Values(1, 2, 32));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge,
+                          ::testing::Values(1, 2, 32),
+                          ::testing::Values(1, 2, 32));
 #else
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(0, 1),
-                           ::testing::Values(0, 1));
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge, ::testing::Values(0, 1),
-                           ::testing::Values(0, 1));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(0, 1),
+                          ::testing::Values(0, 1));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge, ::testing::Values(0, 1),
+                          ::testing::Values(0, 1));
 #endif  // CONFIG_EXT_TILE
 }  // namespace
diff --git a/test/tools_common.sh b/test/tools_common.sh
index 2e10437..2e4ea30 100755
--- a/test/tools_common.sh
+++ b/test/tools_common.sh
@@ -10,59 +10,59 @@
 ##
 ##  This file contains shell code shared by test scripts for libaom tools.
 
-# Use $VPX_TEST_TOOLS_COMMON_SH as a pseudo include guard.
-if [ -z "${VPX_TEST_TOOLS_COMMON_SH}" ]; then
-VPX_TEST_TOOLS_COMMON_SH=included
+# Use $AOM_TEST_TOOLS_COMMON_SH as a pseudo include guard.
+if [ -z "${AOM_TEST_TOOLS_COMMON_SH}" ]; then
+AOM_TEST_TOOLS_COMMON_SH=included
 
 set -e
 devnull='> /dev/null 2>&1'
-VPX_TEST_PREFIX=""
+AOM_TEST_PREFIX=""
 
 elog() {
   echo "$@" 1>&2
 }
 
 vlog() {
-  if [ "${VPX_TEST_VERBOSE_OUTPUT}" = "yes" ]; then
+  if [ "${AOM_TEST_VERBOSE_OUTPUT}" = "yes" ]; then
     echo "$@"
   fi
 }
 
-# Sets $VPX_TOOL_TEST to the name specified by positional parameter one.
+# Sets $AOM_TOOL_TEST to the name specified by positional parameter one.
 test_begin() {
-  VPX_TOOL_TEST="${1}"
+  AOM_TOOL_TEST="${1}"
 }
 
-# Clears the VPX_TOOL_TEST variable after confirming that $VPX_TOOL_TEST matches
+# Clears the AOM_TOOL_TEST variable after confirming that $AOM_TOOL_TEST matches
 # positional parameter one.
 test_end() {
-  if [ "$1" != "${VPX_TOOL_TEST}" ]; then
+  if [ "$1" != "${AOM_TOOL_TEST}" ]; then
     echo "FAIL completed test mismatch!."
     echo "  completed test: ${1}"
-    echo "  active test: ${VPX_TOOL_TEST}."
+    echo "  active test: ${AOM_TOOL_TEST}."
     return 1
   fi
-  VPX_TOOL_TEST='<unset>'
+  AOM_TOOL_TEST='<unset>'
 }
 
 # Echoes the target configuration being tested.
 test_configuration_target() {
-  vpx_config_mk="${LIBAOM_CONFIG_PATH}/config.mk"
+  aom_config_mk="${LIBAOM_CONFIG_PATH}/config.mk"
   # Find the TOOLCHAIN line, split it using ':=' as the field separator, and
   # print the last field to get the value. Then pipe the value to tr to consume
   # any leading/trailing spaces while allowing tr to echo the output to stdout.
-  awk -F ':=' '/TOOLCHAIN/ { print $NF }' "${vpx_config_mk}" | tr -d ' '
+  awk -F ':=' '/TOOLCHAIN/ { print $NF }' "${aom_config_mk}" | tr -d ' '
 }
 
 # Trap function used for failure reports and tool output directory removal.
-# When the contents of $VPX_TOOL_TEST do not match the string '<unset>', reports
-# failure of test stored in $VPX_TOOL_TEST.
+# When the contents of $AOM_TOOL_TEST do not match the string '<unset>', reports
+# failure of test stored in $AOM_TOOL_TEST.
 cleanup() {
-  if [ -n "${VPX_TOOL_TEST}" ] && [ "${VPX_TOOL_TEST}" != '<unset>' ]; then
-    echo "FAIL: $VPX_TOOL_TEST"
+  if [ -n "${AOM_TOOL_TEST}" ] && [ "${AOM_TOOL_TEST}" != '<unset>' ]; then
+    echo "FAIL: $AOM_TOOL_TEST"
   fi
-  if [ -n "${VPX_TEST_OUTPUT_DIR}" ] && [ -d "${VPX_TEST_OUTPUT_DIR}" ]; then
-    rm -rf "${VPX_TEST_OUTPUT_DIR}"
+  if [ -n "${AOM_TEST_OUTPUT_DIR}" ] && [ -d "${AOM_TEST_OUTPUT_DIR}" ]; then
+    rm -rf "${AOM_TEST_OUTPUT_DIR}"
   fi
 }
 
@@ -70,17 +70,17 @@
 # $LIBAOM_CONFIG_PATH/config.mk to stdout, or the version number string when
 # no git hash is contained in VERSION_STRING.
 config_hash() {
-  vpx_config_mk="${LIBAOM_CONFIG_PATH}/config.mk"
+  aom_config_mk="${LIBAOM_CONFIG_PATH}/config.mk"
   # Find VERSION_STRING line, split it with "-g" and print the last field to
   # output the git hash to stdout.
-  vpx_version=$(awk -F -g '/VERSION_STRING/ {print $NF}' "${vpx_config_mk}")
+  aom_version=$(awk -F -g '/VERSION_STRING/ {print $NF}' "${aom_config_mk}")
   # Handle two situations here:
-  # 1. The default case: $vpx_version is a git hash, so echo it unchanged.
+  # 1. The default case: $aom_version is a git hash, so echo it unchanged.
   # 2. When being run a non-dev tree, the -g portion is not present in the
   #    version string: It's only the version number.
-  #    In this case $vpx_version is something like 'VERSION_STRING=v1.3.0', so
+  #    In this case $aom_version is something like 'VERSION_STRING=v1.3.0', so
   #    we echo only what is after the '='.
-  echo "${vpx_version##*=}"
+  echo "${aom_version##*=}"
 }
 
 # Echoes the short form of the current git hash.
@@ -95,7 +95,7 @@
   fi
 }
 
-# Echoes warnings to stdout when git hash in vpx_config.h does not match the
+# Echoes warnings to stdout when git hash in aom_config.h does not match the
 # current git hash.
 check_git_hashes() {
   hash_at_configure_time=$(config_hash)
@@ -120,19 +120,19 @@
 # This script requires that the LIBAOM_BIN_PATH, LIBAOM_CONFIG_PATH, and
 # LIBVPX_TEST_DATA_PATH variables are in the environment: Confirm that
 # the variables are set and that they all evaluate to directory paths.
-verify_vpx_test_environment() {
+verify_aom_test_environment() {
   test_env_var_dir "LIBAOM_BIN_PATH" \
     && test_env_var_dir "LIBAOM_CONFIG_PATH" \
     && test_env_var_dir "LIBVPX_TEST_DATA_PATH"
 }
 
-# Greps vpx_config.h in LIBAOM_CONFIG_PATH for positional parameter one, which
+# Greps aom_config.h in LIBAOM_CONFIG_PATH for positional parameter one, which
 # should be a LIBAOM preprocessor flag. Echoes yes to stdout when the feature
 # is available.
-vpx_config_option_enabled() {
-  vpx_config_option="${1}"
-  vpx_config_file="${LIBAOM_CONFIG_PATH}/vpx_config.h"
-  config_line=$(grep "${vpx_config_option}" "${vpx_config_file}")
+aom_config_option_enabled() {
+  aom_config_option="${1}"
+  aom_config_file="${LIBAOM_CONFIG_PATH}/aom_config.h"
+  config_line=$(grep "${aom_config_option}" "${aom_config_file}")
   if echo "${config_line}" | egrep -q '1$'; then
     echo yes
   fi
@@ -149,13 +149,13 @@
 # Echoes path to $1 when it's executable and exists in ${LIBAOM_BIN_PATH}, or an
 # empty string. Caller is responsible for testing the string once the function
 # returns.
-vpx_tool_path() {
+aom_tool_path() {
   local readonly tool_name="$1"
-  local tool_path="${LIBAOM_BIN_PATH}/${tool_name}${VPX_TEST_EXE_SUFFIX}"
+  local tool_path="${LIBAOM_BIN_PATH}/${tool_name}${AOM_TEST_EXE_SUFFIX}"
   if [ ! -x "${tool_path}" ]; then
     # Try one directory up: when running via examples.sh the tool could be in
     # the parent directory of $LIBAOM_BIN_PATH.
-    tool_path="${LIBAOM_BIN_PATH}/../${tool_name}${VPX_TEST_EXE_SUFFIX}"
+    tool_path="${LIBAOM_BIN_PATH}/../${tool_name}${AOM_TEST_EXE_SUFFIX}"
   fi
 
   if [ ! -x "${tool_path}" ]; then
@@ -166,28 +166,28 @@
 
 # Echoes yes to stdout when the file named by positional parameter one exists
 # in LIBAOM_BIN_PATH, and is executable.
-vpx_tool_available() {
+aom_tool_available() {
   local tool_name="$1"
-  local tool="${LIBAOM_BIN_PATH}/${tool_name}${VPX_TEST_EXE_SUFFIX}"
+  local tool="${LIBAOM_BIN_PATH}/${tool_name}${AOM_TEST_EXE_SUFFIX}"
   [ -x "${tool}" ] && echo yes
 }
 
-# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
-# CONFIG_VP10_DECODER.
-vp10_decode_available() {
-  [ "$(vpx_config_option_enabled CONFIG_VP10_DECODER)" = "yes" ] && echo yes
+# Echoes yes to stdout when aom_config_option_enabled() reports yes for
+# CONFIG_AV1_DECODER.
+av1_decode_available() {
+  [ "$(aom_config_option_enabled CONFIG_AV1_DECODER)" = "yes" ] && echo yes
 }
 
-# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
-# CONFIG_VP10_ENCODER.
-vp10_encode_available() {
-  [ "$(vpx_config_option_enabled CONFIG_VP10_ENCODER)" = "yes" ] && echo yes
+# Echoes yes to stdout when aom_config_option_enabled() reports yes for
+# CONFIG_AV1_ENCODER.
+av1_encode_available() {
+  [ "$(aom_config_option_enabled CONFIG_AV1_ENCODER)" = "yes" ] && echo yes
 }
 
-# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
+# Echoes yes to stdout when aom_config_option_enabled() reports yes for
 # CONFIG_WEBM_IO.
 webm_io_available() {
-  [ "$(vpx_config_option_enabled CONFIG_WEBM_IO)" = "yes" ] && echo yes
+  [ "$(aom_config_option_enabled CONFIG_WEBM_IO)" = "yes" ] && echo yes
 }
 
 # Filters strings from $1 using the filter specified by $2. Filter behavior
@@ -223,28 +223,28 @@
 # Runs user test functions passed via positional parameters one and two.
 # Functions in positional parameter one are treated as environment verification
 # functions and are run unconditionally. Functions in positional parameter two
-# are run according to the rules specified in vpx_test_usage().
+# are run according to the rules specified in aom_test_usage().
 run_tests() {
-  local env_tests="verify_vpx_test_environment $1"
+  local env_tests="verify_aom_test_environment $1"
   local tests_to_filter="$2"
-  local test_name="${VPX_TEST_NAME}"
+  local test_name="${AOM_TEST_NAME}"
 
   if [ -z "${test_name}" ]; then
     test_name="$(basename "${0%.*}")"
   fi
 
-  if [ "${VPX_TEST_RUN_DISABLED_TESTS}" != "yes" ]; then
+  if [ "${AOM_TEST_RUN_DISABLED_TESTS}" != "yes" ]; then
     # Filter out DISABLED tests.
     tests_to_filter=$(filter_strings "${tests_to_filter}" ^DISABLED exclude)
   fi
 
-  if [ -n "${VPX_TEST_FILTER}" ]; then
+  if [ -n "${AOM_TEST_FILTER}" ]; then
     # Remove tests not matching the user's filter.
-    tests_to_filter=$(filter_strings "${tests_to_filter}" ${VPX_TEST_FILTER})
+    tests_to_filter=$(filter_strings "${tests_to_filter}" ${AOM_TEST_FILTER})
   fi
 
   # User requested test listing: Dump test names and return.
-  if [ "${VPX_TEST_LIST_TESTS}" = "yes" ]; then
+  if [ "${AOM_TEST_LIST_TESTS}" = "yes" ]; then
     for test_name in $tests_to_filter; do
       echo ${test_name}
     done
@@ -272,7 +272,7 @@
   echo "${test_name}: Done, all tests pass for ${tested_config}."
 }
 
-vpx_test_usage() {
+aom_test_usage() {
 cat << EOF
   Usage: ${0##*/} [arguments]
     --bin-path <path to libaom binaries directory>
@@ -301,7 +301,7 @@
 
 # Returns non-zero (failure) when required environment variables are empty
 # strings.
-vpx_test_check_environment() {
+aom_test_check_environment() {
   if [ -z "${LIBAOM_BIN_PATH}" ] || \
      [ -z "${LIBAOM_CONFIG_PATH}" ] || \
      [ -z "${LIBVPX_TEST_DATA_PATH}" ]; then
@@ -321,14 +321,14 @@
       shift
       ;;
     --filter)
-      VPX_TEST_FILTER="$2"
+      AOM_TEST_FILTER="$2"
       shift
       ;;
     --run-disabled-tests)
-      VPX_TEST_RUN_DISABLED_TESTS=yes
+      AOM_TEST_RUN_DISABLED_TESTS=yes
       ;;
     --help)
-      vpx_test_usage
+      aom_test_usage
       exit
       ;;
     --test-data-path)
@@ -336,20 +336,20 @@
       shift
       ;;
     --prefix)
-      VPX_TEST_PREFIX="$2"
+      AOM_TEST_PREFIX="$2"
       shift
       ;;
     --verbose)
-      VPX_TEST_VERBOSE_OUTPUT=yes
+      AOM_TEST_VERBOSE_OUTPUT=yes
       ;;
     --show-program-output)
       devnull=
       ;;
     --list-tests)
-      VPX_TEST_LIST_TESTS=yes
+      AOM_TEST_LIST_TESTS=yes
       ;;
     *)
-      vpx_test_usage
+      aom_test_usage
       exit 1
       ;;
   esac
@@ -364,33 +364,33 @@
 
 # Create a temporary directory for output files, and a trap to clean it up.
 if [ -n "${TMPDIR}" ]; then
-  VPX_TEST_TEMP_ROOT="${TMPDIR}"
+  AOM_TEST_TEMP_ROOT="${TMPDIR}"
 elif [ -n "${TEMPDIR}" ]; then
-  VPX_TEST_TEMP_ROOT="${TEMPDIR}"
+  AOM_TEST_TEMP_ROOT="${TEMPDIR}"
 else
-  VPX_TEST_TEMP_ROOT=/tmp
+  AOM_TEST_TEMP_ROOT=/tmp
 fi
 
-VPX_TEST_OUTPUT_DIR="${VPX_TEST_TEMP_ROOT}/vpx_test_$$"
+AOM_TEST_OUTPUT_DIR="${AOM_TEST_TEMP_ROOT}/aom_test_$$"
 
-if ! mkdir -p "${VPX_TEST_OUTPUT_DIR}" || \
-   [ ! -d "${VPX_TEST_OUTPUT_DIR}" ]; then
+if ! mkdir -p "${AOM_TEST_OUTPUT_DIR}" || \
+   [ ! -d "${AOM_TEST_OUTPUT_DIR}" ]; then
   echo "${0##*/}: Cannot create output directory, giving up."
-  echo "${0##*/}:   VPX_TEST_OUTPUT_DIR=${VPX_TEST_OUTPUT_DIR}"
+  echo "${0##*/}:   AOM_TEST_OUTPUT_DIR=${AOM_TEST_OUTPUT_DIR}"
   exit 1
 fi
 
 if [ "$(is_windows_target)" = "yes" ]; then
-  VPX_TEST_EXE_SUFFIX=".exe"
+  AOM_TEST_EXE_SUFFIX=".exe"
 fi
 
 # Variables shared by tests.
 VP8_IVF_FILE="${LIBVPX_TEST_DATA_PATH}/vp80-00-comprehensive-001.ivf"
-VP9_IVF_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-09-subpixel-00.ivf"
+AV1_IVF_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-09-subpixel-00.ivf"
 
-VP9_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-00-quantizer-00.webm"
-VP9_FPM_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-07-frame_parallel-1.webm"
-VP9_LT_50_FRAMES_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-02-size-32x08.webm"
+AV1_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-00-quantizer-00.webm"
+AV1_FPM_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-07-frame_parallel-1.webm"
+AV1_LT_50_FRAMES_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-02-size-32x08.webm"
 
 YUV_RAW_INPUT="${LIBVPX_TEST_DATA_PATH}/hantro_collage_w352h288.yuv"
 YUV_RAW_INPUT_WIDTH=352
@@ -406,21 +406,21 @@
   LIBAOM_BIN_PATH=${LIBAOM_BIN_PATH}
   LIBAOM_CONFIG_PATH=${LIBAOM_CONFIG_PATH}
   LIBVPX_TEST_DATA_PATH=${LIBVPX_TEST_DATA_PATH}
-  VP8_IVF_FILE=${VP8_IVF_FILE}
-  VP9_IVF_FILE=${VP9_IVF_FILE}
-  VP9_WEBM_FILE=${VP9_WEBM_FILE}
-  VPX_TEST_EXE_SUFFIX=${VPX_TEST_EXE_SUFFIX}
-  VPX_TEST_FILTER=${VPX_TEST_FILTER}
-  VPX_TEST_LIST_TESTS=${VPX_TEST_LIST_TESTS}
-  VPX_TEST_OUTPUT_DIR=${VPX_TEST_OUTPUT_DIR}
-  VPX_TEST_PREFIX=${VPX_TEST_PREFIX}
-  VPX_TEST_RUN_DISABLED_TESTS=${VPX_TEST_RUN_DISABLED_TESTS}
-  VPX_TEST_SHOW_PROGRAM_OUTPUT=${VPX_TEST_SHOW_PROGRAM_OUTPUT}
-  VPX_TEST_TEMP_ROOT=${VPX_TEST_TEMP_ROOT}
-  VPX_TEST_VERBOSE_OUTPUT=${VPX_TEST_VERBOSE_OUTPUT}
+  AOM_IVF_FILE=${AOM_IVF_FILE}
+  AV1_IVF_FILE=${AV1_IVF_FILE}
+  AV1_WEBM_FILE=${AV1_WEBM_FILE}
+  AOM_TEST_EXE_SUFFIX=${AOM_TEST_EXE_SUFFIX}
+  AOM_TEST_FILTER=${AOM_TEST_FILTER}
+  AOM_TEST_LIST_TESTS=${AOM_TEST_LIST_TESTS}
+  AOM_TEST_OUTPUT_DIR=${AOM_TEST_OUTPUT_DIR}
+  AOM_TEST_PREFIX=${AOM_TEST_PREFIX}
+  AOM_TEST_RUN_DISABLED_TESTS=${AOM_TEST_RUN_DISABLED_TESTS}
+  AOM_TEST_SHOW_PROGRAM_OUTPUT=${AOM_TEST_SHOW_PROGRAM_OUTPUT}
+  AOM_TEST_TEMP_ROOT=${AOM_TEST_TEMP_ROOT}
+  AOM_TEST_VERBOSE_OUTPUT=${AOM_TEST_VERBOSE_OUTPUT}
   YUV_RAW_INPUT=${YUV_RAW_INPUT}
   YUV_RAW_INPUT_WIDTH=${YUV_RAW_INPUT_WIDTH}
   YUV_RAW_INPUT_HEIGHT=${YUV_RAW_INPUT_HEIGHT}
   Y4M_NOSQ_PAR_INPUT=${Y4M_NOSQ_PAR_INPUT}"
 
-fi  # End $VPX_TEST_TOOLS_COMMON_SH pseudo include guard.
+fi  # End $AOM_TEST_TOOLS_COMMON_SH pseudo include guard.
diff --git a/test/transform_test_base.h b/test/transform_test_base.h
index a09897c..a128b3b 100644
--- a/test/transform_test_base.h
+++ b/test/transform_test_base.h
@@ -10,19 +10,19 @@
 #ifndef TEST_TRANSFORM_TEST_BASE_H_
 #define TEST_TRANSFORM_TEST_BASE_H_
 
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_codec.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_codec.h"
 
 namespace libaom_test {
 
 //  Note:
-//   Same constant are defined in vp9/common/vp9_entropy.h and
+//   Same constant are defined in av1/common/av1_entropy.h and
 //   av1/common/entropy.h.  Goal is to make this base class
 //   to use for future codec transform testing.  But including
 //   either of them would lead to compiling error when we do
 //   unit test for another codec. Suggest to move the definition
-//   to a vpx header file.
+//   to a aom header file.
 const int kDctMaxValue = 16384;
 
 typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
@@ -44,28 +44,28 @@
     const int count_test_block = 10000;
 
     int16_t *test_input_block = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
     tran_low_t *test_temp_block = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
     uint8_t *dst = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint8_t) * num_coeffs_));
     uint8_t *src = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
-#if CONFIG_VP9_HIGHBITDEPTH
+        aom_memalign(16, sizeof(uint8_t) * num_coeffs_));
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *dst16 = reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint16_t) * num_coeffs_));
     uint16_t *src16 = reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint16_t) * num_coeffs_));
 #endif
 
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-255, 255].
       for (int j = 0; j < num_coeffs_; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -76,9 +76,9 @@
 
       ASM_REGISTER_STATE_CHECK(
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -86,11 +86,11 @@
       }
 
       for (int j = 0; j < num_coeffs_; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
-        ASSERT_EQ(VPX_BITS_8, bit_depth_);
+        ASSERT_EQ(AOM_BITS_8, bit_depth_);
         const uint32_t diff = dst[j] - src[j];
 #endif
         const uint32_t error = diff * diff;
@@ -106,13 +106,13 @@
         << "Error: 4x4 FHT/IHT has average round trip error > " << limit
         << " per block";
 
-    vpx_free(test_input_block);
-    vpx_free(test_temp_block);
-    vpx_free(dst);
-    vpx_free(src);
-#if CONFIG_VP9_HIGHBITDEPTH
-    vpx_free(dst16);
-    vpx_free(src16);
+    aom_free(test_input_block);
+    aom_free(test_temp_block);
+    aom_free(dst);
+    aom_free(src);
+#if CONFIG_AOM_HIGHBITDEPTH
+    aom_free(dst16);
+    aom_free(src16);
 #endif
   }
 
@@ -121,11 +121,11 @@
     const int count_test_block = 5000;
 
     int16_t *input_block = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
     tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
     tran_low_t *output_block = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
 
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_, mask_].
@@ -142,9 +142,9 @@
             << " at test block: " << i;
       }
     }
-    vpx_free(input_block);
-    vpx_free(output_ref_block);
-    vpx_free(output_block);
+    aom_free(input_block);
+    aom_free(output_ref_block);
+    aom_free(output_block);
   }
 
   void RunMemCheck() {
@@ -152,11 +152,11 @@
     const int count_test_block = 5000;
 
     int16_t *input_extreme_block = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
     tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
     tran_low_t *output_block = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
 
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_, mask_].
@@ -182,9 +182,9 @@
             << "Error: NxN FDCT has coefficient larger than N*DCT_MAX_VALUE";
       }
     }
-    vpx_free(input_extreme_block);
-    vpx_free(output_ref_block);
-    vpx_free(output_block);
+    aom_free(input_extreme_block);
+    aom_free(output_ref_block);
+    aom_free(output_block);
   }
 
   void RunInvAccuracyCheck(int limit) {
@@ -192,29 +192,29 @@
     const int count_test_block = 1000;
 
     int16_t *in = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(int16_t) * num_coeffs_));
     tran_low_t *coeff = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+        aom_memalign(16, sizeof(tran_low_t) * num_coeffs_));
     uint8_t *dst = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint8_t) * num_coeffs_));
     uint8_t *src = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint8_t) * num_coeffs_));
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *dst16 = reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint16_t) * num_coeffs_));
     uint16_t *src16 = reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+        aom_memalign(16, sizeof(uint16_t) * num_coeffs_));
 #endif
 
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_, mask_].
       for (int j = 0; j < num_coeffs_; ++j) {
-        if (bit_depth_ == VPX_BITS_8) {
+        if (bit_depth_ == AOM_BITS_8) {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           in[j] = src[j] - dst[j];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -225,9 +225,9 @@
 
       fwd_txfm_ref(in, coeff, pitch_, tx_type_);
 
-      if (bit_depth_ == VPX_BITS_8) {
+      if (bit_depth_ == AOM_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -235,9 +235,9 @@
       }
 
       for (int j = 0; j < num_coeffs_; ++j) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
-            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == AOM_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const uint32_t diff = dst[j] - src[j];
 #endif
@@ -246,20 +246,20 @@
             << "Error: 4x4 IDCT has error " << error << " at index " << j;
       }
     }
-    vpx_free(in);
-    vpx_free(coeff);
-    vpx_free(dst);
-    vpx_free(src);
-#if CONFIG_VP9_HIGHBITDEPTH
-    vpx_free(src16);
-    vpx_free(dst16);
+    aom_free(in);
+    aom_free(coeff);
+    aom_free(dst);
+    aom_free(src);
+#if CONFIG_AOM_HIGHBITDEPTH
+    aom_free(src16);
+    aom_free(dst16);
 #endif
   }
 
   int pitch_;
   int tx_type_;
   FhtFunc fwd_txfm_ref;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   int mask_;
   int num_coeffs_;
 
diff --git a/test/twopass_encoder.sh b/test/twopass_encoder.sh
index 222f170..5e2a97b 100755
--- a/test/twopass_encoder.sh
+++ b/test/twopass_encoder.sh
@@ -18,7 +18,7 @@
 # Environment check: $YUV_RAW_INPUT is required.
 twopass_encoder_verify_environment() {
   if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+    echo "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
 }
@@ -26,35 +26,38 @@
 # Runs twopass_encoder using the codec specified by $1 with a frame limit of
 # 100.
 twopass_encoder() {
-  local encoder="${LIBAOM_BIN_PATH}/twopass_encoder${VPX_TEST_EXE_SUFFIX}"
+  local encoder="${LIBAOM_BIN_PATH}/twopass_encoder${AOM_TEST_EXE_SUFFIX}"
   local codec="$1"
-  local output_file="${VPX_TEST_OUTPUT_DIR}/twopass_encoder_${codec}.ivf"
+  local output_file="${AOM_TEST_OUTPUT_DIR}/twopass_encoder_${codec}.ivf"
 
   if [ ! -x "${encoder}" ]; then
     elog "${encoder} does not exist or is not executable."
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
+  eval "${AOM_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \
       "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" 100 \
       ${devnull}
 
   [ -e "${output_file}" ] || return 1
 }
 
-twopass_encoder_vp8() {
-  if [ "$(vp8_encode_available)" = "yes" ]; then
-    twopass_encoder vp8 || return 1
+twopass_encoder_aom() {
+  if [ "$(aom_encode_available)" = "yes" ]; then
+    twopass_encoder aom || return 1
   fi
 }
 
-twopass_encoder_vp9() {
-  if [ "$(vp9_encode_available)" = "yes" ]; then
-    twopass_encoder vp9 || return 1
+# TODO(tomfinegan): Add a frame limit param to twopass_encoder and enable this
+# test. AV1 is just too slow right now: This test takes 31m16s+ on a fast
+# machine.
+DISABLED_twopass_encoder_av1() {
+  if [ "$(av1_encode_available)" = "yes" ]; then
+    twopass_encoder av1 || return 1
   fi
 }
 
-twopass_encoder_tests="twopass_encoder_vp8
-                       twopass_encoder_vp9"
+twopass_encoder_tests="twopass_encoder_aom
+                       DISABLED_twopass_encoder_av1"
 
 run_tests twopass_encoder_verify_environment "${twopass_encoder_tests}"
diff --git a/test/user_priv_test.cc b/test/user_priv_test.cc
index 7e8d398..54d4ee6 100644
--- a/test/user_priv_test.cc
+++ b/test/user_priv_test.cc
@@ -12,7 +12,7 @@
 #include <cstdlib>
 #include <string>
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "test/acm_random.h"
 #include "test/codec_factory.h"
 #include "test/decode_test_driver.h"
@@ -22,8 +22,8 @@
 #if CONFIG_WEBM_IO
 #include "test/webm_video_source.h"
 #endif
-#include "aom_mem/vpx_mem.h"
-#include "aom/vp8.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom.h"
 
 namespace {
 
@@ -47,23 +47,23 @@
   libaom_test::WebMVideoSource video(filename);
   video.Init();
 
-  vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
-  libaom_test::VP9Decoder decoder(cfg, 0);
+  aom_codec_dec_cfg_t cfg = aom_codec_dec_cfg_t();
+  libaom_test::AV1Decoder decoder(cfg, 0);
 
   libaom_test::MD5 md5;
   int frame_num = 0;
   for (video.Begin(); !::testing::Test::HasFailure() && video.cxdata();
        video.Next()) {
     void *user_priv = reinterpret_cast<void *>(&frame_num);
-    const vpx_codec_err_t res =
+    const aom_codec_err_t res =
         decoder.DecodeFrame(video.cxdata(), video.frame_size(),
                             (frame_num == 0) ? NULL : user_priv);
-    if (res != VPX_CODEC_OK) {
-      EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+    if (res != AOM_CODEC_OK) {
+      EXPECT_EQ(AOM_CODEC_OK, res) << decoder.DecodeError();
       break;
     }
     libaom_test::DxDataIterator dec_iter = decoder.GetDxData();
-    const vpx_image_t *img = NULL;
+    const aom_image_t *img = NULL;
 
     // Get decompressed data.
     while ((img = dec_iter.Next())) {
@@ -73,10 +73,10 @@
         CheckUserPrivateData(img->user_priv, &frame_num);
 
         // Also test ctrl_get_reference api.
-        struct vp9_ref_frame ref;
+        struct av1_ref_frame ref;
         // Randomly fetch a reference frame.
         ref.idx = rnd.Rand8() % 3;
-        decoder.Control(VP9_GET_REFERENCE, &ref);
+        decoder.Control(AV1_GET_REFERENCE, &ref);
 
         CheckUserPrivateData(ref.img.user_priv, NULL);
       }
@@ -92,7 +92,7 @@
   // no tiles or frame parallel; this exercises the decoding to test the
   // user_priv.
   EXPECT_STREQ("b35a1b707b28e82be025d960aba039bc",
-               DecodeFile("vp90-2-03-size-226x226.webm").c_str());
+               DecodeFile("av10-2-03-size-226x226.webm").c_str());
 }
 
 #endif  // CONFIG_WEBM_IO
diff --git a/test/util.h b/test/util.h
index 6567253..1704473 100644
--- a/test/util.h
+++ b/test/util.h
@@ -14,12 +14,12 @@
 #include <stdio.h>
 #include <math.h>
 #include "third_party/googletest/src/include/gtest/gtest.h"
-#include "aom/vpx_image.h"
+#include "aom/aom_image.h"
 
 // Macros
 #define GET_PARAM(k) std::tr1::get<k>(GetParam())
 
-inline double compute_psnr(const vpx_image_t *img1, const vpx_image_t *img2) {
+inline double compute_psnr(const aom_image_t *img1, const aom_image_t *img2) {
   assert((img1->fmt == img2->fmt) && (img1->d_w == img2->d_w) &&
          (img1->d_h == img2->d_h));
 
@@ -30,8 +30,8 @@
   int64_t sqrerr = 0;
   for (i = 0; i < height_y; ++i)
     for (j = 0; j < width_y; ++j) {
-      int64_t d = img1->planes[VPX_PLANE_Y][i * img1->stride[VPX_PLANE_Y] + j] -
-                  img2->planes[VPX_PLANE_Y][i * img2->stride[VPX_PLANE_Y] + j];
+      int64_t d = img1->planes[AOM_PLANE_Y][i * img1->stride[AOM_PLANE_Y] + j] -
+                  img2->planes[AOM_PLANE_Y][i * img2->stride[AOM_PLANE_Y] + j];
       sqrerr += d * d;
     }
   double mse = static_cast<double>(sqrerr) / (width_y * height_y);
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 1541f8e..432ab32 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -13,14 +13,14 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 namespace {
@@ -48,15 +48,15 @@
 // (bit_depth - 8) for se
 static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
   switch (bit_depth) {
-    case VPX_BITS_12:
+    case AOM_BITS_12:
       *sse = (*sse + 128) >> 8;
       *se = (*se + 8) >> 4;
       break;
-    case VPX_BITS_10:
+    case AOM_BITS_10:
       *sse = (*sse + 8) >> 4;
       *se = (*se + 2) >> 2;
       break;
-    case VPX_BITS_8:
+    case AOM_BITS_8:
     default: break;
   }
 }
@@ -76,7 +76,7 @@
 static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, int l2w,
                              int l2h, int src_stride, int ref_stride,
                              uint32_t *sse_ptr, bool use_high_bit_depth_,
-                             vpx_bit_depth_t bit_depth) {
+                             aom_bit_depth_t bit_depth) {
   int64_t se = 0;
   uint64_t sse = 0;
   const int w = 1 << l2w;
@@ -88,13 +88,13 @@
         diff = src[y * src_stride + x] - ref[y * ref_stride + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         diff = CONVERT_TO_SHORTPTR(src)[y * src_stride + x] -
                CONVERT_TO_SHORTPTR(ref)[y * ref_stride + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -113,7 +113,7 @@
 static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
                                     int l2w, int l2h, int xoff, int yoff,
                                     uint32_t *sse_ptr, bool use_high_bit_depth_,
-                                    vpx_bit_depth_t bit_depth) {
+                                    aom_bit_depth_t bit_depth) {
   int64_t se = 0;
   uint64_t sse = 0;
   const int w = 1 << l2w;
@@ -136,7 +136,7 @@
         const int diff = r - src[w * y + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
         uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -150,7 +150,7 @@
         const int diff = r - src16[w * y + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -165,7 +165,7 @@
                                         int l2h, int xoff, int yoff,
                                         uint32_t *sse_ptr,
                                         bool use_high_bit_depth,
-                                        vpx_bit_depth_t bit_depth) {
+                                        aom_bit_depth_t bit_depth) {
   int64_t se = 0;
   uint64_t sse = 0;
   const int w = 1 << l2w;
@@ -189,7 +189,7 @@
             ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         const uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
         const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -204,7 +204,7 @@
         const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -268,9 +268,9 @@
       : log2width(log2w), log2height(log2h), func(function) {
     use_high_bit_depth = (bit_depth_value > 0);
     if (use_high_bit_depth) {
-      bit_depth = static_cast<vpx_bit_depth_t>(bit_depth_value);
+      bit_depth = static_cast<aom_bit_depth_t>(bit_depth_value);
     } else {
-      bit_depth = VPX_BITS_8;
+      bit_depth = AOM_BITS_8;
     }
     width = 1 << log2width;
     height = 1 << log2height;
@@ -282,7 +282,7 @@
   int width, height;
   int block_size;
   Func func;
-  vpx_bit_depth_t bit_depth;
+  aom_bit_depth_t bit_depth;
   bool use_high_bit_depth;
   uint32_t mask;
 };
@@ -305,11 +305,11 @@
     rnd_.Reset(ACMRandom::DeterministicSeed());
     const size_t unit =
         use_high_bit_depth() ? sizeof(uint16_t) : sizeof(uint8_t);
-    src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size() * unit));
+    src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size() * unit));
     ref_ = new uint8_t[block_size() * unit];
     ASSERT_TRUE(src_ != NULL);
     ASSERT_TRUE(ref_ != NULL);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_high_bit_depth()) {
       // TODO(skal): remove!
       src_ = CONVERT_TO_BYTEPTR(src_);
@@ -319,7 +319,7 @@
   }
 
   virtual void TearDown() {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_high_bit_depth()) {
       // TODO(skal): remove!
       src_ = reinterpret_cast<uint8_t *>(CONVERT_TO_SHORTPTR(src_));
@@ -327,7 +327,7 @@
     }
 #endif
 
-    vpx_free(src_);
+    aom_free(src_);
     delete[] ref_;
     src_ = NULL;
     ref_ = NULL;
@@ -400,11 +400,11 @@
       if (!use_high_bit_depth()) {
         src_[j] = rnd_.Rand8();
         ref_[j] = rnd_.Rand8();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask();
         CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask();
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
     unsigned int sse1, sse2, var1, var2;
@@ -430,11 +430,11 @@
       if (!use_high_bit_depth()) {
         src_[src_ind] = rnd_.Rand8();
         ref_[ref_ind] = rnd_.Rand8();
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask();
         CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask();
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
     unsigned int sse1, sse2;
@@ -457,12 +457,12 @@
     memset(src_, 255, block_size());
     memset(ref_, 255, half);
     memset(ref_ + half, 0, half);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   } else {
-    vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << byte_shift(), block_size());
-    vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << byte_shift(), half);
-    vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+    aom_memset16(CONVERT_TO_SHORTPTR(src_), 255 << byte_shift(), block_size());
+    aom_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << byte_shift(), half);
+    aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
   unsigned int sse, var, expected;
   ASM_REGISTER_STATE_CHECK(
@@ -485,7 +485,7 @@
     const int stride = width();
     ASM_REGISTER_STATE_CHECK(params_.func(src_, stride, ref_, stride, &sse1));
     variance_ref(src_, ref_, params_.log2width, params_.log2height, stride,
-                 stride, &sse2, false, VPX_BITS_8);
+                 stride, &sse2, false, AOM_BITS_8);
     EXPECT_EQ(sse1, sse2);
   }
 }
@@ -502,7 +502,7 @@
     const int stride = width();
     ASM_REGISTER_STATE_CHECK(var1 = params_.func(src_, stride, ref_, stride));
     variance_ref(src_, ref_, params_.log2width, params_.log2height, stride,
-                 stride, &sse2, false, VPX_BITS_8);
+                 stride, &sse2, false, AOM_BITS_8);
     EXPECT_EQ(var1, sse2);
   }
 }
@@ -547,10 +547,10 @@
     height_ = 1 << log2height_;
     subpel_variance_ = get<2>(params);
     if (get<3>(params)) {
-      bit_depth_ = (vpx_bit_depth_t)get<3>(params);
+      bit_depth_ = (aom_bit_depth_t)get<3>(params);
       use_high_bit_depth_ = true;
     } else {
-      bit_depth_ = VPX_BITS_8;
+      bit_depth_ = AOM_BITS_8;
       use_high_bit_depth_ = false;
     }
     mask_ = (1 << bit_depth_) - 1;
@@ -558,18 +558,18 @@
     rnd_.Reset(ACMRandom::DeterministicSeed());
     block_size_ = width_ * height_;
     if (!use_high_bit_depth_) {
-      src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
-      sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+      src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
+      sec_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
       ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
-          vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+          aom_memalign(16, block_size_ * sizeof(uint16_t))));
       sec_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
-          vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+          aom_memalign(16, block_size_ * sizeof(uint16_t))));
       ref_ =
           CONVERT_TO_BYTEPTR(new uint16_t[block_size_ + width_ + height_ + 1]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     ASSERT_TRUE(src_ != NULL);
     ASSERT_TRUE(sec_ != NULL);
@@ -578,15 +578,15 @@
 
   virtual void TearDown() {
     if (!use_high_bit_depth_) {
-      vpx_free(src_);
+      aom_free(src_);
       delete[] ref_;
-      vpx_free(sec_);
-#if CONFIG_VP9_HIGHBITDEPTH
+      aom_free(sec_);
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
-      vpx_free(CONVERT_TO_SHORTPTR(src_));
+      aom_free(CONVERT_TO_SHORTPTR(src_));
       delete[] CONVERT_TO_SHORTPTR(ref_);
-      vpx_free(CONVERT_TO_SHORTPTR(sec_));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+      aom_free(CONVERT_TO_SHORTPTR(sec_));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     libaom_test::ClearSystemState();
   }
@@ -600,7 +600,7 @@
   uint8_t *ref_;
   uint8_t *sec_;
   bool use_high_bit_depth_;
-  vpx_bit_depth_t bit_depth_;
+  aom_bit_depth_t bit_depth_;
   int width_, log2width_;
   int height_, log2height_;
   int block_size_, mask_;
@@ -618,7 +618,7 @@
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           ref_[j] = rnd_.Rand8();
         }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < block_size_; j++) {
           CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -626,7 +626,7 @@
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
         }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -654,14 +654,14 @@
         memset(src_ + half, 255, half);
         memset(ref_, 255, half);
         memset(ref_ + half, 0, half + width_ + height_ + 1);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
-        vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
-        vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
-        vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
-        vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+        aom_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+        aom_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+        aom_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+        aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
                      half + width_ + height_ + 1);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -688,7 +688,7 @@
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           ref_[j] = rnd_.Rand8();
         }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < block_size_; j++) {
           CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -697,7 +697,7 @@
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
         }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       uint32_t sse1, sse2;
       uint32_t var1, var2;
@@ -706,682 +706,682 @@
                                                     src_, width_, &sse1, sec_));
       var2 = subpel_avg_variance_ref(ref_, src_, sec_, log2width_, log2height_,
                                      x, y, &sse2, use_high_bit_depth_,
-                                     static_cast<vpx_bit_depth_t>(bit_depth_));
+                                     static_cast<aom_bit_depth_t>(bit_depth_));
       EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
       EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
     }
   }
 }
 
-typedef MainTestClass<Get4x4SseFunc> VpxSseTest;
-typedef MainTestClass<VarianceMxNFunc> VpxMseTest;
-typedef MainTestClass<VarianceMxNFunc> VpxVarianceTest;
-typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
-typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
+typedef MainTestClass<Get4x4SseFunc> AvxSseTest;
+typedef MainTestClass<VarianceMxNFunc> AvxMseTest;
+typedef MainTestClass<VarianceMxNFunc> AvxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> AvxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> AvxSubpelAvgVarianceTest;
 
-TEST_P(VpxSseTest, RefSse) { RefTestSse(); }
-TEST_P(VpxSseTest, MaxSse) { MaxTestSse(); }
-TEST_P(VpxMseTest, RefMse) { RefTestMse(); }
-TEST_P(VpxMseTest, MaxMse) { MaxTestMse(); }
-TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
-TEST_P(VpxVarianceTest, Ref) { RefTest(); }
-TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
-TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(AvxSseTest, RefSse) { RefTestSse(); }
+TEST_P(AvxSseTest, MaxSse) { MaxTestSse(); }
+TEST_P(AvxMseTest, RefMse) { RefTestMse(); }
+TEST_P(AvxMseTest, MaxMse) { MaxTestMse(); }
+TEST_P(AvxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(AvxVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(AvxVarianceTest, OneQuarter) { OneQuarterTest(); }
 TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
 TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
-TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
-TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(AvxSubpelAvgVarianceTest, Ref) { RefTest(); }
 
 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
-                        ::testing::Values(vpx_get_mb_ss_c));
+                        ::testing::Values(aom_get_mb_ss_c));
 
 typedef TestParams<Get4x4SseFunc> SseParams;
-INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+INSTANTIATE_TEST_CASE_P(C, AvxSseTest,
                         ::testing::Values(SseParams(2, 2,
-                                                    &vpx_get4x4sse_cs_c)));
+                                                    &aom_get4x4sse_cs_c)));
 
 typedef TestParams<VarianceMxNFunc> MseParams;
-INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
-                        ::testing::Values(MseParams(4, 4, &vpx_mse16x16_c),
-                                          MseParams(4, 3, &vpx_mse16x8_c),
-                                          MseParams(3, 4, &vpx_mse8x16_c),
-                                          MseParams(3, 3, &vpx_mse8x8_c)));
+INSTANTIATE_TEST_CASE_P(C, AvxMseTest,
+                        ::testing::Values(MseParams(4, 4, &aom_mse16x16_c),
+                                          MseParams(4, 3, &aom_mse16x8_c),
+                                          MseParams(3, 4, &aom_mse8x16_c),
+                                          MseParams(3, 3, &aom_mse8x8_c)));
 
 typedef TestParams<VarianceMxNFunc> VarianceParams;
 INSTANTIATE_TEST_CASE_P(
-    C, VpxVarianceTest,
-    ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_c),
-                      VarianceParams(6, 5, &vpx_variance64x32_c),
-                      VarianceParams(5, 6, &vpx_variance32x64_c),
-                      VarianceParams(5, 5, &vpx_variance32x32_c),
-                      VarianceParams(5, 4, &vpx_variance32x16_c),
-                      VarianceParams(4, 5, &vpx_variance16x32_c),
-                      VarianceParams(4, 4, &vpx_variance16x16_c),
-                      VarianceParams(4, 3, &vpx_variance16x8_c),
-                      VarianceParams(3, 4, &vpx_variance8x16_c),
-                      VarianceParams(3, 3, &vpx_variance8x8_c),
-                      VarianceParams(3, 2, &vpx_variance8x4_c),
-                      VarianceParams(2, 3, &vpx_variance4x8_c),
-                      VarianceParams(2, 2, &vpx_variance4x4_c)));
+    C, AvxVarianceTest,
+    ::testing::Values(VarianceParams(6, 6, &aom_variance64x64_c),
+                      VarianceParams(6, 5, &aom_variance64x32_c),
+                      VarianceParams(5, 6, &aom_variance32x64_c),
+                      VarianceParams(5, 5, &aom_variance32x32_c),
+                      VarianceParams(5, 4, &aom_variance32x16_c),
+                      VarianceParams(4, 5, &aom_variance16x32_c),
+                      VarianceParams(4, 4, &aom_variance16x16_c),
+                      VarianceParams(4, 3, &aom_variance16x8_c),
+                      VarianceParams(3, 4, &aom_variance8x16_c),
+                      VarianceParams(3, 3, &aom_variance8x8_c),
+                      VarianceParams(3, 2, &aom_variance8x4_c),
+                      VarianceParams(2, 3, &aom_variance4x8_c),
+                      VarianceParams(2, 2, &aom_variance4x4_c)));
 
 INSTANTIATE_TEST_CASE_P(
-    C, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_c, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_c, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_c, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_c, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_c, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_c, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_c, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_c, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_c, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_c, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_c, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_c, 0),
-                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_c, 0)));
+    C, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_variance64x64_c, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_variance64x32_c, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_variance32x64_c, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_c, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_variance32x16_c, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_variance16x32_c, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_variance16x16_c, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_variance16x8_c, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_variance8x16_c, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_c, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_variance8x4_c, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_variance4x8_c, 0),
+                      make_tuple(2, 2, &aom_sub_pixel_variance4x4_c, 0)));
 
 INSTANTIATE_TEST_CASE_P(
-    C, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_c, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_c, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_c, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_c, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_c, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_c, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_c, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_c, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_c, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_c, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_c, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_c, 0),
-                      make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_c, 0)));
+    C, AvxSubpelAvgVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_avg_variance64x64_c, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_avg_variance64x32_c, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_avg_variance32x64_c, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_avg_variance32x32_c, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_avg_variance32x16_c, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_avg_variance16x32_c, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_avg_variance16x16_c, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_avg_variance16x8_c, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_avg_variance8x16_c, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_avg_variance8x8_c, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_avg_variance8x4_c, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_c, 0),
+                      make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_c, 0)));
 
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef MainTestClass<VarianceMxNFunc> VpxHBDMseTest;
-typedef MainTestClass<VarianceMxNFunc> VpxHBDVarianceTest;
-typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
-typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+#if CONFIG_AOM_HIGHBITDEPTH
+typedef MainTestClass<VarianceMxNFunc> AvxHBDMseTest;
+typedef MainTestClass<VarianceMxNFunc> AvxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> AvxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> AvxHBDSubpelAvgVarianceTest;
 
-TEST_P(VpxHBDMseTest, RefMse) { RefTestMse(); }
-TEST_P(VpxHBDMseTest, MaxMse) { MaxTestMse(); }
-TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
-TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
-TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
-TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
-TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
-TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxHBDMseTest, RefMse) { RefTestMse(); }
+TEST_P(AvxHBDMseTest, MaxMse) { MaxTestMse(); }
+TEST_P(AvxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(AvxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(AvxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(AvxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(AvxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(AvxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
 
 /* TODO(debargha): This test does not support the highbd version
 INSTANTIATE_TEST_CASE_P(
-    C, VpxHBDMseTest,
-    ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_c),
-                      make_tuple(4, 4, &vpx_highbd_12_mse16x8_c),
-                      make_tuple(4, 4, &vpx_highbd_12_mse8x16_c),
-                      make_tuple(4, 4, &vpx_highbd_12_mse8x8_c),
-                      make_tuple(4, 4, &vpx_highbd_10_mse16x16_c),
-                      make_tuple(4, 4, &vpx_highbd_10_mse16x8_c),
-                      make_tuple(4, 4, &vpx_highbd_10_mse8x16_c),
-                      make_tuple(4, 4, &vpx_highbd_10_mse8x8_c),
-                      make_tuple(4, 4, &vpx_highbd_8_mse16x16_c),
-                      make_tuple(4, 4, &vpx_highbd_8_mse16x8_c),
-                      make_tuple(4, 4, &vpx_highbd_8_mse8x16_c),
-                      make_tuple(4, 4, &vpx_highbd_8_mse8x8_c)));
+    C, AvxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, &aom_highbd_12_mse16x16_c),
+                      make_tuple(4, 4, &aom_highbd_12_mse16x8_c),
+                      make_tuple(4, 4, &aom_highbd_12_mse8x16_c),
+                      make_tuple(4, 4, &aom_highbd_12_mse8x8_c),
+                      make_tuple(4, 4, &aom_highbd_10_mse16x16_c),
+                      make_tuple(4, 4, &aom_highbd_10_mse16x8_c),
+                      make_tuple(4, 4, &aom_highbd_10_mse8x16_c),
+                      make_tuple(4, 4, &aom_highbd_10_mse8x8_c),
+                      make_tuple(4, 4, &aom_highbd_8_mse16x16_c),
+                      make_tuple(4, 4, &aom_highbd_8_mse16x8_c),
+                      make_tuple(4, 4, &aom_highbd_8_mse8x16_c),
+                      make_tuple(4, 4, &aom_highbd_8_mse8x8_c)));
 */
 
 const VarianceParams kArrayHBDVariance_c[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(7, 7, &vpx_highbd_12_variance128x128_c, 12),
-  VarianceParams(7, 6, &vpx_highbd_12_variance128x64_c, 12),
-  VarianceParams(6, 7, &vpx_highbd_12_variance64x128_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(6, 6, &vpx_highbd_12_variance64x64_c, 12),
-  VarianceParams(6, 5, &vpx_highbd_12_variance64x32_c, 12),
-  VarianceParams(5, 6, &vpx_highbd_12_variance32x64_c, 12),
-  VarianceParams(5, 5, &vpx_highbd_12_variance32x32_c, 12),
-  VarianceParams(5, 4, &vpx_highbd_12_variance32x16_c, 12),
-  VarianceParams(4, 5, &vpx_highbd_12_variance16x32_c, 12),
-  VarianceParams(4, 4, &vpx_highbd_12_variance16x16_c, 12),
-  VarianceParams(4, 3, &vpx_highbd_12_variance16x8_c, 12),
-  VarianceParams(3, 4, &vpx_highbd_12_variance8x16_c, 12),
-  VarianceParams(3, 3, &vpx_highbd_12_variance8x8_c, 12),
-  VarianceParams(3, 2, &vpx_highbd_12_variance8x4_c, 12),
-  VarianceParams(2, 3, &vpx_highbd_12_variance4x8_c, 12),
-  VarianceParams(2, 2, &vpx_highbd_12_variance4x4_c, 12),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(7, 7, &vpx_highbd_10_variance128x128_c, 10),
-  VarianceParams(7, 6, &vpx_highbd_10_variance128x64_c, 10),
-  VarianceParams(6, 7, &vpx_highbd_10_variance64x128_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(6, 6, &vpx_highbd_10_variance64x64_c, 10),
-  VarianceParams(6, 5, &vpx_highbd_10_variance64x32_c, 10),
-  VarianceParams(5, 6, &vpx_highbd_10_variance32x64_c, 10),
-  VarianceParams(5, 5, &vpx_highbd_10_variance32x32_c, 10),
-  VarianceParams(5, 4, &vpx_highbd_10_variance32x16_c, 10),
-  VarianceParams(4, 5, &vpx_highbd_10_variance16x32_c, 10),
-  VarianceParams(4, 4, &vpx_highbd_10_variance16x16_c, 10),
-  VarianceParams(4, 3, &vpx_highbd_10_variance16x8_c, 10),
-  VarianceParams(3, 4, &vpx_highbd_10_variance8x16_c, 10),
-  VarianceParams(3, 3, &vpx_highbd_10_variance8x8_c, 10),
-  VarianceParams(3, 2, &vpx_highbd_10_variance8x4_c, 10),
-  VarianceParams(2, 3, &vpx_highbd_10_variance4x8_c, 10),
-  VarianceParams(2, 2, &vpx_highbd_10_variance4x4_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(7, 7, &vpx_highbd_8_variance128x128_c, 8),
-  VarianceParams(7, 6, &vpx_highbd_8_variance128x64_c, 8),
-  VarianceParams(6, 7, &vpx_highbd_8_variance64x128_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  VarianceParams(6, 6, &vpx_highbd_8_variance64x64_c, 8),
-  VarianceParams(6, 5, &vpx_highbd_8_variance64x32_c, 8),
-  VarianceParams(5, 6, &vpx_highbd_8_variance32x64_c, 8),
-  VarianceParams(5, 5, &vpx_highbd_8_variance32x32_c, 8),
-  VarianceParams(5, 4, &vpx_highbd_8_variance32x16_c, 8),
-  VarianceParams(4, 5, &vpx_highbd_8_variance16x32_c, 8),
-  VarianceParams(4, 4, &vpx_highbd_8_variance16x16_c, 8),
-  VarianceParams(4, 3, &vpx_highbd_8_variance16x8_c, 8),
-  VarianceParams(3, 4, &vpx_highbd_8_variance8x16_c, 8),
-  VarianceParams(3, 3, &vpx_highbd_8_variance8x8_c, 8),
-  VarianceParams(3, 2, &vpx_highbd_8_variance8x4_c, 8),
-  VarianceParams(2, 3, &vpx_highbd_8_variance4x8_c, 8),
-  VarianceParams(2, 2, &vpx_highbd_8_variance4x4_c, 8)
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(7, 7, &aom_highbd_12_variance128x128_c, 12),
+  VarianceParams(7, 6, &aom_highbd_12_variance128x64_c, 12),
+  VarianceParams(6, 7, &aom_highbd_12_variance64x128_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(6, 6, &aom_highbd_12_variance64x64_c, 12),
+  VarianceParams(6, 5, &aom_highbd_12_variance64x32_c, 12),
+  VarianceParams(5, 6, &aom_highbd_12_variance32x64_c, 12),
+  VarianceParams(5, 5, &aom_highbd_12_variance32x32_c, 12),
+  VarianceParams(5, 4, &aom_highbd_12_variance32x16_c, 12),
+  VarianceParams(4, 5, &aom_highbd_12_variance16x32_c, 12),
+  VarianceParams(4, 4, &aom_highbd_12_variance16x16_c, 12),
+  VarianceParams(4, 3, &aom_highbd_12_variance16x8_c, 12),
+  VarianceParams(3, 4, &aom_highbd_12_variance8x16_c, 12),
+  VarianceParams(3, 3, &aom_highbd_12_variance8x8_c, 12),
+  VarianceParams(3, 2, &aom_highbd_12_variance8x4_c, 12),
+  VarianceParams(2, 3, &aom_highbd_12_variance4x8_c, 12),
+  VarianceParams(2, 2, &aom_highbd_12_variance4x4_c, 12),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(7, 7, &aom_highbd_10_variance128x128_c, 10),
+  VarianceParams(7, 6, &aom_highbd_10_variance128x64_c, 10),
+  VarianceParams(6, 7, &aom_highbd_10_variance64x128_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(6, 6, &aom_highbd_10_variance64x64_c, 10),
+  VarianceParams(6, 5, &aom_highbd_10_variance64x32_c, 10),
+  VarianceParams(5, 6, &aom_highbd_10_variance32x64_c, 10),
+  VarianceParams(5, 5, &aom_highbd_10_variance32x32_c, 10),
+  VarianceParams(5, 4, &aom_highbd_10_variance32x16_c, 10),
+  VarianceParams(4, 5, &aom_highbd_10_variance16x32_c, 10),
+  VarianceParams(4, 4, &aom_highbd_10_variance16x16_c, 10),
+  VarianceParams(4, 3, &aom_highbd_10_variance16x8_c, 10),
+  VarianceParams(3, 4, &aom_highbd_10_variance8x16_c, 10),
+  VarianceParams(3, 3, &aom_highbd_10_variance8x8_c, 10),
+  VarianceParams(3, 2, &aom_highbd_10_variance8x4_c, 10),
+  VarianceParams(2, 3, &aom_highbd_10_variance4x8_c, 10),
+  VarianceParams(2, 2, &aom_highbd_10_variance4x4_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(7, 7, &aom_highbd_8_variance128x128_c, 8),
+  VarianceParams(7, 6, &aom_highbd_8_variance128x64_c, 8),
+  VarianceParams(6, 7, &aom_highbd_8_variance64x128_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  VarianceParams(6, 6, &aom_highbd_8_variance64x64_c, 8),
+  VarianceParams(6, 5, &aom_highbd_8_variance64x32_c, 8),
+  VarianceParams(5, 6, &aom_highbd_8_variance32x64_c, 8),
+  VarianceParams(5, 5, &aom_highbd_8_variance32x32_c, 8),
+  VarianceParams(5, 4, &aom_highbd_8_variance32x16_c, 8),
+  VarianceParams(4, 5, &aom_highbd_8_variance16x32_c, 8),
+  VarianceParams(4, 4, &aom_highbd_8_variance16x16_c, 8),
+  VarianceParams(4, 3, &aom_highbd_8_variance16x8_c, 8),
+  VarianceParams(3, 4, &aom_highbd_8_variance8x16_c, 8),
+  VarianceParams(3, 3, &aom_highbd_8_variance8x8_c, 8),
+  VarianceParams(3, 2, &aom_highbd_8_variance8x4_c, 8),
+  VarianceParams(2, 3, &aom_highbd_8_variance4x8_c, 8),
+  VarianceParams(2, 2, &aom_highbd_8_variance4x4_c, 8)
 };
-INSTANTIATE_TEST_CASE_P(C, VpxHBDVarianceTest,
+INSTANTIATE_TEST_CASE_P(C, AvxHBDVarianceTest,
                         ::testing::ValuesIn(kArrayHBDVariance_c));
 
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VpxHBDVarianceTest,
+    SSE4_1, AvxHBDVarianceTest,
     ::testing::Values(
-        VarianceParams(2, 2, &vpx_highbd_8_variance4x4_sse4_1, 8),
-        VarianceParams(2, 2, &vpx_highbd_10_variance4x4_sse4_1, 10),
-        VarianceParams(2, 2, &vpx_highbd_12_variance4x4_sse4_1, 12)));
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+        VarianceParams(2, 2, &aom_highbd_8_variance4x4_sse4_1, 8),
+        VarianceParams(2, 2, &aom_highbd_10_variance4x4_sse4_1, 10),
+        VarianceParams(2, 2, &aom_highbd_12_variance4x4_sse4_1, 12)));
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
 
-const VpxHBDSubpelVarianceTest::ParamType kArrayHBDSubpelVariance_c[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_8_sub_pixel_variance128x128_c, 8),
-  make_tuple(7, 6, &vpx_highbd_8_sub_pixel_variance128x64_c, 8),
-  make_tuple(6, 7, &vpx_highbd_8_sub_pixel_variance64x128_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_c, 8),
-  make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_c, 8),
-  make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_c, 8),
-  make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_c, 8),
-  make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_c, 8),
-  make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_c, 8),
-  make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_c, 8),
-  make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_c, 8),
-  make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_c, 8),
-  make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_c, 8),
-  make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_c, 8),
-  make_tuple(2, 3, &vpx_highbd_8_sub_pixel_variance4x8_c, 8),
-  make_tuple(2, 2, &vpx_highbd_8_sub_pixel_variance4x4_c, 8),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_10_sub_pixel_variance128x128_c, 10),
-  make_tuple(7, 6, &vpx_highbd_10_sub_pixel_variance128x64_c, 10),
-  make_tuple(6, 7, &vpx_highbd_10_sub_pixel_variance64x128_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_c, 10),
-  make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_c, 10),
-  make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_c, 10),
-  make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_c, 10),
-  make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_c, 10),
-  make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_c, 10),
-  make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_c, 10),
-  make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_c, 10),
-  make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_c, 10),
-  make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_c, 10),
-  make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_c, 10),
-  make_tuple(2, 3, &vpx_highbd_10_sub_pixel_variance4x8_c, 10),
-  make_tuple(2, 2, &vpx_highbd_10_sub_pixel_variance4x4_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_12_sub_pixel_variance128x128_c, 12),
-  make_tuple(7, 6, &vpx_highbd_12_sub_pixel_variance128x64_c, 12),
-  make_tuple(6, 7, &vpx_highbd_12_sub_pixel_variance64x128_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_c, 12),
-  make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_c, 12),
-  make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_c, 12),
-  make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_c, 12),
-  make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_c, 12),
-  make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_c, 12),
-  make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_c, 12),
-  make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_c, 12),
-  make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_c, 12),
-  make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_c, 12),
-  make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_c, 12),
-  make_tuple(2, 3, &vpx_highbd_12_sub_pixel_variance4x8_c, 12),
-  make_tuple(2, 2, &vpx_highbd_12_sub_pixel_variance4x4_c, 12),
+const AvxHBDSubpelVarianceTest::ParamType kArrayHBDSubpelVariance_c[] = {
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_8_sub_pixel_variance128x128_c, 8),
+  make_tuple(7, 6, &aom_highbd_8_sub_pixel_variance128x64_c, 8),
+  make_tuple(6, 7, &aom_highbd_8_sub_pixel_variance64x128_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_8_sub_pixel_variance64x64_c, 8),
+  make_tuple(6, 5, &aom_highbd_8_sub_pixel_variance64x32_c, 8),
+  make_tuple(5, 6, &aom_highbd_8_sub_pixel_variance32x64_c, 8),
+  make_tuple(5, 5, &aom_highbd_8_sub_pixel_variance32x32_c, 8),
+  make_tuple(5, 4, &aom_highbd_8_sub_pixel_variance32x16_c, 8),
+  make_tuple(4, 5, &aom_highbd_8_sub_pixel_variance16x32_c, 8),
+  make_tuple(4, 4, &aom_highbd_8_sub_pixel_variance16x16_c, 8),
+  make_tuple(4, 3, &aom_highbd_8_sub_pixel_variance16x8_c, 8),
+  make_tuple(3, 4, &aom_highbd_8_sub_pixel_variance8x16_c, 8),
+  make_tuple(3, 3, &aom_highbd_8_sub_pixel_variance8x8_c, 8),
+  make_tuple(3, 2, &aom_highbd_8_sub_pixel_variance8x4_c, 8),
+  make_tuple(2, 3, &aom_highbd_8_sub_pixel_variance4x8_c, 8),
+  make_tuple(2, 2, &aom_highbd_8_sub_pixel_variance4x4_c, 8),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_10_sub_pixel_variance128x128_c, 10),
+  make_tuple(7, 6, &aom_highbd_10_sub_pixel_variance128x64_c, 10),
+  make_tuple(6, 7, &aom_highbd_10_sub_pixel_variance64x128_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_10_sub_pixel_variance64x64_c, 10),
+  make_tuple(6, 5, &aom_highbd_10_sub_pixel_variance64x32_c, 10),
+  make_tuple(5, 6, &aom_highbd_10_sub_pixel_variance32x64_c, 10),
+  make_tuple(5, 5, &aom_highbd_10_sub_pixel_variance32x32_c, 10),
+  make_tuple(5, 4, &aom_highbd_10_sub_pixel_variance32x16_c, 10),
+  make_tuple(4, 5, &aom_highbd_10_sub_pixel_variance16x32_c, 10),
+  make_tuple(4, 4, &aom_highbd_10_sub_pixel_variance16x16_c, 10),
+  make_tuple(4, 3, &aom_highbd_10_sub_pixel_variance16x8_c, 10),
+  make_tuple(3, 4, &aom_highbd_10_sub_pixel_variance8x16_c, 10),
+  make_tuple(3, 3, &aom_highbd_10_sub_pixel_variance8x8_c, 10),
+  make_tuple(3, 2, &aom_highbd_10_sub_pixel_variance8x4_c, 10),
+  make_tuple(2, 3, &aom_highbd_10_sub_pixel_variance4x8_c, 10),
+  make_tuple(2, 2, &aom_highbd_10_sub_pixel_variance4x4_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_12_sub_pixel_variance128x128_c, 12),
+  make_tuple(7, 6, &aom_highbd_12_sub_pixel_variance128x64_c, 12),
+  make_tuple(6, 7, &aom_highbd_12_sub_pixel_variance64x128_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_12_sub_pixel_variance64x64_c, 12),
+  make_tuple(6, 5, &aom_highbd_12_sub_pixel_variance64x32_c, 12),
+  make_tuple(5, 6, &aom_highbd_12_sub_pixel_variance32x64_c, 12),
+  make_tuple(5, 5, &aom_highbd_12_sub_pixel_variance32x32_c, 12),
+  make_tuple(5, 4, &aom_highbd_12_sub_pixel_variance32x16_c, 12),
+  make_tuple(4, 5, &aom_highbd_12_sub_pixel_variance16x32_c, 12),
+  make_tuple(4, 4, &aom_highbd_12_sub_pixel_variance16x16_c, 12),
+  make_tuple(4, 3, &aom_highbd_12_sub_pixel_variance16x8_c, 12),
+  make_tuple(3, 4, &aom_highbd_12_sub_pixel_variance8x16_c, 12),
+  make_tuple(3, 3, &aom_highbd_12_sub_pixel_variance8x8_c, 12),
+  make_tuple(3, 2, &aom_highbd_12_sub_pixel_variance8x4_c, 12),
+  make_tuple(2, 3, &aom_highbd_12_sub_pixel_variance4x8_c, 12),
+  make_tuple(2, 2, &aom_highbd_12_sub_pixel_variance4x4_c, 12),
 };
-INSTANTIATE_TEST_CASE_P(C, VpxHBDSubpelVarianceTest,
+INSTANTIATE_TEST_CASE_P(C, AvxHBDSubpelVarianceTest,
                         ::testing::ValuesIn(kArrayHBDSubpelVariance_c));
 
-const VpxHBDSubpelAvgVarianceTest::ParamType kArrayHBDSubpelAvgVariance_c[] = {
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_8_sub_pixel_avg_variance128x128_c, 8),
-  make_tuple(7, 6, &vpx_highbd_8_sub_pixel_avg_variance128x64_c, 8),
-  make_tuple(6, 7, &vpx_highbd_8_sub_pixel_avg_variance64x128_c, 8),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_c, 8),
-  make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_c, 8),
-  make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_c, 8),
-  make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_c, 8),
-  make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_c, 8),
-  make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_c, 8),
-  make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_c, 8),
-  make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_c, 8),
-  make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_c, 8),
-  make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_c, 8),
-  make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_c, 8),
-  make_tuple(2, 3, &vpx_highbd_8_sub_pixel_avg_variance4x8_c, 8),
-  make_tuple(2, 2, &vpx_highbd_8_sub_pixel_avg_variance4x4_c, 8),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_10_sub_pixel_avg_variance128x128_c, 10),
-  make_tuple(7, 6, &vpx_highbd_10_sub_pixel_avg_variance128x64_c, 10),
-  make_tuple(6, 7, &vpx_highbd_10_sub_pixel_avg_variance64x128_c, 10),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_c, 10),
-  make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_c, 10),
-  make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_c, 10),
-  make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_c, 10),
-  make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_c, 10),
-  make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_c, 10),
-  make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_c, 10),
-  make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_c, 10),
-  make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_c, 10),
-  make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_c, 10),
-  make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_c, 10),
-  make_tuple(2, 3, &vpx_highbd_10_sub_pixel_avg_variance4x8_c, 10),
-  make_tuple(2, 2, &vpx_highbd_10_sub_pixel_avg_variance4x4_c, 10),
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(7, 7, &vpx_highbd_12_sub_pixel_avg_variance128x128_c, 12),
-  make_tuple(7, 6, &vpx_highbd_12_sub_pixel_avg_variance128x64_c, 12),
-  make_tuple(6, 7, &vpx_highbd_12_sub_pixel_avg_variance64x128_c, 12),
-#endif  // CONFIG_VP10 && CONFIG_EXT_PARTITION
-  make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_c, 12),
-  make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_c, 12),
-  make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_c, 12),
-  make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_c, 12),
-  make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_c, 12),
-  make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_c, 12),
-  make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_c, 12),
-  make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_c, 12),
-  make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_c, 12),
-  make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_c, 12),
-  make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_c, 12),
-  make_tuple(2, 3, &vpx_highbd_12_sub_pixel_avg_variance4x8_c, 12),
-  make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12)
+const AvxHBDSubpelAvgVarianceTest::ParamType kArrayHBDSubpelAvgVariance_c[] = {
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_8_sub_pixel_avg_variance128x128_c, 8),
+  make_tuple(7, 6, &aom_highbd_8_sub_pixel_avg_variance128x64_c, 8),
+  make_tuple(6, 7, &aom_highbd_8_sub_pixel_avg_variance64x128_c, 8),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_8_sub_pixel_avg_variance64x64_c, 8),
+  make_tuple(6, 5, &aom_highbd_8_sub_pixel_avg_variance64x32_c, 8),
+  make_tuple(5, 6, &aom_highbd_8_sub_pixel_avg_variance32x64_c, 8),
+  make_tuple(5, 5, &aom_highbd_8_sub_pixel_avg_variance32x32_c, 8),
+  make_tuple(5, 4, &aom_highbd_8_sub_pixel_avg_variance32x16_c, 8),
+  make_tuple(4, 5, &aom_highbd_8_sub_pixel_avg_variance16x32_c, 8),
+  make_tuple(4, 4, &aom_highbd_8_sub_pixel_avg_variance16x16_c, 8),
+  make_tuple(4, 3, &aom_highbd_8_sub_pixel_avg_variance16x8_c, 8),
+  make_tuple(3, 4, &aom_highbd_8_sub_pixel_avg_variance8x16_c, 8),
+  make_tuple(3, 3, &aom_highbd_8_sub_pixel_avg_variance8x8_c, 8),
+  make_tuple(3, 2, &aom_highbd_8_sub_pixel_avg_variance8x4_c, 8),
+  make_tuple(2, 3, &aom_highbd_8_sub_pixel_avg_variance4x8_c, 8),
+  make_tuple(2, 2, &aom_highbd_8_sub_pixel_avg_variance4x4_c, 8),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_10_sub_pixel_avg_variance128x128_c, 10),
+  make_tuple(7, 6, &aom_highbd_10_sub_pixel_avg_variance128x64_c, 10),
+  make_tuple(6, 7, &aom_highbd_10_sub_pixel_avg_variance64x128_c, 10),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_10_sub_pixel_avg_variance64x64_c, 10),
+  make_tuple(6, 5, &aom_highbd_10_sub_pixel_avg_variance64x32_c, 10),
+  make_tuple(5, 6, &aom_highbd_10_sub_pixel_avg_variance32x64_c, 10),
+  make_tuple(5, 5, &aom_highbd_10_sub_pixel_avg_variance32x32_c, 10),
+  make_tuple(5, 4, &aom_highbd_10_sub_pixel_avg_variance32x16_c, 10),
+  make_tuple(4, 5, &aom_highbd_10_sub_pixel_avg_variance16x32_c, 10),
+  make_tuple(4, 4, &aom_highbd_10_sub_pixel_avg_variance16x16_c, 10),
+  make_tuple(4, 3, &aom_highbd_10_sub_pixel_avg_variance16x8_c, 10),
+  make_tuple(3, 4, &aom_highbd_10_sub_pixel_avg_variance8x16_c, 10),
+  make_tuple(3, 3, &aom_highbd_10_sub_pixel_avg_variance8x8_c, 10),
+  make_tuple(3, 2, &aom_highbd_10_sub_pixel_avg_variance8x4_c, 10),
+  make_tuple(2, 3, &aom_highbd_10_sub_pixel_avg_variance4x8_c, 10),
+  make_tuple(2, 2, &aom_highbd_10_sub_pixel_avg_variance4x4_c, 10),
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(7, 7, &aom_highbd_12_sub_pixel_avg_variance128x128_c, 12),
+  make_tuple(7, 6, &aom_highbd_12_sub_pixel_avg_variance128x64_c, 12),
+  make_tuple(6, 7, &aom_highbd_12_sub_pixel_avg_variance64x128_c, 12),
+#endif  // CONFIG_AV1 && CONFIG_EXT_PARTITION
+  make_tuple(6, 6, &aom_highbd_12_sub_pixel_avg_variance64x64_c, 12),
+  make_tuple(6, 5, &aom_highbd_12_sub_pixel_avg_variance64x32_c, 12),
+  make_tuple(5, 6, &aom_highbd_12_sub_pixel_avg_variance32x64_c, 12),
+  make_tuple(5, 5, &aom_highbd_12_sub_pixel_avg_variance32x32_c, 12),
+  make_tuple(5, 4, &aom_highbd_12_sub_pixel_avg_variance32x16_c, 12),
+  make_tuple(4, 5, &aom_highbd_12_sub_pixel_avg_variance16x32_c, 12),
+  make_tuple(4, 4, &aom_highbd_12_sub_pixel_avg_variance16x16_c, 12),
+  make_tuple(4, 3, &aom_highbd_12_sub_pixel_avg_variance16x8_c, 12),
+  make_tuple(3, 4, &aom_highbd_12_sub_pixel_avg_variance8x16_c, 12),
+  make_tuple(3, 3, &aom_highbd_12_sub_pixel_avg_variance8x8_c, 12),
+  make_tuple(3, 2, &aom_highbd_12_sub_pixel_avg_variance8x4_c, 12),
+  make_tuple(2, 3, &aom_highbd_12_sub_pixel_avg_variance4x8_c, 12),
+  make_tuple(2, 2, &aom_highbd_12_sub_pixel_avg_variance4x4_c, 12)
 };
-INSTANTIATE_TEST_CASE_P(C, VpxHBDSubpelAvgVarianceTest,
+INSTANTIATE_TEST_CASE_P(C, AvxHBDSubpelAvgVarianceTest,
                         ::testing::ValuesIn(kArrayHBDSubpelAvgVariance_c));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
-                        ::testing::Values(vpx_get_mb_ss_sse2));
+                        ::testing::Values(aom_get_mb_ss_sse2));
 
-INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
-                        ::testing::Values(MseParams(4, 4, &vpx_mse16x16_sse2),
-                                          MseParams(4, 3, &vpx_mse16x8_sse2),
-                                          MseParams(3, 4, &vpx_mse8x16_sse2),
-                                          MseParams(3, 3, &vpx_mse8x8_sse2)));
+INSTANTIATE_TEST_CASE_P(SSE2, AvxMseTest,
+                        ::testing::Values(MseParams(4, 4, &aom_mse16x16_sse2),
+                                          MseParams(4, 3, &aom_mse16x8_sse2),
+                                          MseParams(3, 4, &aom_mse8x16_sse2),
+                                          MseParams(3, 3, &aom_mse8x8_sse2)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxVarianceTest,
-    ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_sse2),
-                      VarianceParams(6, 5, &vpx_variance64x32_sse2),
-                      VarianceParams(5, 6, &vpx_variance32x64_sse2),
-                      VarianceParams(5, 5, &vpx_variance32x32_sse2),
-                      VarianceParams(5, 4, &vpx_variance32x16_sse2),
-                      VarianceParams(4, 5, &vpx_variance16x32_sse2),
-                      VarianceParams(4, 4, &vpx_variance16x16_sse2),
-                      VarianceParams(4, 3, &vpx_variance16x8_sse2),
-                      VarianceParams(3, 4, &vpx_variance8x16_sse2),
-                      VarianceParams(3, 3, &vpx_variance8x8_sse2),
-                      VarianceParams(3, 2, &vpx_variance8x4_sse2),
-                      VarianceParams(2, 3, &vpx_variance4x8_sse2),
-                      VarianceParams(2, 2, &vpx_variance4x4_sse2)));
+    SSE2, AvxVarianceTest,
+    ::testing::Values(VarianceParams(6, 6, &aom_variance64x64_sse2),
+                      VarianceParams(6, 5, &aom_variance64x32_sse2),
+                      VarianceParams(5, 6, &aom_variance32x64_sse2),
+                      VarianceParams(5, 5, &aom_variance32x32_sse2),
+                      VarianceParams(5, 4, &aom_variance32x16_sse2),
+                      VarianceParams(4, 5, &aom_variance16x32_sse2),
+                      VarianceParams(4, 4, &aom_variance16x16_sse2),
+                      VarianceParams(4, 3, &aom_variance16x8_sse2),
+                      VarianceParams(3, 4, &aom_variance8x16_sse2),
+                      VarianceParams(3, 3, &aom_variance8x8_sse2),
+                      VarianceParams(3, 2, &aom_variance8x4_sse2),
+                      VarianceParams(2, 3, &aom_variance4x8_sse2),
+                      VarianceParams(2, 2, &aom_variance4x4_sse2)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_sse2, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_sse2, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_sse2, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_sse2, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_sse2, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_sse2, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_sse2, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_sse2, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_sse2, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_sse2, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_sse2, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse2, 0),
-                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse2, 0)));
+    SSE2, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_variance64x64_sse2, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_variance64x32_sse2, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_variance32x64_sse2, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_sse2, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_variance32x16_sse2, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_variance16x32_sse2, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_variance16x16_sse2, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_variance16x8_sse2, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_variance8x16_sse2, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_sse2, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_variance8x4_sse2, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_variance4x8_sse2, 0),
+                      make_tuple(2, 2, &aom_sub_pixel_variance4x4_sse2, 0)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxSubpelAvgVarianceTest,
+    SSE2, AvxSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_sse2, 0),
-        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_sse2, 0),
-        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_sse2, 0),
-        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_sse2, 0),
-        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_sse2, 0),
-        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_sse2, 0),
-        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_sse2, 0),
-        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_sse2, 0),
-        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_sse2, 0),
-        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_sse2, 0),
-        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_sse2, 0),
-        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse2, 0),
-        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse2, 0)));
+        make_tuple(6, 6, &aom_sub_pixel_avg_variance64x64_sse2, 0),
+        make_tuple(6, 5, &aom_sub_pixel_avg_variance64x32_sse2, 0),
+        make_tuple(5, 6, &aom_sub_pixel_avg_variance32x64_sse2, 0),
+        make_tuple(5, 5, &aom_sub_pixel_avg_variance32x32_sse2, 0),
+        make_tuple(5, 4, &aom_sub_pixel_avg_variance32x16_sse2, 0),
+        make_tuple(4, 5, &aom_sub_pixel_avg_variance16x32_sse2, 0),
+        make_tuple(4, 4, &aom_sub_pixel_avg_variance16x16_sse2, 0),
+        make_tuple(4, 3, &aom_sub_pixel_avg_variance16x8_sse2, 0),
+        make_tuple(3, 4, &aom_sub_pixel_avg_variance8x16_sse2, 0),
+        make_tuple(3, 3, &aom_sub_pixel_avg_variance8x8_sse2, 0),
+        make_tuple(3, 2, &aom_sub_pixel_avg_variance8x4_sse2, 0),
+        make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_sse2, 0),
+        make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_sse2, 0)));
 
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VpxSubpelVarianceTest,
+    SSE4_1, AvxSubpelVarianceTest,
     ::testing::Values(
-        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_variance4x4_sse4_1, 8),
-        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_variance4x4_sse4_1, 10),
-        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_variance4x4_sse4_1, 12)));
+        make_tuple(2, 2, &aom_highbd_8_sub_pixel_variance4x4_sse4_1, 8),
+        make_tuple(2, 2, &aom_highbd_10_sub_pixel_variance4x4_sse4_1, 10),
+        make_tuple(2, 2, &aom_highbd_12_sub_pixel_variance4x4_sse4_1, 12)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE4_1, VpxSubpelAvgVarianceTest,
+    SSE4_1, AvxSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_avg_variance4x4_sse4_1, 8),
-        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_avg_variance4x4_sse4_1, 10),
-        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_sse4_1, 12)));
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(2, 2, &aom_highbd_8_sub_pixel_avg_variance4x4_sse4_1, 8),
+        make_tuple(2, 2, &aom_highbd_10_sub_pixel_avg_variance4x4_sse4_1, 10),
+        make_tuple(2, 2, &aom_highbd_12_sub_pixel_avg_variance4x4_sse4_1, 12)));
+#endif  // HAVE_SSE4_1 && CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 /* TODO(debargha): This test does not support the highbd version
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxHBDMseTest,
-    ::testing::Values(MseParams(4, 4, &vpx_highbd_12_mse16x16_sse2),
-                      MseParams(4, 3, &vpx_highbd_12_mse16x8_sse2),
-                      MseParams(3, 4, &vpx_highbd_12_mse8x16_sse2),
-                      MseParams(3, 3, &vpx_highbd_12_mse8x8_sse2),
-                      MseParams(4, 4, &vpx_highbd_10_mse16x16_sse2),
-                      MseParams(4, 3, &vpx_highbd_10_mse16x8_sse2),
-                      MseParams(3, 4, &vpx_highbd_10_mse8x16_sse2),
-                      MseParams(3, 3, &vpx_highbd_10_mse8x8_sse2),
-                      MseParams(4, 4, &vpx_highbd_8_mse16x16_sse2),
-                      MseParams(4, 3, &vpx_highbd_8_mse16x8_sse2),
-                      MseParams(3, 4, &vpx_highbd_8_mse8x16_sse2),
-                      MseParams(3, 3, &vpx_highbd_8_mse8x8_sse2)));
+    SSE2, AvxHBDMseTest,
+    ::testing::Values(MseParams(4, 4, &aom_highbd_12_mse16x16_sse2),
+                      MseParams(4, 3, &aom_highbd_12_mse16x8_sse2),
+                      MseParams(3, 4, &aom_highbd_12_mse8x16_sse2),
+                      MseParams(3, 3, &aom_highbd_12_mse8x8_sse2),
+                      MseParams(4, 4, &aom_highbd_10_mse16x16_sse2),
+                      MseParams(4, 3, &aom_highbd_10_mse16x8_sse2),
+                      MseParams(3, 4, &aom_highbd_10_mse8x16_sse2),
+                      MseParams(3, 3, &aom_highbd_10_mse8x8_sse2),
+                      MseParams(4, 4, &aom_highbd_8_mse16x16_sse2),
+                      MseParams(4, 3, &aom_highbd_8_mse16x8_sse2),
+                      MseParams(3, 4, &aom_highbd_8_mse8x16_sse2),
+                      MseParams(3, 3, &aom_highbd_8_mse8x8_sse2)));
 */
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxHBDVarianceTest,
+    SSE2, AvxHBDVarianceTest,
     ::testing::Values(
-        VarianceParams(6, 6, &vpx_highbd_12_variance64x64_sse2, 12),
-        VarianceParams(6, 5, &vpx_highbd_12_variance64x32_sse2, 12),
-        VarianceParams(5, 6, &vpx_highbd_12_variance32x64_sse2, 12),
-        VarianceParams(5, 5, &vpx_highbd_12_variance32x32_sse2, 12),
-        VarianceParams(5, 4, &vpx_highbd_12_variance32x16_sse2, 12),
-        VarianceParams(4, 5, &vpx_highbd_12_variance16x32_sse2, 12),
-        VarianceParams(4, 4, &vpx_highbd_12_variance16x16_sse2, 12),
-        VarianceParams(4, 3, &vpx_highbd_12_variance16x8_sse2, 12),
-        VarianceParams(3, 4, &vpx_highbd_12_variance8x16_sse2, 12),
-        VarianceParams(3, 3, &vpx_highbd_12_variance8x8_sse2, 12),
-        VarianceParams(6, 6, &vpx_highbd_10_variance64x64_sse2, 10),
-        VarianceParams(6, 5, &vpx_highbd_10_variance64x32_sse2, 10),
-        VarianceParams(5, 6, &vpx_highbd_10_variance32x64_sse2, 10),
-        VarianceParams(5, 5, &vpx_highbd_10_variance32x32_sse2, 10),
-        VarianceParams(5, 4, &vpx_highbd_10_variance32x16_sse2, 10),
-        VarianceParams(4, 5, &vpx_highbd_10_variance16x32_sse2, 10),
-        VarianceParams(4, 4, &vpx_highbd_10_variance16x16_sse2, 10),
-        VarianceParams(4, 3, &vpx_highbd_10_variance16x8_sse2, 10),
-        VarianceParams(3, 4, &vpx_highbd_10_variance8x16_sse2, 10),
-        VarianceParams(3, 3, &vpx_highbd_10_variance8x8_sse2, 10),
-        VarianceParams(6, 6, &vpx_highbd_8_variance64x64_sse2, 8),
-        VarianceParams(6, 5, &vpx_highbd_8_variance64x32_sse2, 8),
-        VarianceParams(5, 6, &vpx_highbd_8_variance32x64_sse2, 8),
-        VarianceParams(5, 5, &vpx_highbd_8_variance32x32_sse2, 8),
-        VarianceParams(5, 4, &vpx_highbd_8_variance32x16_sse2, 8),
-        VarianceParams(4, 5, &vpx_highbd_8_variance16x32_sse2, 8),
-        VarianceParams(4, 4, &vpx_highbd_8_variance16x16_sse2, 8),
-        VarianceParams(4, 3, &vpx_highbd_8_variance16x8_sse2, 8),
-        VarianceParams(3, 4, &vpx_highbd_8_variance8x16_sse2, 8),
-        VarianceParams(3, 3, &vpx_highbd_8_variance8x8_sse2, 8)));
+        VarianceParams(6, 6, &aom_highbd_12_variance64x64_sse2, 12),
+        VarianceParams(6, 5, &aom_highbd_12_variance64x32_sse2, 12),
+        VarianceParams(5, 6, &aom_highbd_12_variance32x64_sse2, 12),
+        VarianceParams(5, 5, &aom_highbd_12_variance32x32_sse2, 12),
+        VarianceParams(5, 4, &aom_highbd_12_variance32x16_sse2, 12),
+        VarianceParams(4, 5, &aom_highbd_12_variance16x32_sse2, 12),
+        VarianceParams(4, 4, &aom_highbd_12_variance16x16_sse2, 12),
+        VarianceParams(4, 3, &aom_highbd_12_variance16x8_sse2, 12),
+        VarianceParams(3, 4, &aom_highbd_12_variance8x16_sse2, 12),
+        VarianceParams(3, 3, &aom_highbd_12_variance8x8_sse2, 12),
+        VarianceParams(6, 6, &aom_highbd_10_variance64x64_sse2, 10),
+        VarianceParams(6, 5, &aom_highbd_10_variance64x32_sse2, 10),
+        VarianceParams(5, 6, &aom_highbd_10_variance32x64_sse2, 10),
+        VarianceParams(5, 5, &aom_highbd_10_variance32x32_sse2, 10),
+        VarianceParams(5, 4, &aom_highbd_10_variance32x16_sse2, 10),
+        VarianceParams(4, 5, &aom_highbd_10_variance16x32_sse2, 10),
+        VarianceParams(4, 4, &aom_highbd_10_variance16x16_sse2, 10),
+        VarianceParams(4, 3, &aom_highbd_10_variance16x8_sse2, 10),
+        VarianceParams(3, 4, &aom_highbd_10_variance8x16_sse2, 10),
+        VarianceParams(3, 3, &aom_highbd_10_variance8x8_sse2, 10),
+        VarianceParams(6, 6, &aom_highbd_8_variance64x64_sse2, 8),
+        VarianceParams(6, 5, &aom_highbd_8_variance64x32_sse2, 8),
+        VarianceParams(5, 6, &aom_highbd_8_variance32x64_sse2, 8),
+        VarianceParams(5, 5, &aom_highbd_8_variance32x32_sse2, 8),
+        VarianceParams(5, 4, &aom_highbd_8_variance32x16_sse2, 8),
+        VarianceParams(4, 5, &aom_highbd_8_variance16x32_sse2, 8),
+        VarianceParams(4, 4, &aom_highbd_8_variance16x16_sse2, 8),
+        VarianceParams(4, 3, &aom_highbd_8_variance16x8_sse2, 8),
+        VarianceParams(3, 4, &aom_highbd_8_variance8x16_sse2, 8),
+        VarianceParams(3, 3, &aom_highbd_8_variance8x8_sse2, 8)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxHBDSubpelVarianceTest,
+    SSE2, AvxHBDSubpelVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_sse2, 12),
-        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_sse2, 12),
-        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_sse2, 12),
-        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_sse2, 12),
-        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_sse2, 12),
-        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_sse2, 12),
-        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_sse2, 12),
-        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_sse2, 12),
-        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_sse2, 12),
-        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_sse2, 12),
-        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_sse2, 12),
-        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_sse2, 10),
-        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_sse2, 10),
-        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_sse2, 10),
-        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_sse2, 10),
-        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_sse2, 10),
-        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_sse2, 10),
-        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_sse2, 10),
-        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_sse2, 10),
-        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_sse2, 10),
-        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_sse2, 10),
-        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_sse2, 10),
-        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_sse2, 8),
-        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_sse2, 8),
-        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_sse2, 8),
-        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_sse2, 8),
-        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_sse2, 8),
-        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_sse2, 8),
-        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_sse2, 8),
-        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_sse2, 8),
-        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_sse2, 8),
-        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_sse2, 8),
-        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_sse2, 8)));
+        make_tuple(6, 6, &aom_highbd_12_sub_pixel_variance64x64_sse2, 12),
+        make_tuple(6, 5, &aom_highbd_12_sub_pixel_variance64x32_sse2, 12),
+        make_tuple(5, 6, &aom_highbd_12_sub_pixel_variance32x64_sse2, 12),
+        make_tuple(5, 5, &aom_highbd_12_sub_pixel_variance32x32_sse2, 12),
+        make_tuple(5, 4, &aom_highbd_12_sub_pixel_variance32x16_sse2, 12),
+        make_tuple(4, 5, &aom_highbd_12_sub_pixel_variance16x32_sse2, 12),
+        make_tuple(4, 4, &aom_highbd_12_sub_pixel_variance16x16_sse2, 12),
+        make_tuple(4, 3, &aom_highbd_12_sub_pixel_variance16x8_sse2, 12),
+        make_tuple(3, 4, &aom_highbd_12_sub_pixel_variance8x16_sse2, 12),
+        make_tuple(3, 3, &aom_highbd_12_sub_pixel_variance8x8_sse2, 12),
+        make_tuple(3, 2, &aom_highbd_12_sub_pixel_variance8x4_sse2, 12),
+        make_tuple(6, 6, &aom_highbd_10_sub_pixel_variance64x64_sse2, 10),
+        make_tuple(6, 5, &aom_highbd_10_sub_pixel_variance64x32_sse2, 10),
+        make_tuple(5, 6, &aom_highbd_10_sub_pixel_variance32x64_sse2, 10),
+        make_tuple(5, 5, &aom_highbd_10_sub_pixel_variance32x32_sse2, 10),
+        make_tuple(5, 4, &aom_highbd_10_sub_pixel_variance32x16_sse2, 10),
+        make_tuple(4, 5, &aom_highbd_10_sub_pixel_variance16x32_sse2, 10),
+        make_tuple(4, 4, &aom_highbd_10_sub_pixel_variance16x16_sse2, 10),
+        make_tuple(4, 3, &aom_highbd_10_sub_pixel_variance16x8_sse2, 10),
+        make_tuple(3, 4, &aom_highbd_10_sub_pixel_variance8x16_sse2, 10),
+        make_tuple(3, 3, &aom_highbd_10_sub_pixel_variance8x8_sse2, 10),
+        make_tuple(3, 2, &aom_highbd_10_sub_pixel_variance8x4_sse2, 10),
+        make_tuple(6, 6, &aom_highbd_8_sub_pixel_variance64x64_sse2, 8),
+        make_tuple(6, 5, &aom_highbd_8_sub_pixel_variance64x32_sse2, 8),
+        make_tuple(5, 6, &aom_highbd_8_sub_pixel_variance32x64_sse2, 8),
+        make_tuple(5, 5, &aom_highbd_8_sub_pixel_variance32x32_sse2, 8),
+        make_tuple(5, 4, &aom_highbd_8_sub_pixel_variance32x16_sse2, 8),
+        make_tuple(4, 5, &aom_highbd_8_sub_pixel_variance16x32_sse2, 8),
+        make_tuple(4, 4, &aom_highbd_8_sub_pixel_variance16x16_sse2, 8),
+        make_tuple(4, 3, &aom_highbd_8_sub_pixel_variance16x8_sse2, 8),
+        make_tuple(3, 4, &aom_highbd_8_sub_pixel_variance8x16_sse2, 8),
+        make_tuple(3, 3, &aom_highbd_8_sub_pixel_variance8x8_sse2, 8),
+        make_tuple(3, 2, &aom_highbd_8_sub_pixel_variance8x4_sse2, 8)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxHBDSubpelAvgVarianceTest,
+    SSE2, AvxHBDSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_sse2, 12),
-        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_sse2, 12),
-        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_sse2, 12),
-        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_sse2, 12),
-        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_sse2, 12),
-        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_sse2, 12),
-        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_sse2, 12),
-        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_sse2, 12),
-        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_sse2, 12),
-        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_sse2, 12),
-        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_sse2, 12),
-        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_sse2, 10),
-        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_sse2, 10),
-        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_sse2, 10),
-        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_sse2, 10),
-        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_sse2, 10),
-        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_sse2, 10),
-        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_sse2, 10),
-        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_sse2, 10),
-        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_sse2, 10),
-        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_sse2, 10),
-        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_sse2, 10),
-        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_sse2, 8),
-        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_sse2, 8),
-        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_sse2, 8),
-        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_sse2, 8),
-        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_sse2, 8),
-        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_sse2, 8),
-        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_sse2, 8),
-        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_sse2, 8),
-        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_sse2, 8),
-        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
-        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(6, 6, &aom_highbd_12_sub_pixel_avg_variance64x64_sse2, 12),
+        make_tuple(6, 5, &aom_highbd_12_sub_pixel_avg_variance64x32_sse2, 12),
+        make_tuple(5, 6, &aom_highbd_12_sub_pixel_avg_variance32x64_sse2, 12),
+        make_tuple(5, 5, &aom_highbd_12_sub_pixel_avg_variance32x32_sse2, 12),
+        make_tuple(5, 4, &aom_highbd_12_sub_pixel_avg_variance32x16_sse2, 12),
+        make_tuple(4, 5, &aom_highbd_12_sub_pixel_avg_variance16x32_sse2, 12),
+        make_tuple(4, 4, &aom_highbd_12_sub_pixel_avg_variance16x16_sse2, 12),
+        make_tuple(4, 3, &aom_highbd_12_sub_pixel_avg_variance16x8_sse2, 12),
+        make_tuple(3, 4, &aom_highbd_12_sub_pixel_avg_variance8x16_sse2, 12),
+        make_tuple(3, 3, &aom_highbd_12_sub_pixel_avg_variance8x8_sse2, 12),
+        make_tuple(3, 2, &aom_highbd_12_sub_pixel_avg_variance8x4_sse2, 12),
+        make_tuple(6, 6, &aom_highbd_10_sub_pixel_avg_variance64x64_sse2, 10),
+        make_tuple(6, 5, &aom_highbd_10_sub_pixel_avg_variance64x32_sse2, 10),
+        make_tuple(5, 6, &aom_highbd_10_sub_pixel_avg_variance32x64_sse2, 10),
+        make_tuple(5, 5, &aom_highbd_10_sub_pixel_avg_variance32x32_sse2, 10),
+        make_tuple(5, 4, &aom_highbd_10_sub_pixel_avg_variance32x16_sse2, 10),
+        make_tuple(4, 5, &aom_highbd_10_sub_pixel_avg_variance16x32_sse2, 10),
+        make_tuple(4, 4, &aom_highbd_10_sub_pixel_avg_variance16x16_sse2, 10),
+        make_tuple(4, 3, &aom_highbd_10_sub_pixel_avg_variance16x8_sse2, 10),
+        make_tuple(3, 4, &aom_highbd_10_sub_pixel_avg_variance8x16_sse2, 10),
+        make_tuple(3, 3, &aom_highbd_10_sub_pixel_avg_variance8x8_sse2, 10),
+        make_tuple(3, 2, &aom_highbd_10_sub_pixel_avg_variance8x4_sse2, 10),
+        make_tuple(6, 6, &aom_highbd_8_sub_pixel_avg_variance64x64_sse2, 8),
+        make_tuple(6, 5, &aom_highbd_8_sub_pixel_avg_variance64x32_sse2, 8),
+        make_tuple(5, 6, &aom_highbd_8_sub_pixel_avg_variance32x64_sse2, 8),
+        make_tuple(5, 5, &aom_highbd_8_sub_pixel_avg_variance32x32_sse2, 8),
+        make_tuple(5, 4, &aom_highbd_8_sub_pixel_avg_variance32x16_sse2, 8),
+        make_tuple(4, 5, &aom_highbd_8_sub_pixel_avg_variance16x32_sse2, 8),
+        make_tuple(4, 4, &aom_highbd_8_sub_pixel_avg_variance16x16_sse2, 8),
+        make_tuple(4, 3, &aom_highbd_8_sub_pixel_avg_variance16x8_sse2, 8),
+        make_tuple(3, 4, &aom_highbd_8_sub_pixel_avg_variance8x16_sse2, 8),
+        make_tuple(3, 3, &aom_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
+        make_tuple(3, 2, &aom_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
 INSTANTIATE_TEST_CASE_P(
-    SSSE3, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_ssse3, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_ssse3, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_ssse3, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_ssse3, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_ssse3, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_ssse3, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_ssse3, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_ssse3, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_ssse3, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_ssse3, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_ssse3, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_ssse3, 0),
-                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_ssse3, 0)));
+    SSSE3, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_variance64x64_ssse3, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_variance64x32_ssse3, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_variance32x64_ssse3, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_ssse3, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_variance32x16_ssse3, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_variance16x32_ssse3, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_variance16x16_ssse3, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_variance16x8_ssse3, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_variance8x16_ssse3, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_ssse3, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_variance8x4_ssse3, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_variance4x8_ssse3, 0),
+                      make_tuple(2, 2, &aom_sub_pixel_variance4x4_ssse3, 0)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSSE3, VpxSubpelAvgVarianceTest,
+    SSSE3, AvxSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_ssse3, 0),
-        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_ssse3, 0),
-        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_ssse3, 0),
-        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_ssse3, 0),
-        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_ssse3, 0),
-        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_ssse3, 0),
-        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_ssse3, 0),
-        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_ssse3, 0),
-        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_ssse3, 0),
-        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_ssse3, 0),
-        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_ssse3, 0),
-        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_ssse3, 0),
-        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_ssse3, 0)));
+        make_tuple(6, 6, &aom_sub_pixel_avg_variance64x64_ssse3, 0),
+        make_tuple(6, 5, &aom_sub_pixel_avg_variance64x32_ssse3, 0),
+        make_tuple(5, 6, &aom_sub_pixel_avg_variance32x64_ssse3, 0),
+        make_tuple(5, 5, &aom_sub_pixel_avg_variance32x32_ssse3, 0),
+        make_tuple(5, 4, &aom_sub_pixel_avg_variance32x16_ssse3, 0),
+        make_tuple(4, 5, &aom_sub_pixel_avg_variance16x32_ssse3, 0),
+        make_tuple(4, 4, &aom_sub_pixel_avg_variance16x16_ssse3, 0),
+        make_tuple(4, 3, &aom_sub_pixel_avg_variance16x8_ssse3, 0),
+        make_tuple(3, 4, &aom_sub_pixel_avg_variance8x16_ssse3, 0),
+        make_tuple(3, 3, &aom_sub_pixel_avg_variance8x8_ssse3, 0),
+        make_tuple(3, 2, &aom_sub_pixel_avg_variance8x4_ssse3, 0),
+        make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_ssse3, 0),
+        make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_ssse3, 0)));
 #endif  // HAVE_SSSE3
 
 #if HAVE_AVX2
-INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
-                        ::testing::Values(MseParams(4, 4, &vpx_mse16x16_avx2)));
+INSTANTIATE_TEST_CASE_P(AVX2, AvxMseTest,
+                        ::testing::Values(MseParams(4, 4, &aom_mse16x16_avx2)));
 
 INSTANTIATE_TEST_CASE_P(
-    AVX2, VpxVarianceTest,
-    ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_avx2),
-                      VarianceParams(6, 5, &vpx_variance64x32_avx2),
-                      VarianceParams(5, 5, &vpx_variance32x32_avx2),
-                      VarianceParams(5, 4, &vpx_variance32x16_avx2),
-                      VarianceParams(4, 4, &vpx_variance16x16_avx2)));
+    AVX2, AvxVarianceTest,
+    ::testing::Values(VarianceParams(6, 6, &aom_variance64x64_avx2),
+                      VarianceParams(6, 5, &aom_variance64x32_avx2),
+                      VarianceParams(5, 5, &aom_variance32x32_avx2),
+                      VarianceParams(5, 4, &aom_variance32x16_avx2),
+                      VarianceParams(4, 4, &aom_variance16x16_avx2)));
 
 INSTANTIATE_TEST_CASE_P(
-    AVX2, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_avx2, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_avx2, 0)));
+    AVX2, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_variance64x64_avx2, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_avx2, 0)));
 
 INSTANTIATE_TEST_CASE_P(
-    AVX2, VpxSubpelAvgVarianceTest,
+    AVX2, AvxSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_avx2, 0),
-        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_avx2, 0)));
+        make_tuple(6, 6, &aom_sub_pixel_avg_variance64x64_avx2, 0),
+        make_tuple(5, 5, &aom_sub_pixel_avg_variance32x32_avx2, 0)));
 #endif  // HAVE_AVX2
 
 #if HAVE_MEDIA
-INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+INSTANTIATE_TEST_CASE_P(MEDIA, AvxMseTest,
                         ::testing::Values(MseParams(4, 4,
-                                                    &vpx_mse16x16_media)));
+                                                    &aom_mse16x16_media)));
 
 INSTANTIATE_TEST_CASE_P(
-    MEDIA, VpxVarianceTest,
-    ::testing::Values(VarianceParams(4, 4, &vpx_variance16x16_media),
-                      VarianceParams(3, 3, &vpx_variance8x8_media)));
+    MEDIA, AvxVarianceTest,
+    ::testing::Values(VarianceParams(4, 4, &aom_variance16x16_media),
+                      VarianceParams(3, 3, &aom_variance8x8_media)));
 
 INSTANTIATE_TEST_CASE_P(
-    MEDIA, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_media, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_media, 0)));
+    MEDIA, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(4, 4, &aom_sub_pixel_variance16x16_media, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_media, 0)));
 #endif  // HAVE_MEDIA
 
 #if HAVE_NEON
-INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+INSTANTIATE_TEST_CASE_P(NEON, AvxSseTest,
                         ::testing::Values(SseParams(2, 2,
-                                                    &vpx_get4x4sse_cs_neon)));
+                                                    &aom_get4x4sse_cs_neon)));
 
-INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
-                        ::testing::Values(MseParams(4, 4, &vpx_mse16x16_neon)));
+INSTANTIATE_TEST_CASE_P(NEON, AvxMseTest,
+                        ::testing::Values(MseParams(4, 4, &aom_mse16x16_neon)));
 
 INSTANTIATE_TEST_CASE_P(
-    NEON, VpxVarianceTest,
-    ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_neon),
-                      VarianceParams(6, 5, &vpx_variance64x32_neon),
-                      VarianceParams(5, 6, &vpx_variance32x64_neon),
-                      VarianceParams(5, 5, &vpx_variance32x32_neon),
-                      VarianceParams(4, 4, &vpx_variance16x16_neon),
-                      VarianceParams(4, 3, &vpx_variance16x8_neon),
-                      VarianceParams(3, 4, &vpx_variance8x16_neon),
-                      VarianceParams(3, 3, &vpx_variance8x8_neon)));
+    NEON, AvxVarianceTest,
+    ::testing::Values(VarianceParams(6, 6, &aom_variance64x64_neon),
+                      VarianceParams(6, 5, &aom_variance64x32_neon),
+                      VarianceParams(5, 6, &aom_variance32x64_neon),
+                      VarianceParams(5, 5, &aom_variance32x32_neon),
+                      VarianceParams(4, 4, &aom_variance16x16_neon),
+                      VarianceParams(4, 3, &aom_variance16x8_neon),
+                      VarianceParams(3, 4, &aom_variance8x16_neon),
+                      VarianceParams(3, 3, &aom_variance8x8_neon)));
 
 INSTANTIATE_TEST_CASE_P(
-    NEON, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_neon, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_neon, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_neon, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_neon, 0)));
+    NEON, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_variance64x64_neon, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_neon, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_variance16x16_neon, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_neon, 0)));
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
 INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
-                        ::testing::Values(vpx_get_mb_ss_msa));
+                        ::testing::Values(aom_get_mb_ss_msa));
 
-INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+INSTANTIATE_TEST_CASE_P(MSA, AvxSseTest,
                         ::testing::Values(SseParams(2, 2,
-                                                    &vpx_get4x4sse_cs_msa)));
+                                                    &aom_get4x4sse_cs_msa)));
 
-INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
-                        ::testing::Values(MseParams(4, 4, &vpx_mse16x16_msa),
-                                          MseParams(4, 3, &vpx_mse16x8_msa),
-                                          MseParams(3, 4, &vpx_mse8x16_msa),
-                                          MseParams(3, 3, &vpx_mse8x8_msa)));
+INSTANTIATE_TEST_CASE_P(MSA, AvxMseTest,
+                        ::testing::Values(MseParams(4, 4, &aom_mse16x16_msa),
+                                          MseParams(4, 3, &aom_mse16x8_msa),
+                                          MseParams(3, 4, &aom_mse8x16_msa),
+                                          MseParams(3, 3, &aom_mse8x8_msa)));
 
 INSTANTIATE_TEST_CASE_P(
-    MSA, VpxVarianceTest,
-    ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_msa),
-                      VarianceParams(6, 5, &vpx_variance64x32_msa),
-                      VarianceParams(5, 6, &vpx_variance32x64_msa),
-                      VarianceParams(5, 5, &vpx_variance32x32_msa),
-                      VarianceParams(5, 4, &vpx_variance32x16_msa),
-                      VarianceParams(4, 5, &vpx_variance16x32_msa),
-                      VarianceParams(4, 4, &vpx_variance16x16_msa),
-                      VarianceParams(4, 3, &vpx_variance16x8_msa),
-                      VarianceParams(3, 4, &vpx_variance8x16_msa),
-                      VarianceParams(3, 3, &vpx_variance8x8_msa),
-                      VarianceParams(3, 2, &vpx_variance8x4_msa),
-                      VarianceParams(2, 3, &vpx_variance4x8_msa),
-                      VarianceParams(2, 2, &vpx_variance4x4_msa)));
+    MSA, AvxVarianceTest,
+    ::testing::Values(VarianceParams(6, 6, &aom_variance64x64_msa),
+                      VarianceParams(6, 5, &aom_variance64x32_msa),
+                      VarianceParams(5, 6, &aom_variance32x64_msa),
+                      VarianceParams(5, 5, &aom_variance32x32_msa),
+                      VarianceParams(5, 4, &aom_variance32x16_msa),
+                      VarianceParams(4, 5, &aom_variance16x32_msa),
+                      VarianceParams(4, 4, &aom_variance16x16_msa),
+                      VarianceParams(4, 3, &aom_variance16x8_msa),
+                      VarianceParams(3, 4, &aom_variance8x16_msa),
+                      VarianceParams(3, 3, &aom_variance8x8_msa),
+                      VarianceParams(3, 2, &aom_variance8x4_msa),
+                      VarianceParams(2, 3, &aom_variance4x8_msa),
+                      VarianceParams(2, 2, &aom_variance4x4_msa)));
 
 INSTANTIATE_TEST_CASE_P(
-    MSA, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(2, 2, &vpx_sub_pixel_variance4x4_msa, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_msa, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_msa, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_msa, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_msa, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_msa, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_msa, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_msa, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_msa, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_msa, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_msa, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_msa, 0),
-                      make_tuple(6, 6, &vpx_sub_pixel_variance64x64_msa, 0)));
+    MSA, AvxSubpelVarianceTest,
+    ::testing::Values(make_tuple(2, 2, &aom_sub_pixel_variance4x4_msa, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_variance4x8_msa, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_variance8x4_msa, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_variance8x8_msa, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_variance8x16_msa, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_variance16x8_msa, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_variance16x16_msa, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_variance16x32_msa, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_variance32x16_msa, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_variance32x32_msa, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_variance32x64_msa, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_variance64x32_msa, 0),
+                      make_tuple(6, 6, &aom_sub_pixel_variance64x64_msa, 0)));
 
 INSTANTIATE_TEST_CASE_P(
-    MSA, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_msa, 0),
-                      make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_msa, 0),
-                      make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_msa, 0),
-                      make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_msa, 0),
-                      make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_msa, 0),
-                      make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_msa, 0),
-                      make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_msa, 0),
-                      make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_msa, 0),
-                      make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_msa, 0),
-                      make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_msa, 0),
-                      make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_msa, 0),
-                      make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_msa, 0),
-                      make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_msa, 0)));
+    MSA, AvxSubpelAvgVarianceTest,
+    ::testing::Values(make_tuple(6, 6, &aom_sub_pixel_avg_variance64x64_msa, 0),
+                      make_tuple(6, 5, &aom_sub_pixel_avg_variance64x32_msa, 0),
+                      make_tuple(5, 6, &aom_sub_pixel_avg_variance32x64_msa, 0),
+                      make_tuple(5, 5, &aom_sub_pixel_avg_variance32x32_msa, 0),
+                      make_tuple(5, 4, &aom_sub_pixel_avg_variance32x16_msa, 0),
+                      make_tuple(4, 5, &aom_sub_pixel_avg_variance16x32_msa, 0),
+                      make_tuple(4, 4, &aom_sub_pixel_avg_variance16x16_msa, 0),
+                      make_tuple(4, 3, &aom_sub_pixel_avg_variance16x8_msa, 0),
+                      make_tuple(3, 4, &aom_sub_pixel_avg_variance8x16_msa, 0),
+                      make_tuple(3, 3, &aom_sub_pixel_avg_variance8x8_msa, 0),
+                      make_tuple(3, 2, &aom_sub_pixel_avg_variance8x4_msa, 0),
+                      make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_msa, 0),
+                      make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_msa, 0)));
 #endif  // HAVE_MSA
 }  // namespace
diff --git a/test/video_source.h b/test/video_source.h
index 7fde59c..d46bde3 100644
--- a/test/video_source.h
+++ b/test/video_source.h
@@ -20,7 +20,7 @@
 #include <cstdlib>
 #include <string>
 #include "test/acm_random.h"
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 namespace libaom_test {
 
@@ -98,7 +98,7 @@
 };
 
 // Abstract base class for test video sources, which provide a stream of
-// vpx_image_t images with associated timestamps and duration.
+// aom_image_t images with associated timestamps and duration.
 class VideoSource {
  public:
   virtual ~VideoSource() {}
@@ -110,16 +110,16 @@
   virtual void Next() = 0;
 
   // Get the current video frame, or NULL on End-Of-Stream.
-  virtual vpx_image_t *img() const = 0;
+  virtual aom_image_t *img() const = 0;
 
   // Get the presentation timestamp of the current frame.
-  virtual vpx_codec_pts_t pts() const = 0;
+  virtual aom_codec_pts_t pts() const = 0;
 
   // Get the current frame's duration
   virtual unsigned long duration() const = 0;
 
   // Get the timebase for the stream
-  virtual vpx_rational_t timebase() const = 0;
+  virtual aom_rational_t timebase() const = 0;
 
   // Get the current frame counter, starting at 0.
   virtual unsigned int frame() const = 0;
@@ -132,11 +132,11 @@
  public:
   DummyVideoSource()
       : img_(NULL), limit_(100), width_(80), height_(64),
-        format_(VPX_IMG_FMT_I420) {
+        format_(AOM_IMG_FMT_I420) {
     ReallocImage();
   }
 
-  virtual ~DummyVideoSource() { vpx_img_free(img_); }
+  virtual ~DummyVideoSource() { aom_img_free(img_); }
 
   virtual void Begin() {
     frame_ = 0;
@@ -148,15 +148,15 @@
     FillFrame();
   }
 
-  virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
+  virtual aom_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
 
   // Models a stream where Timebase = 1/FPS, so pts == frame.
-  virtual vpx_codec_pts_t pts() const { return frame_; }
+  virtual aom_codec_pts_t pts() const { return frame_; }
 
   virtual unsigned long duration() const { return 1; }
 
-  virtual vpx_rational_t timebase() const {
-    const vpx_rational_t t = { 1, 30 };
+  virtual aom_rational_t timebase() const {
+    const aom_rational_t t = { 1, 30 };
     return t;
   }
 
@@ -174,7 +174,7 @@
     }
   }
 
-  void SetImageFormat(vpx_img_fmt_t format) {
+  void SetImageFormat(aom_img_fmt_t format) {
     if (format_ != format) {
       format_ = format;
       ReallocImage();
@@ -187,18 +187,18 @@
   }
 
   void ReallocImage() {
-    vpx_img_free(img_);
-    img_ = vpx_img_alloc(NULL, format_, width_, height_, 32);
+    aom_img_free(img_);
+    img_ = aom_img_alloc(NULL, format_, width_, height_, 32);
     raw_sz_ = ((img_->w + 31) & ~31) * img_->h * img_->bps / 8;
   }
 
-  vpx_image_t *img_;
+  aom_image_t *img_;
   size_t raw_sz_;
   unsigned int limit_;
   unsigned int frame_;
   unsigned int width_;
   unsigned int height_;
-  vpx_img_fmt_t format_;
+  aom_img_fmt_t format_;
 };
 
 class RandomVideoSource : public DummyVideoSource {
diff --git a/test/vp10_fht16x16_test.cc b/test/vp10_fht16x16_test.cc
deleted file mode 100644
index 40884f3..0000000
--- a/test/vp10_fht16x16_test.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "third_party/googletest/src/include/gtest/gtest.h"
-
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-
-#include "test/acm_random.h"
-#include "test/clear_system_state.h"
-#include "test/register_state_check.h"
-#include "test/transform_test_base.h"
-#include "test/util.h"
-#include "aom_ports/mem.h"
-
-using libaom_test::ACMRandom;
-
-namespace {
-typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                        int tx_type);
-using std::tr1::tuple;
-using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht16x16Param;
-
-void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht16x16_c(in, out, stride, tx_type);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                           int tx_type, int bd);
-typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd);
-
-// Target optimized function, tx_type, bit depth
-typedef tuple<HbdHtFunc, int, int> HighbdHt16x16Param;
-
-void highbd_fht16x16_ref(const int16_t *in, int32_t *out, int stride,
-                         int tx_type, int bd) {
-  vp10_fwd_txfm2d_16x16_c(in, out, stride, tx_type, bd);
-}
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-class VP10Trans16x16HT : public libaom_test::TransformTestBase,
-                         public ::testing::TestWithParam<Ht16x16Param> {
- public:
-  virtual ~VP10Trans16x16HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    inv_txfm_ = GET_PARAM(1);
-    tx_type_ = GET_PARAM(2);
-    pitch_ = 16;
-    fwd_txfm_ref = fht16x16_ref;
-    bit_depth_ = GET_PARAM(3);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = GET_PARAM(4);
-  }
-  virtual void TearDown() { libaom_test::ClearSystemState(); }
-
- protected:
-  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
-    fwd_txfm_(in, out, stride, tx_type_);
-  }
-
-  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
-    inv_txfm_(out, dst, stride, tx_type_);
-  }
-
-  FhtFunc fwd_txfm_;
-  IhtFunc inv_txfm_;
-};
-
-TEST_P(VP10Trans16x16HT, CoeffCheck) { RunCoeffCheck(); }
-
-#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HighbdTrans16x16HT
-    : public ::testing::TestWithParam<HighbdHt16x16Param> {
- public:
-  virtual ~VP10HighbdTrans16x16HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    fwd_txfm_ref_ = highbd_fht16x16_ref;
-    tx_type_ = GET_PARAM(1);
-    bit_depth_ = GET_PARAM(2);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = 256;
-
-    input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
-    output_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-    output_ref_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-  }
-
-  virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(output_);
-    vpx_free(output_ref_);
-    libaom_test::ClearSystemState();
-  }
-
- protected:
-  void RunBitexactCheck();
-
- private:
-  HbdHtFunc fwd_txfm_;
-  HbdHtFunc fwd_txfm_ref_;
-  int tx_type_;
-  int bit_depth_;
-  int mask_;
-  int num_coeffs_;
-  int16_t *input_;
-  int32_t *output_;
-  int32_t *output_ref_;
-};
-
-void VP10HighbdTrans16x16HT::RunBitexactCheck() {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-  int i, j;
-  const int stride = 16;
-  const int num_tests = 1000;
-
-  for (i = 0; i < num_tests; ++i) {
-    for (j = 0; j < num_coeffs_; ++j) {
-      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
-    }
-
-    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
-    ASM_REGISTER_STATE_CHECK(
-        fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
-
-    for (j = 0; j < num_coeffs_; ++j) {
-      EXPECT_EQ(output_ref_[j], output_[j])
-          << "Not bit-exact result at index: " << j << " at test block: " << i;
-    }
-  }
-}
-
-TEST_P(VP10HighbdTrans16x16HT, HighbdCoeffCheck) { RunBitexactCheck(); }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-using std::tr1::make_tuple;
-
-#if HAVE_SSE2
-const Ht16x16Param kArrayHt16x16Param_sse2[] = {
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8,
-             256),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 4, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 5, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 6, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 7, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 8, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 10, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 11, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 12, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 13, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 14, VPX_BITS_8,
-             256),
-  make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 15, VPX_BITS_8,
-             256)
-#endif  // CONFIG_EXT_TX
-};
-INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans16x16HT,
-                        ::testing::ValuesIn(kArrayHt16x16Param_sse2));
-#endif  // HAVE_SSE2
-
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-const HighbdHt16x16Param kArrayHBDHt16x16Param_sse4_1[] = {
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 12),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 12),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 10),
-  make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 12),
-#endif  // CONFIG_EXT_TX
-};
-INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans16x16HT,
-                        ::testing::ValuesIn(kArrayHBDHt16x16Param_sse4_1));
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-
-}  // namespace
diff --git a/test/vp10_fht4x4_test.cc b/test/vp10_fht4x4_test.cc
deleted file mode 100644
index 1f4aa1e..0000000
--- a/test/vp10_fht4x4_test.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "third_party/googletest/src/include/gtest/gtest.h"
-
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-
-#include "test/acm_random.h"
-#include "test/clear_system_state.h"
-#include "test/register_state_check.h"
-#include "test/transform_test_base.h"
-#include "test/util.h"
-#include "aom_ports/mem.h"
-
-using libaom_test::ACMRandom;
-
-namespace {
-typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                        int tx_type);
-using std::tr1::tuple;
-using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht4x4Param;
-
-void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht4x4_c(in, out, stride, tx_type);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef void (*IhighbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                              int tx_type, int bd);
-typedef void (*HBDFhtFunc)(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd);
-
-// HighbdHt4x4Param argument list:
-// <Target optimized function, tx_type, bit depth>
-typedef tuple<HBDFhtFunc, int, int> HighbdHt4x4Param;
-
-void highbe_fht4x4_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
-                       int bd) {
-  vp10_fwd_txfm2d_4x4_c(in, out, stride, tx_type, bd);
-}
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-class VP10Trans4x4HT : public libaom_test::TransformTestBase,
-                       public ::testing::TestWithParam<Ht4x4Param> {
- public:
-  virtual ~VP10Trans4x4HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    inv_txfm_ = GET_PARAM(1);
-    tx_type_ = GET_PARAM(2);
-    pitch_ = 4;
-    fwd_txfm_ref = fht4x4_ref;
-    bit_depth_ = GET_PARAM(3);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = GET_PARAM(4);
-  }
-  virtual void TearDown() { libaom_test::ClearSystemState(); }
-
- protected:
-  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
-    fwd_txfm_(in, out, stride, tx_type_);
-  }
-
-  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
-    inv_txfm_(out, dst, stride, tx_type_);
-  }
-
-  FhtFunc fwd_txfm_;
-  IhtFunc inv_txfm_;
-};
-
-TEST_P(VP10Trans4x4HT, CoeffCheck) { RunCoeffCheck(); }
-
-#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HighbdTrans4x4HT : public ::testing::TestWithParam<HighbdHt4x4Param> {
- public:
-  virtual ~VP10HighbdTrans4x4HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    fwd_txfm_ref_ = highbe_fht4x4_ref;
-    tx_type_ = GET_PARAM(1);
-    bit_depth_ = GET_PARAM(2);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = 16;
-
-    input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
-    output_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-    output_ref_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-  }
-
-  virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(output_);
-    vpx_free(output_ref_);
-    libaom_test::ClearSystemState();
-  }
-
- protected:
-  void RunBitexactCheck();
-
- private:
-  HBDFhtFunc fwd_txfm_;
-  HBDFhtFunc fwd_txfm_ref_;
-  int tx_type_;
-  int bit_depth_;
-  int mask_;
-  int num_coeffs_;
-  int16_t *input_;
-  int32_t *output_;
-  int32_t *output_ref_;
-};
-
-void VP10HighbdTrans4x4HT::RunBitexactCheck() {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-  int i, j;
-  const int stride = 4;
-  const int num_tests = 1000;
-  const int num_coeffs = 16;
-
-  for (i = 0; i < num_tests; ++i) {
-    for (j = 0; j < num_coeffs; ++j) {
-      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
-    }
-
-    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
-    fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_);
-
-    for (j = 0; j < num_coeffs; ++j) {
-      EXPECT_EQ(output_[j], output_ref_[j])
-          << "Not bit-exact result at index: " << j << " at test block: " << i;
-    }
-  }
-}
-
-TEST_P(VP10HighbdTrans4x4HT, HighbdCoeffCheck) { RunBitexactCheck(); }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-using std::tr1::make_tuple;
-
-#if HAVE_SSE2
-const Ht4x4Param kArrayHt4x4Param_sse2[] = {
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3, VPX_BITS_8, 16),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 4, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 5, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 6, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 7, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 8, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 10, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 11, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 12, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 13, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 14, VPX_BITS_8, 16),
-  make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 15, VPX_BITS_8, 16)
-#endif  // CONFIG_EXT_TX
-};
-INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans4x4HT,
-                        ::testing::ValuesIn(kArrayHt4x4Param_sse2));
-#endif  // HAVE_SSE2
-
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-const HighbdHt4x4Param kArrayHighbdHt4x4Param[] = {
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 12),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 12),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 10),
-  make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 12),
-#endif  // CONFIG_EXT_TX
-};
-
-INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans4x4HT,
-                        ::testing::ValuesIn(kArrayHighbdHt4x4Param));
-
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-
-}  // namespace
diff --git a/test/vp10_fht8x8_test.cc b/test/vp10_fht8x8_test.cc
deleted file mode 100644
index 3990bd5..0000000
--- a/test/vp10_fht8x8_test.cc
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "third_party/googletest/src/include/gtest/gtest.h"
-
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-
-#include "test/acm_random.h"
-#include "test/clear_system_state.h"
-#include "test/register_state_check.h"
-#include "test/transform_test_base.h"
-#include "test/util.h"
-#include "aom_ports/mem.h"
-
-using libaom_test::ACMRandom;
-
-namespace {
-typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                        int tx_type);
-
-using libaom_test::FhtFunc;
-using std::tr1::tuple;
-typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht8x8Param;
-
-void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht8x8_c(in, out, stride, tx_type);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
-                           int tx_type, int bd);
-typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd);
-// Target optimized function, tx_type, bit depth
-typedef tuple<HbdHtFunc, int, int> HighbdHt8x8Param;
-
-void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
-                       int bd) {
-  vp10_fwd_txfm2d_8x8_c(in, out, stride, tx_type, bd);
-}
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-class VP10Trans8x8HT : public libaom_test::TransformTestBase,
-                       public ::testing::TestWithParam<Ht8x8Param> {
- public:
-  virtual ~VP10Trans8x8HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    inv_txfm_ = GET_PARAM(1);
-    tx_type_ = GET_PARAM(2);
-    pitch_ = 8;
-    fwd_txfm_ref = fht8x8_ref;
-    bit_depth_ = GET_PARAM(3);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = GET_PARAM(4);
-  }
-  virtual void TearDown() { libaom_test::ClearSystemState(); }
-
- protected:
-  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
-    fwd_txfm_(in, out, stride, tx_type_);
-  }
-
-  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
-    inv_txfm_(out, dst, stride, tx_type_);
-  }
-
-  FhtFunc fwd_txfm_;
-  IhtFunc inv_txfm_;
-};
-
-TEST_P(VP10Trans8x8HT, CoeffCheck) { RunCoeffCheck(); }
-
-#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HighbdTrans8x8HT : public ::testing::TestWithParam<HighbdHt8x8Param> {
- public:
-  virtual ~VP10HighbdTrans8x8HT() {}
-
-  virtual void SetUp() {
-    fwd_txfm_ = GET_PARAM(0);
-    fwd_txfm_ref_ = highbd_fht8x8_ref;
-    tx_type_ = GET_PARAM(1);
-    bit_depth_ = GET_PARAM(2);
-    mask_ = (1 << bit_depth_) - 1;
-    num_coeffs_ = 64;
-
-    input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
-    output_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-    output_ref_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
-  }
-
-  virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(output_);
-    vpx_free(output_ref_);
-    libaom_test::ClearSystemState();
-  }
-
- protected:
-  void RunBitexactCheck();
-
- private:
-  HbdHtFunc fwd_txfm_;
-  HbdHtFunc fwd_txfm_ref_;
-  int tx_type_;
-  int bit_depth_;
-  int mask_;
-  int num_coeffs_;
-  int16_t *input_;
-  int32_t *output_;
-  int32_t *output_ref_;
-};
-
-void VP10HighbdTrans8x8HT::RunBitexactCheck() {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-  int i, j;
-  const int stride = 8;
-  const int num_tests = 1000;
-  const int num_coeffs = 64;
-
-  for (i = 0; i < num_tests; ++i) {
-    for (j = 0; j < num_coeffs; ++j) {
-      input_[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
-    }
-
-    fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
-    ASM_REGISTER_STATE_CHECK(
-        fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
-
-    for (j = 0; j < num_coeffs; ++j) {
-      EXPECT_EQ(output_ref_[j], output_[j])
-          << "Not bit-exact result at index: " << j << " at test block: " << i;
-    }
-  }
-}
-
-TEST_P(VP10HighbdTrans8x8HT, HighbdCoeffCheck) { RunBitexactCheck(); }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-using std::tr1::make_tuple;
-
-#if HAVE_SSE2
-const Ht8x8Param kArrayHt8x8Param_sse2[] = {
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3, VPX_BITS_8, 64),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 4, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 5, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 6, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 7, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 8, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 10, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 11, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 12, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 13, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 14, VPX_BITS_8, 64),
-  make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 15, VPX_BITS_8, 64)
-#endif  // CONFIG_EXT_TX
-};
-INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans8x8HT,
-                        ::testing::ValuesIn(kArrayHt8x8Param_sse2));
-#endif  // HAVE_SSE2
-
-#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-const HighbdHt8x8Param kArrayHBDHt8x8Param_sse4_1[] = {
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 12),
-#if CONFIG_EXT_TX
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 12),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 10),
-  make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 12),
-#endif  // CONFIG_EXT_TX
-};
-INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans8x8HT,
-                        ::testing::ValuesIn(kArrayHBDHt8x8Param_sse4_1));
-#endif  // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-
-}  // namespace
diff --git a/test/vp10_fwd_txfm2d_test.cc b/test/vp10_fwd_txfm2d_test.cc
deleted file mode 100644
index 7dbb922..0000000
--- a/test/vp10_fwd_txfm2d_test.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "test/acm_random.h"
-#include "test/util.h"
-#include "test/vp10_txfm_test.h"
-#include "av1/common/vp10_txfm.h"
-#include "./vp10_rtcd.h"
-
-using libaom_test::ACMRandom;
-using libaom_test::input_base;
-using libaom_test::bd;
-using libaom_test::compute_avg_abs_error;
-using libaom_test::Fwd_Txfm2d_Func;
-using libaom_test::TYPE_TXFM;
-
-namespace {
-#if CONFIG_VP9_HIGHBITDEPTH
-// tx_type_, tx_size_, max_error_, max_avg_error_
-typedef std::tr1::tuple<TX_TYPE, TX_SIZE, double, double> VP10FwdTxfm2dParam;
-
-class VP10FwdTxfm2d : public ::testing::TestWithParam<VP10FwdTxfm2dParam> {
- public:
-  virtual void SetUp() {
-    tx_type_ = GET_PARAM(0);
-    tx_size_ = GET_PARAM(1);
-    max_error_ = GET_PARAM(2);
-    max_avg_error_ = GET_PARAM(3);
-    count_ = 500;
-    TXFM_2D_FLIP_CFG fwd_txfm_flip_cfg =
-        vp10_get_fwd_txfm_cfg(tx_type_, tx_size_);
-    const TXFM_2D_CFG *fwd_txfm_cfg = fwd_txfm_flip_cfg.cfg;
-    int amplify_bit = fwd_txfm_cfg->shift[0] + fwd_txfm_cfg->shift[1] +
-                      fwd_txfm_cfg->shift[2];
-    ud_flip_ = fwd_txfm_flip_cfg.ud_flip;
-    lr_flip_ = fwd_txfm_flip_cfg.lr_flip;
-    amplify_factor_ =
-        amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit));
-
-    fwd_txfm_ = libaom_test::fwd_txfm_func_ls[tx_size_];
-    txfm1d_size_ = libaom_test::get_txfm1d_size(tx_size_);
-    txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
-    get_txfm1d_type(tx_type_, &type0_, &type1_);
-    input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(input_[0]) * txfm2d_size_));
-    output_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(output_[0]) * txfm2d_size_));
-    ref_input_ = reinterpret_cast<double *>(
-        vpx_memalign(16, sizeof(ref_input_[0]) * txfm2d_size_));
-    ref_output_ = reinterpret_cast<double *>(
-        vpx_memalign(16, sizeof(ref_output_[0]) * txfm2d_size_));
-  }
-
-  void RunFwdAccuracyCheck() {
-    ACMRandom rnd(ACMRandom::DeterministicSeed());
-    double avg_abs_error = 0;
-    for (int ci = 0; ci < count_; ci++) {
-      for (int ni = 0; ni < txfm2d_size_; ++ni) {
-        input_[ni] = rnd.Rand16() % input_base;
-        ref_input_[ni] = static_cast<double>(input_[ni]);
-        output_[ni] = 0;
-        ref_output_[ni] = 0;
-      }
-
-      fwd_txfm_(input_, output_, txfm1d_size_, tx_type_, bd);
-
-      if (lr_flip_ && ud_flip_)
-        libaom_test::fliplrud(ref_input_, txfm1d_size_, txfm1d_size_);
-      else if (lr_flip_)
-        libaom_test::fliplr(ref_input_, txfm1d_size_, txfm1d_size_);
-      else if (ud_flip_)
-        libaom_test::flipud(ref_input_, txfm1d_size_, txfm1d_size_);
-
-      reference_hybrid_2d(ref_input_, ref_output_, txfm1d_size_, type0_,
-                          type1_);
-
-      for (int ni = 0; ni < txfm2d_size_; ++ni) {
-        ref_output_[ni] = round(ref_output_[ni] * amplify_factor_);
-        EXPECT_GE(max_error_,
-                  fabs(output_[ni] - ref_output_[ni]) / amplify_factor_);
-      }
-      avg_abs_error += compute_avg_abs_error<int32_t, double>(
-          output_, ref_output_, txfm2d_size_);
-    }
-
-    avg_abs_error /= amplify_factor_;
-    avg_abs_error /= count_;
-    // max_abs_avg_error comes from upper bound of avg_abs_error
-    // printf("type0: %d type1: %d txfm_size: %d accuracy_avg_abs_error:
-    // %f\n", type0_, type1_, txfm1d_size_, avg_abs_error);
-    EXPECT_GE(max_avg_error_, avg_abs_error);
-  }
-
-  virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(output_);
-    vpx_free(ref_input_);
-    vpx_free(ref_output_);
-  }
-
- private:
-  double max_error_;
-  double max_avg_error_;
-  int count_;
-  double amplify_factor_;
-  TX_TYPE tx_type_;
-  TX_SIZE tx_size_;
-  int txfm1d_size_;
-  int txfm2d_size_;
-  Fwd_Txfm2d_Func fwd_txfm_;
-  TYPE_TXFM type0_;
-  TYPE_TXFM type1_;
-  int16_t *input_;
-  int32_t *output_;
-  double *ref_input_;
-  double *ref_output_;
-  int ud_flip_;  // flip upside down
-  int lr_flip_;  // flip left to right
-};
-
-TEST_P(VP10FwdTxfm2d, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
-const VP10FwdTxfm2dParam vp10_fwd_txfm2d_param_c[] = {
-#if CONFIG_EXT_TX
-  VP10FwdTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(FLIPADST_DCT, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(DCT_FLIPADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(ADST_FLIPADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(FLIPADST_ADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(FLIPADST_DCT, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(DCT_FLIPADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(ADST_FLIPADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(FLIPADST_ADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(FLIPADST_DCT, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(DCT_FLIPADST, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(ADST_FLIPADST, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(FLIPADST_ADST, TX_32X32, 70, 7),
-#endif
-  VP10FwdTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(ADST_ADST, TX_4X4, 2, 0.2),
-  VP10FwdTxfm2dParam(DCT_DCT, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(ADST_DCT, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(DCT_ADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(ADST_ADST, TX_8X8, 5, 0.6),
-  VP10FwdTxfm2dParam(DCT_DCT, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(ADST_DCT, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(DCT_ADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(ADST_ADST, TX_16X16, 11, 1.5),
-  VP10FwdTxfm2dParam(DCT_DCT, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(ADST_DCT, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(DCT_ADST, TX_32X32, 70, 7),
-  VP10FwdTxfm2dParam(ADST_ADST, TX_32X32, 70, 7)
-};
-
-INSTANTIATE_TEST_CASE_P(C, VP10FwdTxfm2d,
-                        ::testing::ValuesIn(vp10_fwd_txfm2d_param_c));
-
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-}  // namespace
diff --git a/test/vp10_inv_txfm2d_test.cc b/test/vp10_inv_txfm2d_test.cc
deleted file mode 100644
index bbd0aaf..0000000
--- a/test/vp10_inv_txfm2d_test.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "./vp10_rtcd.h"
-#include "test/acm_random.h"
-#include "test/util.h"
-#include "test/vp10_txfm_test.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
-
-using libaom_test::ACMRandom;
-using libaom_test::input_base;
-using libaom_test::bd;
-using libaom_test::compute_avg_abs_error;
-using libaom_test::Fwd_Txfm2d_Func;
-using libaom_test::Inv_Txfm2d_Func;
-
-namespace {
-
-#if CONFIG_VP9_HIGHBITDEPTH
-// VP10InvTxfm2dParam argument list:
-// tx_type_, tx_size_, max_error_, max_avg_error_
-typedef std::tr1::tuple<TX_TYPE, TX_SIZE, int, double> VP10InvTxfm2dParam;
-
-class VP10InvTxfm2d : public ::testing::TestWithParam<VP10InvTxfm2dParam> {
- public:
-  virtual void SetUp() {
-    tx_type_ = GET_PARAM(0);
-    tx_size_ = GET_PARAM(1);
-    max_error_ = GET_PARAM(2);
-    max_avg_error_ = GET_PARAM(3);
-    txfm1d_size_ = libaom_test::get_txfm1d_size(tx_size_);
-    txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
-    count_ = 500;
-
-    input_ = reinterpret_cast<int16_t *>(
-        vpx_memalign(16, sizeof(int16_t) * txfm2d_size_));
-    ref_input_ = reinterpret_cast<uint16_t *>(
-        vpx_memalign(16, sizeof(uint16_t) * txfm2d_size_));
-    output_ = reinterpret_cast<int32_t *>(
-        vpx_memalign(16, sizeof(int32_t) * txfm2d_size_));
-  }
-
-  void RunRoundtripCheck() {
-    const Fwd_Txfm2d_Func fwd_txfm_func =
-        libaom_test::fwd_txfm_func_ls[tx_size_];
-    const Inv_Txfm2d_Func inv_txfm_func =
-        libaom_test::inv_txfm_func_ls[tx_size_];
-    double avg_abs_error = 0;
-    ACMRandom rnd(ACMRandom::DeterministicSeed());
-    for (int ci = 0; ci < count_; ci++) {
-      for (int ni = 0; ni < txfm2d_size_; ++ni) {
-        if (ci == 0) {
-          int extreme_input = input_base - 1;
-          input_[ni] = extreme_input;  // extreme case
-          ref_input_[ni] = 0;
-        } else {
-          input_[ni] = rnd.Rand16() % input_base;
-          ref_input_[ni] = 0;
-        }
-      }
-
-      fwd_txfm_func(input_, output_, txfm1d_size_, tx_type_, bd);
-      inv_txfm_func(output_, ref_input_, txfm1d_size_, tx_type_, bd);
-
-      for (int ni = 0; ni < txfm2d_size_; ++ni) {
-        EXPECT_GE(max_error_, abs(input_[ni] - ref_input_[ni]));
-      }
-      avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>(
-          input_, ref_input_, txfm2d_size_);
-    }
-
-    avg_abs_error /= count_;
-    // max_abs_avg_error comes from upper bound of
-    // printf("txfm1d_size: %d accuracy_avg_abs_error: %f\n",
-    // txfm1d_size_, avg_abs_error);
-    EXPECT_GE(max_avg_error_, avg_abs_error);
-  }
-
-  virtual void TearDown() {
-    vpx_free(input_);
-    vpx_free(output_);
-    vpx_free(ref_input_);
-  }
-
- private:
-  int count_;
-  int max_error_;
-  double max_avg_error_;
-  TX_TYPE tx_type_;
-  TX_SIZE tx_size_;
-  int txfm1d_size_;
-  int txfm2d_size_;
-  int16_t *input_;
-  uint16_t *ref_input_;
-  int32_t *output_;
-};
-
-TEST_P(VP10InvTxfm2d, RunRoundtripCheck) { RunRoundtripCheck(); }
-
-const VP10InvTxfm2dParam vp10_inv_txfm2d_param[] = {
-#if CONFIG_EXT_TX
-  VP10InvTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(FLIPADST_DCT, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(DCT_FLIPADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(ADST_FLIPADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(FLIPADST_ADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(FLIPADST_DCT, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(DCT_FLIPADST, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 0.04),
-  VP10InvTxfm2dParam(ADST_FLIPADST, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(FLIPADST_ADST, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(FLIPADST_DCT, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(DCT_FLIPADST, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(ADST_FLIPADST, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(FLIPADST_ADST, TX_32X32, 4, 0.4),
-#endif
-  VP10InvTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(ADST_ADST, TX_4X4, 2, 0.002),
-  VP10InvTxfm2dParam(DCT_DCT, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(ADST_DCT, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(DCT_ADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(ADST_ADST, TX_8X8, 2, 0.02),
-  VP10InvTxfm2dParam(DCT_DCT, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(ADST_DCT, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(DCT_ADST, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(ADST_ADST, TX_16X16, 2, 0.04),
-  VP10InvTxfm2dParam(DCT_DCT, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(ADST_DCT, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(DCT_ADST, TX_32X32, 4, 0.4),
-  VP10InvTxfm2dParam(ADST_ADST, TX_32X32, 4, 0.4)
-};
-
-INSTANTIATE_TEST_CASE_P(C, VP10InvTxfm2d,
-                        ::testing::ValuesIn(vp10_inv_txfm2d_param));
-
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-}  // namespace
diff --git a/test/vp8_multi_resolution_encoder.sh b/test/vp8_multi_resolution_encoder.sh
index 9b09daa..35121b8 100755
--- a/test/vp8_multi_resolution_encoder.sh
+++ b/test/vp8_multi_resolution_encoder.sh
@@ -17,13 +17,13 @@
 
 # Environment check: $YUV_RAW_INPUT is required.
 vp8_multi_resolution_encoder_verify_environment() {
-  if [ "$(vpx_config_option_enabled CONFIG_MULTI_RES_ENCODING)" = "yes" ]; then
+  if [ "$(aom_config_option_enabled CONFIG_MULTI_RES_ENCODING)" = "yes" ]; then
     if [ ! -e "${YUV_RAW_INPUT}" ]; then
-      elog "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+      elog "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
       return 1
     fi
     local readonly app="vp8_multi_resolution_encoder"
-    if [ -z "$(vpx_tool_path "${app}")" ]; then
+    if [ -z "$(aom_tool_path "${app}")" ]; then
       elog "${app} not found. It must exist in LIBAOM_BIN_PATH or its parent."
       return 1
     fi
@@ -33,21 +33,21 @@
 # Runs vp8_multi_resolution_encoder. Simply forwards all arguments to
 # vp8_multi_resolution_encoder after building path to the executable.
 vp8_mre() {
-  local readonly encoder="$(vpx_tool_path vp8_multi_resolution_encoder)"
+  local readonly encoder="$(aom_tool_path vp8_multi_resolution_encoder)"
   if [ ! -x "${encoder}" ]; then
     elog "${encoder} does not exist or is not executable."
     return 1
   fi
 
-  eval "${VPX_TEST_PREFIX}" "${encoder}" "$@" ${devnull}
+  eval "${AOM_TEST_PREFIX}" "${encoder}" "$@" ${devnull}
 }
 
 vp8_multi_resolution_encoder_three_formats() {
-  local readonly output_files="${VPX_TEST_OUTPUT_DIR}/vp8_mre_0.ivf
-                               ${VPX_TEST_OUTPUT_DIR}/vp8_mre_1.ivf
-                               ${VPX_TEST_OUTPUT_DIR}/vp8_mre_2.ivf"
+  local readonly output_files="${AOM_TEST_OUTPUT_DIR}/vp8_mre_0.ivf
+                               ${AOM_TEST_OUTPUT_DIR}/vp8_mre_1.ivf
+                               ${AOM_TEST_OUTPUT_DIR}/vp8_mre_2.ivf"
 
-  if [ "$(vpx_config_option_enabled CONFIG_MULTI_RES_ENCODING)" = "yes" ]; then
+  if [ "$(aom_config_option_enabled CONFIG_MULTI_RES_ENCODING)" = "yes" ]; then
     if [ "$(vp8_encode_available)" = "yes" ]; then
       # Param order:
       #  Input width
diff --git a/test/vpxdec.sh b/test/vpxdec.sh
index 8b213ea..7c5169d 100755
--- a/test/vpxdec.sh
+++ b/test/vpxdec.sh
@@ -8,97 +8,97 @@
 ##  in the file PATENTS.  All contributing project authors may
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
-##  This file tests vpxdec. To add new tests to this file, do the following:
+##  This file tests aomdec. To add new tests to this file, do the following:
 ##    1. Write a shell function (this is your test).
-##    2. Add the function to vpxdec_tests (on a new line).
+##    2. Add the function to aomdec_tests (on a new line).
 ##
 . $(dirname $0)/tools_common.sh
 
 # Environment check: Make sure input is available.
-vpxdec_verify_environment() {
-  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${VP9_WEBM_FILE}" ] || \
-    [ ! -e "${VP9_FPM_WEBM_FILE}" ] || \
-    [ ! -e "${VP9_LT_50_FRAMES_WEBM_FILE}" ] ; then
-    elog "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+aomdec_verify_environment() {
+  if [ ! -e "${VP8_IVF_FILE}" ] || [ ! -e "${AV1_WEBM_FILE}" ] || \
+    [ ! -e "${AV1_FPM_WEBM_FILE}" ] || \
+    [ ! -e "${AV1_LT_50_FRAMES_WEBM_FILE}" ] ; then
+    elog "Libaom test data must exist in LIBVPX_TEST_DATA_PATH."
     return 1
   fi
-  if [ -z "$(vpx_tool_path vpxdec)" ]; then
-    elog "vpxdec not found. It must exist in LIBAOM_BIN_PATH or its parent."
+  if [ -z "$(aom_tool_path aomdec)" ]; then
+    elog "aomdec not found. It must exist in LIBAOM_BIN_PATH or its parent."
     return 1
   fi
 }
 
-# Wrapper function for running vpxdec with pipe input. Requires that
-# LIBAOM_BIN_PATH points to the directory containing vpxdec. $1 is used as the
+# Wrapper function for running aomdec with pipe input. Requires that
+# LIBAOM_BIN_PATH points to the directory containing aomdec. $1 is used as the
 # input file path and shifted away. All remaining parameters are passed through
-# to vpxdec.
-vpxdec_pipe() {
-  local readonly decoder="$(vpx_tool_path vpxdec)"
+# to aomdec.
+aomdec_pipe() {
+  local readonly decoder="$(aom_tool_path aomdec)"
   local readonly input="$1"
   shift
-  cat "${input}" | eval "${VPX_TEST_PREFIX}" "${decoder}" - "$@" ${devnull}
+  cat "${input}" | eval "${AOM_TEST_PREFIX}" "${decoder}" - "$@" ${devnull}
 }
 
-# Wrapper function for running vpxdec. Requires that LIBAOM_BIN_PATH points to
-# the directory containing vpxdec. $1 one is used as the input file path and
-# shifted away. All remaining parameters are passed through to vpxdec.
-vpxdec() {
-  local readonly decoder="$(vpx_tool_path vpxdec)"
+# Wrapper function for running aomdec. Requires that LIBAOM_BIN_PATH points to
+# the directory containing aomdec. $1 one is used as the input file path and
+# shifted away. All remaining parameters are passed through to aomdec.
+aomdec() {
+  local readonly decoder="$(aom_tool_path aomdec)"
   local readonly input="$1"
   shift
-  eval "${VPX_TEST_PREFIX}" "${decoder}" "$input" "$@" ${devnull}
+  eval "${AOM_TEST_PREFIX}" "${decoder}" "$input" "$@" ${devnull}
 }
 
-vpxdec_can_decode_vp8() {
+aomdec_can_decode_vp8() {
   if [ "$(vp8_decode_available)" = "yes" ]; then
     echo yes
   fi
 }
 
-vpxdec_can_decode_vp9() {
+aomdec_can_decode_vp9() {
   if [ "$(vp9_decode_available)" = "yes" ]; then
     echo yes
   fi
 }
 
-vpxdec_vp8_ivf() {
-  if [ "$(vpxdec_can_decode_vp8)" = "yes" ]; then
-    vpxdec "${VP8_IVF_FILE}" --summary --noblit
+aomdec_vp8_ivf() {
+  if [ "$(aomdec_can_decode_vp8)" = "yes" ]; then
+    aomdec "${VP8_IVF_FILE}" --summary --noblit
   fi
 }
 
-vpxdec_vp8_ivf_pipe_input() {
-  if [ "$(vpxdec_can_decode_vp8)" = "yes" ]; then
-    vpxdec_pipe "${VP8_IVF_FILE}" --summary --noblit
+aomdec_vp8_ivf_pipe_input() {
+  if [ "$(aomdec_can_decode_vp8)" = "yes" ]; then
+    aomdec_pipe "${VP8_IVF_FILE}" --summary --noblit
   fi
 }
 
-vpxdec_vp9_webm() {
-  if [ "$(vpxdec_can_decode_vp9)" = "yes" ] && \
+aomdec_vp9_webm() {
+  if [ "$(aomdec_can_decode_vp9)" = "yes" ] && \
      [ "$(webm_io_available)" = "yes" ]; then
-    vpxdec "${VP9_WEBM_FILE}" --summary --noblit
+    aomdec "${AV1_WEBM_FILE}" --summary --noblit
   fi
 }
 
-vpxdec_vp9_webm_frame_parallel() {
-  if [ "$(vpxdec_can_decode_vp9)" = "yes" ] && \
+aomdec_vp9_webm_frame_parallel() {
+  if [ "$(aomdec_can_decode_vp9)" = "yes" ] && \
      [ "$(webm_io_available)" = "yes" ]; then
     for threads in 2 3 4 5 6 7 8; do
-      vpxdec "${VP9_FPM_WEBM_FILE}" --summary --noblit --threads=$threads \
+      aomdec "${AV1_FPM_WEBM_FILE}" --summary --noblit --threads=$threads \
         --frame-parallel
     done
   fi
 }
 
-vpxdec_vp9_webm_less_than_50_frames() {
+aomdec_vp9_webm_less_than_50_frames() {
   # ensure that reaching eof in webm_guess_framerate doesn't result in invalid
   # frames in actual webm_read_frame calls.
-  if [ "$(vpxdec_can_decode_vp9)" = "yes" ] && \
+  if [ "$(aomdec_can_decode_vp9)" = "yes" ] && \
      [ "$(webm_io_available)" = "yes" ]; then
-    local readonly decoder="$(vpx_tool_path vpxdec)"
+    local readonly decoder="$(aom_tool_path aomdec)"
     local readonly expected=10
-    local readonly num_frames=$(${VPX_TEST_PREFIX} "${decoder}" \
-      "${VP9_LT_50_FRAMES_WEBM_FILE}" --summary --noblit 2>&1 \
+    local readonly num_frames=$(${AOM_TEST_PREFIX} "${decoder}" \
+      "${AV1_LT_50_FRAMES_WEBM_FILE}" --summary --noblit 2>&1 \
       | awk '/^[0-9]+ decoded frames/ { print $1 }')
     if [ "$num_frames" -ne "$expected" ]; then
       elog "Output frames ($num_frames) != expected ($expected)"
@@ -107,10 +107,10 @@
   fi
 }
 
-vpxdec_tests="vpxdec_vp8_ivf
-              vpxdec_vp8_ivf_pipe_input
-              vpxdec_vp9_webm
-              vpxdec_vp9_webm_frame_parallel
-              vpxdec_vp9_webm_less_than_50_frames"
+aomdec_tests="aomdec_vp8_ivf
+              aomdec_vp8_ivf_pipe_input
+              aomdec_vp9_webm
+              aomdec_vp9_webm_frame_parallel
+              aomdec_vp9_webm_less_than_50_frames"
 
-run_tests vpxdec_verify_environment "${vpxdec_tests}"
+run_tests aomdec_verify_environment "${aomdec_tests}"
diff --git a/test/vpxenc.sh b/test/vpxenc.sh
deleted file mode 100755
index 20e147f..0000000
--- a/test/vpxenc.sh
+++ /dev/null
@@ -1,429 +0,0 @@
-#!/bin/sh
-##
-##  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
-##
-##  Use of this source code is governed by a BSD-style license
-##  that can be found in the LICENSE file in the root of the source
-##  tree. An additional intellectual property rights grant can be found
-##  in the file PATENTS.  All contributing project authors may
-##  be found in the AUTHORS file in the root of the source tree.
-##
-##  This file tests vpxenc using hantro_collage_w352h288.yuv as input. To add
-##  new tests to this file, do the following:
-##    1. Write a shell function (this is your test).
-##    2. Add the function to vpxenc_tests (on a new line).
-##
-. $(dirname $0)/tools_common.sh
-
-readonly TEST_FRAMES=10
-
-# Environment check: Make sure input is available.
-vpxenc_verify_environment() {
-  if [ ! -e "${YUV_RAW_INPUT}" ]; then
-    elog "The file ${YUV_RAW_INPUT##*/} must exist in LIBVPX_TEST_DATA_PATH."
-    return 1
-  fi
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ]; then
-    if [ ! -e "${Y4M_NOSQ_PAR_INPUT}" ]; then
-      elog "The file ${Y4M_NOSQ_PAR_INPUT##*/} must exist in"
-      elog "LIBVPX_TEST_DATA_PATH."
-      return 1
-    fi
-  fi
-  if [ -z "$(vpx_tool_path vpxenc)" ]; then
-    elog "vpxenc not found. It must exist in LIBAOM_BIN_PATH or its parent."
-    return 1
-  fi
-}
-
-vpxenc_can_encode_vp8() {
-  if [ "$(vp8_encode_available)" = "yes" ]; then
-    echo yes
-  fi
-}
-
-vpxenc_can_encode_vp9() {
-  if [ "$(vp9_encode_available)" = "yes" ]; then
-    echo yes
-  fi
-}
-
-# Echo vpxenc command line parameters allowing use of
-# hantro_collage_w352h288.yuv as input.
-yuv_input_hantro_collage() {
-  echo ""${YUV_RAW_INPUT}"
-       --width="${YUV_RAW_INPUT_WIDTH}"
-       --height="${YUV_RAW_INPUT_HEIGHT}""
-}
-
-y4m_input_non_square_par() {
-  echo ""${Y4M_NOSQ_PAR_INPUT}""
-}
-
-y4m_input_720p() {
-  echo ""${Y4M_720P_INPUT}""
-}
-
-# Echo default vpxenc real time encoding params. $1 is the codec, which defaults
-# to vp8 if unspecified.
-vpxenc_rt_params() {
-  local readonly codec="${1:-vp8}"
-  echo "--codec=${codec}
-    --buf-initial-sz=500
-    --buf-optimal-sz=600
-    --buf-sz=1000
-    --cpu-used=-6
-    --end-usage=cbr
-    --error-resilient=1
-    --kf-max-dist=90000
-    --lag-in-frames=0
-    --max-intra-rate=300
-    --max-q=56
-    --min-q=2
-    --noise-sensitivity=0
-    --overshoot-pct=50
-    --passes=1
-    --profile=0
-    --resize-allowed=0
-    --rt
-    --static-thresh=0
-    --undershoot-pct=50"
-}
-
-# Wrapper function for running vpxenc with pipe input. Requires that
-# LIBAOM_BIN_PATH points to the directory containing vpxenc. $1 is used as the
-# input file path and shifted away. All remaining parameters are passed through
-# to vpxenc.
-vpxenc_pipe() {
-  local readonly encoder="$(vpx_tool_path vpxenc)"
-  local readonly input="$1"
-  shift
-  cat "${input}" | eval "${VPX_TEST_PREFIX}" "${encoder}" - \
-    --test-decode=fatal \
-    "$@" ${devnull}
-}
-
-# Wrapper function for running vpxenc. Requires that LIBAOM_BIN_PATH points to
-# the directory containing vpxenc. $1 one is used as the input file path and
-# shifted away. All remaining parameters are passed through to vpxenc.
-vpxenc() {
-  local readonly encoder="$(vpx_tool_path vpxenc)"
-  local readonly input="$1"
-  shift
-  eval "${VPX_TEST_PREFIX}" "${encoder}" "${input}" \
-    --test-decode=fatal \
-    "$@" ${devnull}
-}
-
-vpxenc_vp8_ivf() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8.ivf"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp8 \
-      --limit="${TEST_FRAMES}" \
-      --ivf \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp8_webm() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp8 \
-      --limit="${TEST_FRAMES}" \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp8_webm_rt() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8_rt.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      $(vpxenc_rt_params vp8) \
-      --output="${output}"
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp8_webm_2pass() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp8 \
-      --limit="${TEST_FRAMES}" \
-      --output="${output}" \
-      --passes=2
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp8_webm_lag10_frames20() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly lag_total_frames=20
-    local readonly lag_frames=10
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8_lag10_frames20.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp8 \
-      --limit="${lag_total_frames}" \
-      --lag-in-frames="${lag_frames}" \
-      --output="${output}" \
-      --auto-alt-ref=1 \
-      --passes=2
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp8_ivf_piped_input() {
-  if [ "$(vpxenc_can_encode_vp8)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp8_piped_input.ivf"
-    vpxenc_pipe $(yuv_input_hantro_collage) \
-      --codec=vp8 \
-      --limit="${TEST_FRAMES}" \
-      --ivf \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_ivf() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9.ivf"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --ivf \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_webm() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_webm_rt() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_rt.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      $(vpxenc_rt_params vp9) \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_webm_rt_multithread_tiled() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_rt_multithread_tiled.webm"
-    local readonly tilethread_min=2
-    local readonly tilethread_max=4
-    local readonly num_threads="$(seq ${tilethread_min} ${tilethread_max})"
-    local readonly num_tile_cols="$(seq ${tilethread_min} ${tilethread_max})"
-
-    for threads in ${num_threads}; do
-      for tile_cols in ${num_tile_cols}; do
-        vpxenc $(y4m_input_720p) \
-          $(vpxenc_rt_params vp9) \
-          --threads=${threads} \
-          --tile-columns=${tile_cols} \
-          --output="${output}"
-      done
-    done
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-
-    rm "${output}"
-  fi
-}
-
-vpxenc_vp9_webm_rt_multithread_tiled_frameparallel() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_rt_mt_t_fp.webm"
-    local readonly tilethread_min=2
-    local readonly tilethread_max=4
-    local readonly num_threads="$(seq ${tilethread_min} ${tilethread_max})"
-    local readonly num_tile_cols="$(seq ${tilethread_min} ${tilethread_max})"
-
-    for threads in ${num_threads}; do
-      for tile_cols in ${num_tile_cols}; do
-        vpxenc $(y4m_input_720p) \
-          $(vpxenc_rt_params vp9) \
-          --threads=${threads} \
-          --tile-columns=${tile_cols} \
-          --frame-parallel=1 \
-          --output="${output}"
-      done
-    done
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-
-    rm "${output}"
-  fi
-}
-
-vpxenc_vp9_webm_2pass() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --output="${output}" \
-      --passes=2
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_ivf_lossless() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_lossless.ivf"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --ivf \
-      --output="${output}" \
-      --lossless=1
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_ivf_minq0_maxq0() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_lossless_minq0_maxq0.ivf"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --ivf \
-      --output="${output}" \
-      --min-q=0 \
-      --max-q=0
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_vp9_webm_lag10_frames20() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly lag_total_frames=20
-    local readonly lag_frames=10
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_lag10_frames20.webm"
-    vpxenc $(yuv_input_hantro_collage) \
-      --codec=vp9 \
-      --limit="${lag_total_frames}" \
-      --lag-in-frames="${lag_frames}" \
-      --output="${output}" \
-      --passes=2 \
-      --auto-alt-ref=1
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-# TODO(fgalligan): Test that DisplayWidth is different than video width.
-vpxenc_vp9_webm_non_square_par() {
-  if [ "$(vpxenc_can_encode_vp9)" = "yes" ] && \
-     [ "$(webm_io_available)" = "yes" ]; then
-    local readonly output="${VPX_TEST_OUTPUT_DIR}/vp9_non_square_par.webm"
-    vpxenc $(y4m_input_non_square_par) \
-      --codec=vp9 \
-      --limit="${TEST_FRAMES}" \
-      --output="${output}"
-
-    if [ ! -e "${output}" ]; then
-      elog "Output file does not exist."
-      return 1
-    fi
-  fi
-}
-
-vpxenc_tests="vpxenc_vp8_ivf
-              vpxenc_vp8_webm
-              vpxenc_vp8_webm_rt
-              vpxenc_vp8_webm_2pass
-              vpxenc_vp8_webm_lag10_frames20
-              vpxenc_vp8_ivf_piped_input
-              vpxenc_vp9_ivf
-              vpxenc_vp9_webm
-              vpxenc_vp9_webm_rt
-              vpxenc_vp9_webm_rt_multithread_tiled
-              vpxenc_vp9_webm_rt_multithread_tiled_frameparallel
-              vpxenc_vp9_webm_2pass
-              vpxenc_vp9_ivf_lossless
-              vpxenc_vp9_ivf_minq0_maxq0
-              vpxenc_vp9_webm_lag10_frames20
-              vpxenc_vp9_webm_non_square_par"
-
-run_tests vpxenc_verify_environment "${vpxenc_tests}"
diff --git a/test/webm_video_source.h b/test/webm_video_source.h
index c0e1cb1..e5c34bb 100644
--- a/test/webm_video_source.h
+++ b/test/webm_video_source.h
@@ -25,25 +25,25 @@
 class WebMVideoSource : public CompressedVideoSource {
  public:
   explicit WebMVideoSource(const std::string &file_name)
-      : file_name_(file_name), vpx_ctx_(new VpxInputContext()),
+      : file_name_(file_name), aom_ctx_(new AvxInputContext()),
         webm_ctx_(new WebmInputContext()), buf_(NULL), buf_sz_(0), frame_(0),
         end_of_file_(false) {}
 
   virtual ~WebMVideoSource() {
-    if (vpx_ctx_->file != NULL) fclose(vpx_ctx_->file);
+    if (aom_ctx_->file != NULL) fclose(aom_ctx_->file);
     webm_free(webm_ctx_);
-    delete vpx_ctx_;
+    delete aom_ctx_;
     delete webm_ctx_;
   }
 
   virtual void Init() {}
 
   virtual void Begin() {
-    vpx_ctx_->file = OpenTestDataFile(file_name_);
-    ASSERT_TRUE(vpx_ctx_->file != NULL) << "Input file open failed. Filename: "
+    aom_ctx_->file = OpenTestDataFile(file_name_);
+    ASSERT_TRUE(aom_ctx_->file != NULL) << "Input file open failed. Filename: "
                                         << file_name_;
 
-    ASSERT_EQ(file_is_webm(webm_ctx_, vpx_ctx_), 1) << "file is not WebM";
+    ASSERT_EQ(file_is_webm(webm_ctx_, aom_ctx_), 1) << "file is not WebM";
 
     FillFrame();
   }
@@ -54,7 +54,7 @@
   }
 
   void FillFrame() {
-    ASSERT_TRUE(vpx_ctx_->file != NULL);
+    ASSERT_TRUE(aom_ctx_->file != NULL);
     const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_);
     ASSERT_GE(status, 0) << "webm_read_frame failed";
     if (status == 1) {
@@ -63,7 +63,7 @@
   }
 
   void SeekToNextKeyFrame() {
-    ASSERT_TRUE(vpx_ctx_->file != NULL);
+    ASSERT_TRUE(aom_ctx_->file != NULL);
     do {
       const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_);
       ASSERT_GE(status, 0) << "webm_read_frame failed";
@@ -80,7 +80,7 @@
 
  protected:
   std::string file_name_;
-  VpxInputContext *vpx_ctx_;
+  AvxInputContext *aom_ctx_;
   WebmInputContext *webm_ctx_;
   uint8_t *buf_;
   size_t buf_sz_;
diff --git a/test/y4m_test.cc b/test/y4m_test.cc
index d68d3d1..c3c4ac6 100644
--- a/test/y4m_test.cc
+++ b/test/y4m_test.cc
@@ -12,7 +12,7 @@
 
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "./y4menc.h"
 #include "test/md5_helper.h"
 #include "test/util.h"
@@ -29,37 +29,37 @@
 struct Y4mTestParam {
   const char *filename;
   unsigned int bit_depth;
-  vpx_img_fmt format;
+  aom_img_fmt format;
   const char *md5raw;
 };
 
 const Y4mTestParam kY4mTestVectors[] = {
-  { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420,
+  { "park_joy_90p_8_420.y4m", 8, AOM_IMG_FMT_I420,
     "e5406275b9fc6bb3436c31d4a05c1cab" },
-  { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422,
+  { "park_joy_90p_8_422.y4m", 8, AOM_IMG_FMT_I422,
     "284a47a47133b12884ec3a14e959a0b6" },
-  { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444,
+  { "park_joy_90p_8_444.y4m", 8, AOM_IMG_FMT_I444,
     "90517ff33843d85de712fd4fe60dbed0" },
-  { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016,
+  { "park_joy_90p_10_420.y4m", 10, AOM_IMG_FMT_I42016,
     "63f21f9f717d8b8631bd2288ee87137b" },
-  { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216,
+  { "park_joy_90p_10_422.y4m", 10, AOM_IMG_FMT_I42216,
     "48ab51fb540aed07f7ff5af130c9b605" },
-  { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416,
+  { "park_joy_90p_10_444.y4m", 10, AOM_IMG_FMT_I44416,
     "067bfd75aa85ff9bae91fa3e0edd1e3e" },
-  { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016,
+  { "park_joy_90p_12_420.y4m", 12, AOM_IMG_FMT_I42016,
     "9e6d8f6508c6e55625f6b697bc461cef" },
-  { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216,
+  { "park_joy_90p_12_422.y4m", 12, AOM_IMG_FMT_I42216,
     "b239c6b301c0b835485be349ca83a7e3" },
-  { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416,
+  { "park_joy_90p_12_444.y4m", 12, AOM_IMG_FMT_I44416,
     "5a6481a550821dab6d0192f5c63845e9" },
 };
 
-static void write_image_file(const vpx_image_t *img, FILE *file) {
+static void write_image_file(const aom_image_t *img, FILE *file) {
   int plane, y;
   for (plane = 0; plane < 3; ++plane) {
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
-    const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
+    const int bytes_per_sample = (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
     const int h =
         (plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
                : img->d_h);
@@ -89,25 +89,25 @@
   }
 
   // Checks y4m header information
-  void HeaderChecks(unsigned int bit_depth, vpx_img_fmt_t fmt) {
+  void HeaderChecks(unsigned int bit_depth, aom_img_fmt_t fmt) {
     ASSERT_TRUE(input_file_ != NULL);
     ASSERT_EQ(y4m_.pic_w, (int)kWidth);
     ASSERT_EQ(y4m_.pic_h, (int)kHeight);
     ASSERT_EQ(img()->d_w, kWidth);
     ASSERT_EQ(img()->d_h, kHeight);
     ASSERT_EQ(y4m_.bit_depth, bit_depth);
-    ASSERT_EQ(y4m_.vpx_fmt, fmt);
-    if (fmt == VPX_IMG_FMT_I420 || fmt == VPX_IMG_FMT_I42016) {
+    ASSERT_EQ(y4m_.aom_fmt, fmt);
+    if (fmt == AOM_IMG_FMT_I420 || fmt == AOM_IMG_FMT_I42016) {
       ASSERT_EQ(y4m_.bps, (int)y4m_.bit_depth * 3 / 2);
       ASSERT_EQ(img()->x_chroma_shift, 1U);
       ASSERT_EQ(img()->y_chroma_shift, 1U);
     }
-    if (fmt == VPX_IMG_FMT_I422 || fmt == VPX_IMG_FMT_I42216) {
+    if (fmt == AOM_IMG_FMT_I422 || fmt == AOM_IMG_FMT_I42216) {
       ASSERT_EQ(y4m_.bps, (int)y4m_.bit_depth * 2);
       ASSERT_EQ(img()->x_chroma_shift, 1U);
       ASSERT_EQ(img()->y_chroma_shift, 0U);
     }
-    if (fmt == VPX_IMG_FMT_I444 || fmt == VPX_IMG_FMT_I44416) {
+    if (fmt == AOM_IMG_FMT_I444 || fmt == AOM_IMG_FMT_I44416) {
       ASSERT_EQ(y4m_.bps, (int)y4m_.bit_depth * 3);
       ASSERT_EQ(img()->x_chroma_shift, 0U);
       ASSERT_EQ(img()->y_chroma_shift, 0U);
@@ -157,11 +157,11 @@
   void WriteY4mAndReadBack() {
     ASSERT_TRUE(input_file_ != NULL);
     char buf[Y4M_BUFFER_SIZE] = { 0 };
-    const struct VpxRational framerate = { y4m_.fps_n, y4m_.fps_d };
+    const struct AvxRational framerate = { y4m_.fps_n, y4m_.fps_d };
     tmpfile_ = new libaom_test::TempOutFile;
     ASSERT_TRUE(tmpfile_->file() != NULL);
     y4m_write_file_header(buf, sizeof(buf), kWidth, kHeight, &framerate,
-                          y4m_.vpx_fmt, y4m_.bit_depth);
+                          y4m_.aom_fmt, y4m_.bit_depth);
     fputs(buf, tmpfile_->file());
     for (unsigned int i = start_; i < limit_; i++) {
       y4m_write_frame_header(buf, sizeof(buf));
diff --git a/test/y4m_video_source.h b/test/y4m_video_source.h
index 094cdbe..b5a1ba5 100644
--- a/test/y4m_video_source.h
+++ b/test/y4m_video_source.h
@@ -22,12 +22,12 @@
 class Y4mVideoSource : public VideoSource {
  public:
   Y4mVideoSource(const std::string &file_name, unsigned int start, int limit)
-      : file_name_(file_name), input_file_(NULL), img_(new vpx_image_t()),
+      : file_name_(file_name), input_file_(NULL), img_(new aom_image_t()),
         start_(start), limit_(limit), frame_(0), framerate_numerator_(0),
         framerate_denominator_(0), y4m_() {}
 
   virtual ~Y4mVideoSource() {
-    vpx_img_free(img_.get());
+    aom_img_free(img_.get());
     CloseSource();
   }
 
@@ -60,17 +60,17 @@
     FillFrame();
   }
 
-  virtual vpx_image_t *img() const {
+  virtual aom_image_t *img() const {
     return (frame_ < limit_) ? img_.get() : NULL;
   }
 
   // Models a stream where Timebase = 1/FPS, so pts == frame.
-  virtual vpx_codec_pts_t pts() const { return frame_; }
+  virtual aom_codec_pts_t pts() const { return frame_; }
 
   virtual unsigned long duration() const { return 1; }
 
-  virtual vpx_rational_t timebase() const {
-    const vpx_rational_t t = { framerate_denominator_, framerate_numerator_ };
+  virtual aom_rational_t timebase() const {
+    const aom_rational_t t = { framerate_denominator_, framerate_numerator_ };
     return t;
   }
 
@@ -86,11 +86,11 @@
 
   // Swap buffers with another y4m source. This allows reading a new frame
   // while keeping the old frame around. A whole Y4mSource is required and
-  // not just a vpx_image_t because of how the y4m reader manipulates
-  // vpx_image_t internals,
+  // not just an aom_image_t because of how the y4m reader manipulates
+  // aom_image_t internals,
   void SwapBuffers(Y4mVideoSource *other) {
     std::swap(other->y4m_.dst_buf, y4m_.dst_buf);
-    vpx_image_t *tmp;
+    aom_image_t *tmp;
     tmp = other->img_.release();
     other->img_.reset(img_.release());
     img_.reset(tmp);
@@ -108,7 +108,7 @@
 
   std::string file_name_;
   FILE *input_file_;
-  testing::internal::scoped_ptr<vpx_image_t> img_;
+  testing::internal::scoped_ptr<aom_image_t> img_;
   unsigned int start_;
   unsigned int limit_;
   unsigned int frame_;
diff --git a/test/yuv_video_source.h b/test/yuv_video_source.h
index 33a31d4..a0eabce 100644
--- a/test/yuv_video_source.h
+++ b/test/yuv_video_source.h
@@ -15,7 +15,7 @@
 #include <string>
 
 #include "test/video_source.h"
-#include "aom/vpx_image.h"
+#include "aom/aom_image.h"
 
 namespace libaom_test {
 
@@ -24,19 +24,19 @@
 // do actual file encodes.
 class YUVVideoSource : public VideoSource {
  public:
-  YUVVideoSource(const std::string &file_name, vpx_img_fmt format,
+  YUVVideoSource(const std::string &file_name, aom_img_fmt format,
                  unsigned int width, unsigned int height, int rate_numerator,
                  int rate_denominator, unsigned int start, int limit)
       : file_name_(file_name), input_file_(NULL), img_(NULL), start_(start),
         limit_(limit), frame_(0), width_(0), height_(0),
-        format_(VPX_IMG_FMT_NONE), framerate_numerator_(rate_numerator),
+        format_(AOM_IMG_FMT_NONE), framerate_numerator_(rate_numerator),
         framerate_denominator_(rate_denominator) {
     // This initializes format_, raw_size_, width_, height_ and allocates img.
     SetSize(width, height, format);
   }
 
   virtual ~YUVVideoSource() {
-    vpx_img_free(img_);
+    aom_img_free(img_);
     if (input_file_) fclose(input_file_);
   }
 
@@ -57,15 +57,15 @@
     FillFrame();
   }
 
-  virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
+  virtual aom_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
 
   // Models a stream where Timebase = 1/FPS, so pts == frame.
-  virtual vpx_codec_pts_t pts() const { return frame_; }
+  virtual aom_codec_pts_t pts() const { return frame_; }
 
   virtual unsigned long duration() const { return 1; }
 
-  virtual vpx_rational_t timebase() const {
-    const vpx_rational_t t = { framerate_denominator_, framerate_numerator_ };
+  virtual aom_rational_t timebase() const {
+    const aom_rational_t t = { framerate_denominator_, framerate_numerator_ };
     return t;
   }
 
@@ -74,23 +74,23 @@
   virtual unsigned int limit() const { return limit_; }
 
   virtual void SetSize(unsigned int width, unsigned int height,
-                       vpx_img_fmt format) {
+                       aom_img_fmt format) {
     if (width != width_ || height != height_ || format != format_) {
-      vpx_img_free(img_);
-      img_ = vpx_img_alloc(NULL, format, width, height, 1);
+      aom_img_free(img_);
+      img_ = aom_img_alloc(NULL, format, width, height, 1);
       ASSERT_TRUE(img_ != NULL);
       width_ = width;
       height_ = height;
       format_ = format;
       switch (format) {
-        case VPX_IMG_FMT_I420: raw_size_ = width * height * 3 / 2; break;
-        case VPX_IMG_FMT_I422: raw_size_ = width * height * 2; break;
-        case VPX_IMG_FMT_I440: raw_size_ = width * height * 2; break;
-        case VPX_IMG_FMT_I444: raw_size_ = width * height * 3; break;
-        case VPX_IMG_FMT_I42016: raw_size_ = width * height * 3; break;
-        case VPX_IMG_FMT_I42216: raw_size_ = width * height * 4; break;
-        case VPX_IMG_FMT_I44016: raw_size_ = width * height * 4; break;
-        case VPX_IMG_FMT_I44416: raw_size_ = width * height * 6; break;
+        case AOM_IMG_FMT_I420: raw_size_ = width * height * 3 / 2; break;
+        case AOM_IMG_FMT_I422: raw_size_ = width * height * 2; break;
+        case AOM_IMG_FMT_I440: raw_size_ = width * height * 2; break;
+        case AOM_IMG_FMT_I444: raw_size_ = width * height * 3; break;
+        case AOM_IMG_FMT_I42016: raw_size_ = width * height * 3; break;
+        case AOM_IMG_FMT_I42216: raw_size_ = width * height * 4; break;
+        case AOM_IMG_FMT_I44016: raw_size_ = width * height * 4; break;
+        case AOM_IMG_FMT_I44416: raw_size_ = width * height * 6; break;
         default: ASSERT_TRUE(0);
       }
     }
@@ -107,14 +107,14 @@
  protected:
   std::string file_name_;
   FILE *input_file_;
-  vpx_image_t *img_;
+  aom_image_t *img_;
   size_t raw_size_;
   unsigned int start_;
   unsigned int limit_;
   unsigned int frame_;
   unsigned int width_;
   unsigned int height_;
-  vpx_img_fmt format_;
+  aom_img_fmt format_;
   int framerate_numerator_;
   int framerate_denominator_;
 };
diff --git a/third_party/libwebm/common/hdr_util.cc b/third_party/libwebm/common/hdr_util.cc
index e1a9842..0b53cd5 100644
--- a/third_party/libwebm/common/hdr_util.cc
+++ b/third_party/libwebm/common/hdr_util.cc
@@ -112,7 +112,7 @@
   return true;
 }
 
-// Format of VPx private data:
+// Format of AVx private data:
 //
 //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -151,20 +151,18 @@
 //   62: Level 6.2
 //
 // See the following link for more information:
-// http://www.webmproject.org/vp9/profiles/
-int ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length) {
-  const int kVpxCodecPrivateLength = 3;
-  if (!private_data || length != kVpxCodecPrivateLength)
-    return 0;
+// http://www.webmproject.org/av1/profiles/
+int ParseAvxCodecPrivate(const uint8_t *private_data, int32_t length) {
+  const int kAvxCodecPrivateLength = 3;
+  if (!private_data || length != kAvxCodecPrivateLength) return 0;
 
   const uint8_t id_byte = *private_data;
   if (id_byte != 1)
     return 0;
 
-  const int kVpxProfileLength = 1;
+  const int kAvxProfileLength = 1;
   const uint8_t length_byte = private_data[1];
-  if (length_byte != kVpxProfileLength)
-    return 0;
+  if (length_byte != kAvxProfileLength) return 0;
 
   const int level = static_cast<int>(private_data[2]);
 
diff --git a/third_party/libwebm/common/hdr_util.h b/third_party/libwebm/common/hdr_util.h
index d30c2b9..1a3ca74 100644
--- a/third_party/libwebm/common/hdr_util.h
+++ b/third_party/libwebm/common/hdr_util.h
@@ -43,8 +43,8 @@
 bool CopyColour(const mkvparser::Colour& parser_colour,
                 mkvmuxer::Colour* muxer_colour);
 
-// Returns VP9 profile upon success or 0 upon failure.
-int ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length);
+// Returns AV1 profile upon success or 0 upon failure.
+int ParseAvxCodecPrivate(const uint8_t *private_data, int32_t length);
 
 }  // namespace libwebm
 
diff --git a/third_party/libwebm/mkvmuxer/mkvmuxer.cc b/third_party/libwebm/mkvmuxer/mkvmuxer.cc
index c79ce24..689b5ae 100644
--- a/third_party/libwebm/mkvmuxer/mkvmuxer.cc
+++ b/third_party/libwebm/mkvmuxer/mkvmuxer.cc
@@ -1397,7 +1397,7 @@
 const char Tracks::kVorbisCodecId[] = "A_VORBIS";
 const char Tracks::kVp8CodecId[] = "V_VP8";
 const char Tracks::kVp9CodecId[] = "V_VP9";
-const char Tracks::kVp10CodecId[] = "V_VP10";
+const char Tracks::kAv1CodecId[] = "V_AV1";
 
 Tracks::Tracks()
     : track_entries_(NULL), track_entries_size_(0), wrote_tracks_(false) {}
diff --git a/third_party/libwebm/mkvmuxer/mkvmuxer.h b/third_party/libwebm/mkvmuxer/mkvmuxer.h
index 55ba071..01b26a2 100644
--- a/third_party/libwebm/mkvmuxer/mkvmuxer.h
+++ b/third_party/libwebm/mkvmuxer/mkvmuxer.h
@@ -669,7 +669,7 @@
   static const char kVorbisCodecId[];
   static const char kVp8CodecId[];
   static const char kVp9CodecId[];
-  static const char kVp10CodecId[];
+  static const char kAv1CodecId[];
 
   Tracks();
   ~Tracks();
diff --git a/third_party/x86inc/README.libvpx b/third_party/x86inc/README.libvpx
index 6347614..07c4dad 100644
--- a/third_party/x86inc/README.libvpx
+++ b/third_party/x86inc/README.libvpx
@@ -8,8 +8,8 @@
 defines that help automatically allow assembly to work cross-platform.
 
 Local Modifications:
-Get configuration from vpx_config.asm.
-Prefix functions with vpx by default.
+Get configuration from aom_config.asm.
+Prefix functions with aom by default.
 Manage name mangling (prefixing with '_') manually because 'PREFIX' does not
   exist in libaom.
 Expand PIC default to macho64 and respect CONFIG_PIC from libaom
diff --git a/third_party/x86inc/x86inc.asm b/third_party/x86inc/x86inc.asm
index bb84f61..cfee99c7 100644
--- a/third_party/x86inc/x86inc.asm
+++ b/third_party/x86inc/x86inc.asm
@@ -34,10 +34,10 @@
 ; as this feature might be useful for others as well.  Send patches or ideas
 ; to x264-devel@videolan.org .
 
-%include "vpx_config.asm"
+%include "aom_config.asm"
 
 %ifndef private_prefix
-    %define private_prefix vpx
+    %define private_prefix aom
 %endif
 
 %ifndef public_prefix
diff --git a/tools/ftfy.sh b/tools/ftfy.sh
index c005918..315da1a 100755
--- a/tools/ftfy.sh
+++ b/tools/ftfy.sh
@@ -28,7 +28,7 @@
 }
 
 
-vpx_style() {
+aom_style() {
   for f; do
     case "$f" in
       *.h|*.c|*.cc)
@@ -123,7 +123,7 @@
   case "$f" in
     third_party/*) continue;;
   esac
-  vpx_style "$f"
+  aom_style "$f"
 done
 git diff --no-color --no-ext-diff > "${MODIFIED_DIFF}"
 
diff --git a/tools_common.c b/tools_common.c
index 33d8bef..d83d0d9 100644
--- a/tools_common.c
+++ b/tools_common.c
@@ -16,12 +16,12 @@
 
 #include "./tools_common.h"
 
-#if CONFIG_VP10_ENCODER
-#include "aom/vp8cx.h"
+#if CONFIG_AV1_ENCODER
+#include "aom/aomcx.h"
 #endif
 
-#if CONFIG_VP10_DECODER
-#include "aom/vp8dx.h"
+#if CONFIG_AV1_DECODER
+#include "aom/aomdx.h"
 #endif
 
 #if defined(_WIN32) || defined(__OS2__)
@@ -66,25 +66,25 @@
 
 void warn(const char *fmt, ...) { LOG_ERROR("Warning"); }
 
-void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
-  const char *detail = vpx_codec_error_detail(ctx);
+void die_codec(aom_codec_ctx_t *ctx, const char *s) {
+  const char *detail = aom_codec_error_detail(ctx);
 
-  printf("%s: %s\n", s, vpx_codec_error(ctx));
+  printf("%s: %s\n", s, aom_codec_error(ctx));
   if (detail) printf("    %s\n", detail);
   exit(EXIT_FAILURE);
 }
 
-int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame) {
+int read_yuv_frame(struct AvxInputContext *input_ctx, aom_image_t *yuv_frame) {
   FILE *f = input_ctx->file;
   struct FileTypeDetectionBuffer *detect = &input_ctx->detect;
   int plane = 0;
   int shortread = 0;
-  const int bytespp = (yuv_frame->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
+  const int bytespp = (yuv_frame->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
 
   for (plane = 0; plane < 3; ++plane) {
     uint8_t *ptr;
-    const int w = vpx_img_plane_width(yuv_frame, plane);
-    const int h = vpx_img_plane_height(yuv_frame, plane);
+    const int w = aom_img_plane_width(yuv_frame, plane);
+    const int h = aom_img_plane_height(yuv_frame, plane);
     int r;
 
     /* Determine the correct plane based on the image format. The for-loop
@@ -94,13 +94,13 @@
     switch (plane) {
       case 1:
         ptr =
-            yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V
-                                                                 : VPX_PLANE_U];
+            yuv_frame->planes[yuv_frame->fmt == AOM_IMG_FMT_YV12 ? AOM_PLANE_V
+                                                                 : AOM_PLANE_U];
         break;
       case 2:
         ptr =
-            yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U
-                                                                 : VPX_PLANE_V];
+            yuv_frame->planes[yuv_frame->fmt == AOM_IMG_FMT_YV12 ? AOM_PLANE_U
+                                                                 : AOM_PLANE_V];
         break;
       default: ptr = yuv_frame->planes[plane];
     }
@@ -129,23 +129,23 @@
 
 #if CONFIG_ENCODERS
 
-static const VpxInterface vpx_encoders[] = {
-#if CONFIG_VP10_ENCODER
-  { "vp10", VP10_FOURCC, &vpx_codec_vp10_cx },
+static const AvxInterface aom_encoders[] = {
+#if CONFIG_AV1_ENCODER
+  { "av1", AV1_FOURCC, &aom_codec_av1_cx },
 #endif
 };
 
-int get_vpx_encoder_count(void) {
-  return sizeof(vpx_encoders) / sizeof(vpx_encoders[0]);
+int get_aom_encoder_count(void) {
+  return sizeof(aom_encoders) / sizeof(aom_encoders[0]);
 }
 
-const VpxInterface *get_vpx_encoder_by_index(int i) { return &vpx_encoders[i]; }
+const AvxInterface *get_aom_encoder_by_index(int i) { return &aom_encoders[i]; }
 
-const VpxInterface *get_vpx_encoder_by_name(const char *name) {
+const AvxInterface *get_aom_encoder_by_name(const char *name) {
   int i;
 
-  for (i = 0; i < get_vpx_encoder_count(); ++i) {
-    const VpxInterface *encoder = get_vpx_encoder_by_index(i);
+  for (i = 0; i < get_aom_encoder_count(); ++i) {
+    const AvxInterface *encoder = get_aom_encoder_by_index(i);
     if (strcmp(encoder->name, name) == 0) return encoder;
   }
 
@@ -156,35 +156,35 @@
 
 #if CONFIG_DECODERS
 
-static const VpxInterface vpx_decoders[] = {
+static const AvxInterface aom_decoders[] = {
 
-#if CONFIG_VP10_DECODER
-  { "vp10", VP10_FOURCC, &vpx_codec_vp10_dx },
+#if CONFIG_AV1_DECODER
+  { "av1", AV1_FOURCC, &aom_codec_av1_dx },
 #endif
 };
 
-int get_vpx_decoder_count(void) {
-  return sizeof(vpx_decoders) / sizeof(vpx_decoders[0]);
+int get_aom_decoder_count(void) {
+  return sizeof(aom_decoders) / sizeof(aom_decoders[0]);
 }
 
-const VpxInterface *get_vpx_decoder_by_index(int i) { return &vpx_decoders[i]; }
+const AvxInterface *get_aom_decoder_by_index(int i) { return &aom_decoders[i]; }
 
-const VpxInterface *get_vpx_decoder_by_name(const char *name) {
+const AvxInterface *get_aom_decoder_by_name(const char *name) {
   int i;
 
-  for (i = 0; i < get_vpx_decoder_count(); ++i) {
-    const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
+  for (i = 0; i < get_aom_decoder_count(); ++i) {
+    const AvxInterface *const decoder = get_aom_decoder_by_index(i);
     if (strcmp(decoder->name, name) == 0) return decoder;
   }
 
   return NULL;
 }
 
-const VpxInterface *get_vpx_decoder_by_fourcc(uint32_t fourcc) {
+const AvxInterface *get_aom_decoder_by_fourcc(uint32_t fourcc) {
   int i;
 
-  for (i = 0; i < get_vpx_decoder_count(); ++i) {
-    const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
+  for (i = 0; i < get_aom_decoder_count(); ++i) {
+    const AvxInterface *const decoder = get_aom_decoder_by_index(i);
     if (decoder->fourcc == fourcc) return decoder;
   }
 
@@ -193,31 +193,31 @@
 
 #endif  // CONFIG_DECODERS
 
-// TODO(dkovalev): move this function to vpx_image.{c, h}, so it will be part
-// of vpx_image_t support
-int vpx_img_plane_width(const vpx_image_t *img, int plane) {
+// TODO(dkovalev): move this function to aom_image.{c, h}, so it will be part
+// of aom_image_t support
+int aom_img_plane_width(const aom_image_t *img, int plane) {
   if (plane > 0 && img->x_chroma_shift > 0)
     return (img->d_w + 1) >> img->x_chroma_shift;
   else
     return img->d_w;
 }
 
-int vpx_img_plane_height(const vpx_image_t *img, int plane) {
+int aom_img_plane_height(const aom_image_t *img, int plane) {
   if (plane > 0 && img->y_chroma_shift > 0)
     return (img->d_h + 1) >> img->y_chroma_shift;
   else
     return img->d_h;
 }
 
-void vpx_img_write(const vpx_image_t *img, FILE *file) {
+void aom_img_write(const aom_image_t *img, FILE *file) {
   int plane;
 
   for (plane = 0; plane < 3; ++plane) {
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
-    const int w = vpx_img_plane_width(img, plane) *
-                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
-    const int h = vpx_img_plane_height(img, plane);
+    const int w = aom_img_plane_width(img, plane) *
+                  ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+    const int h = aom_img_plane_height(img, plane);
     int y;
 
     for (y = 0; y < h; ++y) {
@@ -227,15 +227,15 @@
   }
 }
 
-int vpx_img_read(vpx_image_t *img, FILE *file) {
+int aom_img_read(aom_image_t *img, FILE *file) {
   int plane;
 
   for (plane = 0; plane < 3; ++plane) {
     unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
-    const int w = vpx_img_plane_width(img, plane) *
-                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
-    const int h = vpx_img_plane_height(img, plane);
+    const int w = aom_img_plane_width(img, plane) *
+                  ((img->fmt & AOM_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+    const int h = aom_img_plane_height(img, plane);
     int y;
 
     for (y = 0; y < h; ++y) {
@@ -260,8 +260,8 @@
 }
 
 // TODO(debargha): Consolidate the functions below into a separate file.
-#if CONFIG_VP9_HIGHBITDEPTH
-static void highbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
+#if CONFIG_AOM_HIGHBITDEPTH
+static void highbd_img_upshift(aom_image_t *dst, aom_image_t *src,
                                int input_shift) {
   // Note the offset is 1 less than half.
   const int offset = input_shift > 0 ? (1 << (input_shift - 1)) - 1 : 0;
@@ -273,10 +273,10 @@
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
-    case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I42216:
-    case VPX_IMG_FMT_I44416:
-    case VPX_IMG_FMT_I44016: break;
+    case AOM_IMG_FMT_I42016:
+    case AOM_IMG_FMT_I42216:
+    case AOM_IMG_FMT_I44416:
+    case AOM_IMG_FMT_I44016: break;
     default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
@@ -297,7 +297,7 @@
   }
 }
 
-static void lowbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
+static void lowbd_img_upshift(aom_image_t *dst, aom_image_t *src,
                               int input_shift) {
   // Note the offset is 1 less than half.
   const int offset = input_shift > 0 ? (1 << (input_shift - 1)) - 1 : 0;
@@ -305,14 +305,14 @@
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift ||
-      dst->fmt != src->fmt + VPX_IMG_FMT_HIGHBITDEPTH || input_shift < 0) {
+      dst->fmt != src->fmt + AOM_IMG_FMT_HIGHBITDEPTH || input_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440: break;
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I444:
+    case AOM_IMG_FMT_I440: break;
     default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
@@ -334,26 +334,26 @@
   }
 }
 
-void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, int input_shift) {
-  if (src->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
+void aom_img_upshift(aom_image_t *dst, aom_image_t *src, int input_shift) {
+  if (src->fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
     highbd_img_upshift(dst, src, input_shift);
   } else {
     lowbd_img_upshift(dst, src, input_shift);
   }
 }
 
-void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src) {
+void aom_img_truncate_16_to_8(aom_image_t *dst, aom_image_t *src) {
   int plane;
-  if (dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH != src->fmt || dst->d_w != src->d_w ||
+  if (dst->fmt + AOM_IMG_FMT_HIGHBITDEPTH != src->fmt || dst->d_w != src->d_w ||
       dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift) {
     fatal("Unsupported image conversion");
   }
   switch (dst->fmt) {
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440: break;
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I444:
+    case AOM_IMG_FMT_I440: break;
     default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
@@ -375,7 +375,7 @@
   }
 }
 
-static void highbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
+static void highbd_img_downshift(aom_image_t *dst, aom_image_t *src,
                                  int down_shift) {
   int plane;
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
@@ -385,10 +385,10 @@
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
-    case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I42216:
-    case VPX_IMG_FMT_I44416:
-    case VPX_IMG_FMT_I44016: break;
+    case AOM_IMG_FMT_I42016:
+    case AOM_IMG_FMT_I42216:
+    case AOM_IMG_FMT_I44416:
+    case AOM_IMG_FMT_I44016: break;
     default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
@@ -409,20 +409,20 @@
   }
 }
 
-static void lowbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
+static void lowbd_img_downshift(aom_image_t *dst, aom_image_t *src,
                                 int down_shift) {
   int plane;
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift ||
-      src->fmt != dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH || down_shift < 0) {
+      src->fmt != dst->fmt + AOM_IMG_FMT_HIGHBITDEPTH || down_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (dst->fmt) {
-    case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440: break;
+    case AOM_IMG_FMT_I420:
+    case AOM_IMG_FMT_I422:
+    case AOM_IMG_FMT_I444:
+    case AOM_IMG_FMT_I440: break;
     default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
@@ -444,11 +444,11 @@
   }
 }
 
-void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src, int down_shift) {
-  if (dst->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
+void aom_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift) {
+  if (dst->fmt & AOM_IMG_FMT_HIGHBITDEPTH) {
     highbd_img_downshift(dst, src, down_shift);
   } else {
     lowbd_img_downshift(dst, src, down_shift);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/tools_common.h b/tools_common.h
index 4cdb312..66463e8 100644
--- a/tools_common.h
+++ b/tools_common.h
@@ -12,10 +12,10 @@
 
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_codec.h"
-#include "aom/vpx_image.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_codec.h"
+#include "aom/aom_image.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/msvc.h"
 
 #if CONFIG_ENCODERS
@@ -60,9 +60,7 @@
 
 #define RAW_FRAME_HDR_SZ sizeof(uint32_t)
 
-#define VP8_FOURCC 0x30385056
-#define VP9_FOURCC 0x30395056
-#define VP10_FOURCC 0x303a5056
+#define AV1_FOURCC 0x31305641
 
 enum VideoFileType {
   FILE_TYPE_RAW,
@@ -77,12 +75,12 @@
   size_t position;
 };
 
-struct VpxRational {
+struct AvxRational {
   int numerator;
   int denominator;
 };
 
-struct VpxInputContext {
+struct AvxInputContext {
   const char *filename;
   FILE *file;
   int64_t length;
@@ -90,12 +88,12 @@
   enum VideoFileType file_type;
   uint32_t width;
   uint32_t height;
-  struct VpxRational pixel_aspect_ratio;
-  vpx_img_fmt_t fmt;
-  vpx_bit_depth_t bit_depth;
+  struct AvxRational pixel_aspect_ratio;
+  aom_img_fmt_t fmt;
+  aom_bit_depth_t bit_depth;
   int only_i420;
   uint32_t fourcc;
-  struct VpxRational framerate;
+  struct AvxRational framerate;
 #if CONFIG_ENCODERS
   y4m_input y4m;
 #endif
@@ -106,55 +104,55 @@
 #endif
 
 #if defined(__GNUC__)
-#define VPX_NO_RETURN __attribute__((noreturn))
+#define AOM_NO_RETURN __attribute__((noreturn))
 #else
-#define VPX_NO_RETURN
+#define AOM_NO_RETURN
 #endif
 
 /* Sets a stdio stream into binary mode */
 FILE *set_binary_mode(FILE *stream);
 
-void die(const char *fmt, ...) VPX_NO_RETURN;
-void fatal(const char *fmt, ...) VPX_NO_RETURN;
+void die(const char *fmt, ...) AOM_NO_RETURN;
+void fatal(const char *fmt, ...) AOM_NO_RETURN;
 void warn(const char *fmt, ...);
 
-void die_codec(vpx_codec_ctx_t *ctx, const char *s) VPX_NO_RETURN;
+void die_codec(aom_codec_ctx_t *ctx, const char *s) AOM_NO_RETURN;
 
 /* The tool including this file must define usage_exit() */
-void usage_exit(void) VPX_NO_RETURN;
+void usage_exit(void) AOM_NO_RETURN;
 
-#undef VPX_NO_RETURN
+#undef AOM_NO_RETURN
 
-int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame);
+int read_yuv_frame(struct AvxInputContext *input_ctx, aom_image_t *yuv_frame);
 
-typedef struct VpxInterface {
+typedef struct AvxInterface {
   const char *const name;
   const uint32_t fourcc;
-  vpx_codec_iface_t *(*const codec_interface)();
-} VpxInterface;
+  aom_codec_iface_t *(*const codec_interface)();
+} AvxInterface;
 
-int get_vpx_encoder_count(void);
-const VpxInterface *get_vpx_encoder_by_index(int i);
-const VpxInterface *get_vpx_encoder_by_name(const char *name);
+int get_aom_encoder_count(void);
+const AvxInterface *get_aom_encoder_by_index(int i);
+const AvxInterface *get_aom_encoder_by_name(const char *name);
 
-int get_vpx_decoder_count(void);
-const VpxInterface *get_vpx_decoder_by_index(int i);
-const VpxInterface *get_vpx_decoder_by_name(const char *name);
-const VpxInterface *get_vpx_decoder_by_fourcc(uint32_t fourcc);
+int get_aom_decoder_count(void);
+const AvxInterface *get_aom_decoder_by_index(int i);
+const AvxInterface *get_aom_decoder_by_name(const char *name);
+const AvxInterface *get_aom_decoder_by_fourcc(uint32_t fourcc);
 
-// TODO(dkovalev): move this function to vpx_image.{c, h}, so it will be part
-// of vpx_image_t support
-int vpx_img_plane_width(const vpx_image_t *img, int plane);
-int vpx_img_plane_height(const vpx_image_t *img, int plane);
-void vpx_img_write(const vpx_image_t *img, FILE *file);
-int vpx_img_read(vpx_image_t *img, FILE *file);
+// TODO(dkovalev): move this function to aom_image.{c, h}, so it will be part
+// of aom_image_t support
+int aom_img_plane_width(const aom_image_t *img, int plane);
+int aom_img_plane_height(const aom_image_t *img, int plane);
+void aom_img_write(const aom_image_t *img, FILE *file);
+int aom_img_read(aom_image_t *img, FILE *file);
 
 double sse_to_psnr(double samples, double peak, double mse);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, int input_shift);
-void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src, int down_shift);
-void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src);
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_img_upshift(aom_image_t *dst, aom_image_t *src, int input_shift);
+void aom_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift);
+void aom_img_truncate_16_to_8(aom_image_t *dst, aom_image_t *src);
 #endif
 
 #ifdef __cplusplus
diff --git a/usage.dox b/usage.dox
index 2b0874b..59239e8 100644
--- a/usage.dox
+++ b/usage.dox
@@ -1,6 +1,6 @@
 /*!\page usage Usage
 
-    The vpx multi-format codec SDK provides a unified interface amongst its
+    The aom multi-format codec SDK provides a unified interface amongst its
     supported codecs. This abstraction allows applications using this SDK to
     easily support multiple video formats with minimal code duplication or
     "special casing." This section describes the interface common to all codecs.
@@ -34,7 +34,7 @@
 
     Most operations require an initialized codec context. Codec context
     instances are codec specific. That is, the codec to be used for the encoded
-    video must be known at initialization time. See #vpx_codec_ctx_t for further
+    video must be known at initialization time. See #aom_codec_ctx_t for further
     information.
 
     \subsection usage_ifaces Interfaces
@@ -46,7 +46,7 @@
 
     Each supported codec will expose an interface structure to the application
     as an <code>extern</code> reference to a structure of the incomplete type
-    #vpx_codec_iface_t.
+    #aom_codec_iface_t.
 
     \section usage_features Features
     Several "features" are defined that are optionally implemented by codec
@@ -54,8 +54,8 @@
     different platforms. The purpose of defining these features is that when
     they are implemented, they conform to a common interface. The features, or
     capabilities, of an algorithm can be queried from it's interface by using
-    the vpx_codec_get_caps() method. Attempts to invoke features not supported
-    by an algorithm will generally result in #VPX_CODEC_INCAPABLE.
+    the aom_codec_get_caps() method. Attempts to invoke features not supported
+    by an algorithm will generally result in #AOM_CODEC_INCAPABLE.
 
     \if decoder
     Currently defined decoder features include:
@@ -72,41 +72,41 @@
     the ABI is versioned. The ABI version number must be passed at
     initialization time to ensure the application is using a header file that
     matches the library. The current ABI version number is stored in the
-    preprocessor macros #VPX_CODEC_ABI_VERSION, #VPX_ENCODER_ABI_VERSION, and
-    #VPX_DECODER_ABI_VERSION. For convenience, each initialization function has
+    preprocessor macros #AOM_CODEC_ABI_VERSION, #AOM_ENCODER_ABI_VERSION, and
+    #AOM_DECODER_ABI_VERSION. For convenience, each initialization function has
     a wrapper macro that inserts the correct version number. These macros are
     named like the initialization methods, but without the _ver suffix.
 
 
     The available initialization methods are:
     \if encoder
-    \li #vpx_codec_enc_init (calls vpx_codec_enc_init_ver())
-    \li #vpx_codec_enc_init_multi (calls vpx_codec_enc_init_multi_ver())
+    \li #aom_codec_enc_init (calls aom_codec_enc_init_ver())
+    \li #aom_codec_enc_init_multi (calls aom_codec_enc_init_multi_ver())
     \endif
     \if decoder
-    \li #vpx_codec_dec_init (calls vpx_codec_dec_init_ver())
+    \li #aom_codec_dec_init (calls aom_codec_dec_init_ver())
     \endif
 
 
     \section usage_errors Error Handling
-    Almost all codec functions return an error status of type #vpx_codec_err_t.
+    Almost all codec functions return an error status of type #aom_codec_err_t.
     The semantics of how each error condition should be processed is clearly
     defined in the definitions of each enumerated value. Error values can be
-    converted into ASCII strings with the vpx_codec_error() and
-    vpx_codec_err_to_string() methods. The difference between these two methods is
-    that vpx_codec_error() returns the error state from an initialized context,
-    whereas vpx_codec_err_to_string() can be used in cases where an error occurs
+    converted into ASCII strings with the aom_codec_error() and
+    aom_codec_err_to_string() methods. The difference between these two methods is
+    that aom_codec_error() returns the error state from an initialized context,
+    whereas aom_codec_err_to_string() can be used in cases where an error occurs
     outside any context. The enumerated value returned from the last call can be
     retrieved from the <code>err</code> member of the decoder context as well.
     Finally, more detailed error information may be able to be obtained by using
-    the vpx_codec_error_detail() method. Not all errors produce detailed error
+    the aom_codec_error_detail() method. Not all errors produce detailed error
     information.
 
     In addition to error information, the codec library's build configuration
     is available at runtime on some platforms. This information can be returned
-    by calling vpx_codec_build_config(), and is formatted as a base64 coded string
+    by calling aom_codec_build_config(), and is formatted as a base64 coded string
     (comprised of characters in the set [a-z_a-Z0-9+/]). This information is not
-    useful to an application at runtime, but may be of use to vpx for support.
+    useful to an application at runtime, but may be of use to aom for support.
 
 
     \section usage_deadline Deadline
@@ -116,7 +116,7 @@
     returning. This is a soft deadline -- that is, the semantics of the
     requested operation take precedence over meeting the deadline. If, for
     example, an application sets a <code>deadline</code> of 1000us, and the
-    frame takes 2000us to decode, the call to vpx_codec_decode() will return
+    frame takes 2000us to decode, the call to aom_codec_decode() will return
     after 2000us. In this case the deadline is not met, but the semantics of the
     function are preserved. If, for the same frame, an application instead sets
     a <code>deadline</code> of 5000us, the decoder will see that it has 3000us
diff --git a/usage_cx.dox b/usage_cx.dox
index 92b0d34..dcf267c 100644
--- a/usage_cx.dox
+++ b/usage_cx.dox
@@ -1,6 +1,6 @@
 /*! \page usage_encode Encoding
 
-    The vpx_codec_encode() function is at the core of the encode loop. It
+    The aom_codec_encode() function is at the core of the encode loop. It
     processes raw images passed by the application, producing packets of
     compressed data. The <code>deadline</code> parameter controls the amount
     of time in microseconds the encoder should spend working on the frame. For
diff --git a/usage_dx.dox b/usage_dx.dox
index 883ce24..6b76bf7 100644
--- a/usage_dx.dox
+++ b/usage_dx.dox
@@ -1,6 +1,6 @@
 /*! \page usage_decode Decoding
 
-    The vpx_codec_decode() function is at the core of the decode loop. It
+    The aom_codec_decode() function is at the core of the decode loop. It
     processes packets of compressed data passed by the application, producing
     decoded images. The decoder expects packets to comprise exactly one image
     frame of data. Packets \ref MUST be passed in decode order. If the
@@ -20,11 +20,11 @@
     that allow the application to register a callback to be invoked by the
     decoder when decoded data becomes available. Decoders are not required to
     support this feature, however. Like all \ref usage_features, support can be
-    determined by calling vpx_codec_get_caps(). Callbacks are available in both
+    determined by calling aom_codec_get_caps(). Callbacks are available in both
     frame-based and slice-based variants. Frame based callbacks conform to the
-    signature of #vpx_codec_put_frame_cb_fn_t and are invoked once the entire
+    signature of #aom_codec_put_frame_cb_fn_t and are invoked once the entire
     frame has been decoded. Slice based callbacks conform to the signature of
-    #vpx_codec_put_slice_cb_fn_t and are invoked after a subsection of the frame
+    #aom_codec_put_slice_cb_fn_t and are invoked after a subsection of the frame
     is decoded. For example, a slice callback could be issued for each
     macroblock row. However, the number and size of slices to return is
     implementation specific. Also, the image data passed in a slice callback is
@@ -39,9 +39,9 @@
     \section usage_frame_iter Frame Iterator Based Decoding
     If the codec does not support callback based decoding, or the application
     chooses not to make use of that feature, decoded frames are made available
-    through the vpx_codec_get_frame() iterator. The application initializes the
-    iterator storage (of type #vpx_codec_iter_t) to NULL, then calls
-    vpx_codec_get_frame repeatedly until it returns NULL, indicating that all
+    through the aom_codec_get_frame() iterator. The application initializes the
+    iterator storage (of type #aom_codec_iter_t) to NULL, then calls
+    aom_codec_get_frame repeatedly until it returns NULL, indicating that all
     images have been returned. This process may result in zero, one, or many
     frames that are ready for display, depending on the codec.
 
diff --git a/video_common.h b/video_common.h
index 44b27a8..502a19a 100644
--- a/video_common.h
+++ b/video_common.h
@@ -17,7 +17,7 @@
   uint32_t codec_fourcc;
   int frame_width;
   int frame_height;
-  struct VpxRational time_base;
-} VpxVideoInfo;
+  struct AvxRational time_base;
+} AvxVideoInfo;
 
 #endif  // VIDEO_COMMON_H_
diff --git a/video_reader.c b/video_reader.c
index c3a19d3..ac69a28 100644
--- a/video_reader.c
+++ b/video_reader.c
@@ -18,17 +18,17 @@
 
 static const char *const kIVFSignature = "DKIF";
 
-struct VpxVideoReaderStruct {
-  VpxVideoInfo info;
+struct AvxVideoReaderStruct {
+  AvxVideoInfo info;
   FILE *file;
   uint8_t *buffer;
   size_t buffer_size;
   size_t frame_size;
 };
 
-VpxVideoReader *vpx_video_reader_open(const char *filename) {
+AvxVideoReader *aom_video_reader_open(const char *filename) {
   char header[32];
-  VpxVideoReader *reader = NULL;
+  AvxVideoReader *reader = NULL;
   FILE *const file = fopen(filename, "rb");
   if (!file) return NULL;  // Can't open file
 
@@ -40,7 +40,7 @@
   if (mem_get_le16(header + 4) != 0) return NULL;  // Wrong IVF version
 
   reader = calloc(1, sizeof(*reader));
-  if (!reader) return NULL;  // Can't allocate VpxVideoReader
+  if (!reader) return NULL;  // Can't allocate AvxVideoReader
 
   reader->file = file;
   reader->info.codec_fourcc = mem_get_le32(header + 8);
@@ -52,7 +52,7 @@
   return reader;
 }
 
-void vpx_video_reader_close(VpxVideoReader *reader) {
+void aom_video_reader_close(AvxVideoReader *reader) {
   if (reader) {
     fclose(reader->file);
     free(reader->buffer);
@@ -60,18 +60,18 @@
   }
 }
 
-int vpx_video_reader_read_frame(VpxVideoReader *reader) {
+int aom_video_reader_read_frame(AvxVideoReader *reader) {
   return !ivf_read_frame(reader->file, &reader->buffer, &reader->frame_size,
                          &reader->buffer_size);
 }
 
-const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader,
+const uint8_t *aom_video_reader_get_frame(AvxVideoReader *reader,
                                           size_t *size) {
   if (size) *size = reader->frame_size;
 
   return reader->buffer;
 }
 
-const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader) {
+const AvxVideoInfo *aom_video_reader_get_info(AvxVideoReader *reader) {
   return &reader->info;
 }
diff --git a/video_reader.h b/video_reader.h
index 73c25b0..daf4dc4 100644
--- a/video_reader.h
+++ b/video_reader.h
@@ -16,33 +16,33 @@
 // The following code is work in progress. It is going to  support transparent
 // reading of input files. Right now only IVF format is supported for
 // simplicity. The main goal the API is to be simple and easy to use in example
-// code and in vpxenc/vpxdec later. All low-level details like memory
+// code and in aomenc/aomdec later. All low-level details like memory
 // buffer management are hidden from API users.
-struct VpxVideoReaderStruct;
-typedef struct VpxVideoReaderStruct VpxVideoReader;
+struct AvxVideoReaderStruct;
+typedef struct AvxVideoReaderStruct AvxVideoReader;
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 // Opens the input file for reading and inspects it to determine file type.
-// Returns an opaque VpxVideoReader* upon success, or NULL upon failure.
+// Returns an opaque AvxVideoReader* upon success, or NULL upon failure.
 // Right now only IVF format is supported.
-VpxVideoReader *vpx_video_reader_open(const char *filename);
+AvxVideoReader *aom_video_reader_open(const char *filename);
 
-// Frees all resources associated with VpxVideoReader* returned from
-// vpx_video_reader_open() call.
-void vpx_video_reader_close(VpxVideoReader *reader);
+// Frees all resources associated with AvxVideoReader* returned from
+// aom_video_reader_open() call.
+void aom_video_reader_close(AvxVideoReader *reader);
 
 // Reads frame from the file and stores it in internal buffer.
-int vpx_video_reader_read_frame(VpxVideoReader *reader);
+int aom_video_reader_read_frame(AvxVideoReader *reader);
 
 // Returns the pointer to memory buffer with frame data read by last call to
-// vpx_video_reader_read_frame().
-const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, size_t *size);
+// aom_video_reader_read_frame().
+const uint8_t *aom_video_reader_get_frame(AvxVideoReader *reader, size_t *size);
 
-// Fills VpxVideoInfo with information from opened video file.
-const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader);
+// Fills AvxVideoInfo with information from opened video file.
+const AvxVideoInfo *aom_video_reader_get_info(AvxVideoReader *reader);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/video_writer.c b/video_writer.c
index c0c292f..5e2df64 100644
--- a/video_writer.c
+++ b/video_writer.c
@@ -12,17 +12,17 @@
 
 #include "./ivfenc.h"
 #include "./video_writer.h"
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
-struct VpxVideoWriterStruct {
-  VpxVideoInfo info;
+struct AvxVideoWriterStruct {
+  AvxVideoInfo info;
   FILE *file;
   int frame_count;
 };
 
-static void write_header(FILE *file, const VpxVideoInfo *info,
+static void write_header(FILE *file, const AvxVideoInfo *info,
                          int frame_count) {
-  struct vpx_codec_enc_cfg cfg;
+  struct aom_codec_enc_cfg cfg;
   cfg.g_w = info->frame_width;
   cfg.g_h = info->frame_height;
   cfg.g_timebase.num = info->time_base.numerator;
@@ -31,11 +31,11 @@
   ivf_write_file_header(file, &cfg, info->codec_fourcc, frame_count);
 }
 
-VpxVideoWriter *vpx_video_writer_open(const char *filename,
-                                      VpxContainer container,
-                                      const VpxVideoInfo *info) {
+AvxVideoWriter *aom_video_writer_open(const char *filename,
+                                      AvxContainer container,
+                                      const AvxVideoInfo *info) {
   if (container == kContainerIVF) {
-    VpxVideoWriter *writer = NULL;
+    AvxVideoWriter *writer = NULL;
     FILE *const file = fopen(filename, "wb");
     if (!file) return NULL;
 
@@ -54,7 +54,7 @@
   return NULL;
 }
 
-void vpx_video_writer_close(VpxVideoWriter *writer) {
+void aom_video_writer_close(AvxVideoWriter *writer) {
   if (writer) {
     // Rewriting frame header with real frame count
     rewind(writer->file);
@@ -65,7 +65,7 @@
   }
 }
 
-int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer,
+int aom_video_writer_write_frame(AvxVideoWriter *writer, const uint8_t *buffer,
                                  size_t size, int64_t pts) {
   ivf_write_frame_header(writer->file, pts, size);
   if (fwrite(buffer, 1, size, writer->file) != size) return 0;
diff --git a/video_writer.h b/video_writer.h
index a769811..fef2fd7 100644
--- a/video_writer.h
+++ b/video_writer.h
@@ -13,28 +13,28 @@
 
 #include "./video_common.h"
 
-typedef enum { kContainerIVF } VpxContainer;
+typedef enum { kContainerIVF } AvxContainer;
 
-struct VpxVideoWriterStruct;
-typedef struct VpxVideoWriterStruct VpxVideoWriter;
+struct AvxVideoWriterStruct;
+typedef struct AvxVideoWriterStruct AvxVideoWriter;
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 // Finds and opens writer for specified container format.
-// Returns an opaque VpxVideoWriter* upon success, or NULL upon failure.
+// Returns an opaque AvxVideoWriter* upon success, or NULL upon failure.
 // Right now only IVF format is supported.
-VpxVideoWriter *vpx_video_writer_open(const char *filename,
-                                      VpxContainer container,
-                                      const VpxVideoInfo *info);
+AvxVideoWriter *aom_video_writer_open(const char *filename,
+                                      AvxContainer container,
+                                      const AvxVideoInfo *info);
 
-// Frees all resources associated with VpxVideoWriter* returned from
-// vpx_video_writer_open() call.
-void vpx_video_writer_close(VpxVideoWriter *writer);
+// Frees all resources associated with AvxVideoWriter* returned from
+// aom_video_writer_open() call.
+void aom_video_writer_close(AvxVideoWriter *writer);
 
 // Writes frame bytes to the file.
-int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer,
+int aom_video_writer_write_frame(AvxVideoWriter *writer, const uint8_t *buffer,
                                  size_t size, int64_t pts);
 
 #ifdef __cplusplus
diff --git a/warnings.c b/warnings.c
index eea6abc..a9690d4 100644
--- a/warnings.c
+++ b/warnings.c
@@ -15,10 +15,10 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #include "./tools_common.h"
-#include "./vpxenc.h"
+#include "./aomenc.h"
 
 static const char quantizer_warning_string[] =
     "Bad quantizer values. Quantizer values should not be equal, and should "
@@ -78,13 +78,13 @@
 
 static void check_lag_in_frames_realtime_deadline(
     int lag_in_frames, int deadline, struct WarningList *warning_list) {
-  if (deadline == VPX_DL_REALTIME && lag_in_frames != 0)
+  if (deadline == AOM_DL_REALTIME && lag_in_frames != 0)
     add_warning(lag_in_frames_with_realtime, warning_list);
 }
 
 void check_encoder_config(int disable_prompt,
-                          const struct VpxEncoderConfig *global_config,
-                          const struct vpx_codec_enc_cfg *stream_config) {
+                          const struct AvxEncoderConfig *global_config,
+                          const struct aom_codec_enc_cfg *stream_config) {
   int num_warnings = 0;
   struct WarningListNode *warning = NULL;
   struct WarningList warning_list = { 0 };
diff --git a/warnings.h b/warnings.h
index 6b8ae67..361b4a9 100644
--- a/warnings.h
+++ b/warnings.h
@@ -14,8 +14,8 @@
 extern "C" {
 #endif
 
-struct vpx_codec_enc_cfg;
-struct VpxEncoderConfig;
+struct aom_codec_enc_cfg;
+struct AvxEncoderConfig;
 
 /*
  * Checks config for improperly used settings. Warns user upon encountering
@@ -23,8 +23,8 @@
  * when warnings are issued.
  */
 void check_encoder_config(int disable_prompt,
-                          const struct VpxEncoderConfig *global_config,
-                          const struct vpx_codec_enc_cfg *stream_config);
+                          const struct AvxEncoderConfig *global_config,
+                          const struct aom_codec_enc_cfg *stream_config);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/webmdec.cc b/webmdec.cc
index 4f30540..ceec279 100644
--- a/webmdec.cc
+++ b/webmdec.cc
@@ -52,34 +52,34 @@
 }
 
 void rewind_and_reset(struct WebmInputContext *const webm_ctx,
-                      struct VpxInputContext *const vpx_ctx) {
-  rewind(vpx_ctx->file);
+                      struct AvxInputContext *const aom_ctx) {
+  rewind(aom_ctx->file);
   reset(webm_ctx);
 }
 
 }  // namespace
 
 int file_is_webm(struct WebmInputContext *webm_ctx,
-                 struct VpxInputContext *vpx_ctx) {
-  mkvparser::MkvReader *const reader = new mkvparser::MkvReader(vpx_ctx->file);
+                 struct AvxInputContext *aom_ctx) {
+  mkvparser::MkvReader *const reader = new mkvparser::MkvReader(aom_ctx->file);
   webm_ctx->reader = reader;
   webm_ctx->reached_eos = 0;
 
   mkvparser::EBMLHeader header;
   long long pos = 0;
   if (header.Parse(reader, pos) < 0) {
-    rewind_and_reset(webm_ctx, vpx_ctx);
+    rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
   }
 
   mkvparser::Segment *segment;
   if (mkvparser::Segment::CreateInstance(reader, pos, segment)) {
-    rewind_and_reset(webm_ctx, vpx_ctx);
+    rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
   }
   webm_ctx->segment = segment;
   if (segment->Load() < 0) {
-    rewind_and_reset(webm_ctx, vpx_ctx);
+    rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
   }
 
@@ -95,25 +95,21 @@
   }
 
   if (video_track == NULL || video_track->GetCodecId() == NULL) {
-    rewind_and_reset(webm_ctx, vpx_ctx);
+    rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
   }
 
-  if (!strncmp(video_track->GetCodecId(), "V_VP8", 5)) {
-    vpx_ctx->fourcc = VP8_FOURCC;
-  } else if (!strncmp(video_track->GetCodecId(), "V_VP9", 5)) {
-    vpx_ctx->fourcc = VP9_FOURCC;
-  } else if (!strncmp(video_track->GetCodecId(), "V_VP10", 6)) {
-    vpx_ctx->fourcc = VP10_FOURCC;
+  if (!strncmp(video_track->GetCodecId(), "V_AV1", 5)) {
+    aom_ctx->fourcc = AV1_FOURCC;
   } else {
-    rewind_and_reset(webm_ctx, vpx_ctx);
+    rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
   }
 
-  vpx_ctx->framerate.denominator = 0;
-  vpx_ctx->framerate.numerator = 0;
-  vpx_ctx->width = static_cast<uint32_t>(video_track->GetWidth());
-  vpx_ctx->height = static_cast<uint32_t>(video_track->GetHeight());
+  aom_ctx->framerate.denominator = 0;
+  aom_ctx->framerate.numerator = 0;
+  aom_ctx->width = static_cast<uint32_t>(video_track->GetWidth());
+  aom_ctx->height = static_cast<uint32_t>(video_track->GetHeight());
 
   get_first_cluster(webm_ctx);
 
@@ -197,7 +193,7 @@
 }
 
 int webm_guess_framerate(struct WebmInputContext *webm_ctx,
-                         struct VpxInputContext *vpx_ctx) {
+                         struct AvxInputContext *aom_ctx) {
   uint32_t i = 0;
   uint8_t *buffer = NULL;
   size_t buffer_size = 0;
@@ -207,8 +203,8 @@
     }
     ++i;
   }
-  vpx_ctx->framerate.numerator = (i - 1) * 1000000;
-  vpx_ctx->framerate.denominator =
+  aom_ctx->framerate.numerator = (i - 1) * 1000000;
+  aom_ctx->framerate.denominator =
       static_cast<int>(webm_ctx->timestamp_ns / 1000);
   delete[] buffer;
 
diff --git a/webmdec.h b/webmdec.h
index 7dcb170..6f2f598 100644
--- a/webmdec.h
+++ b/webmdec.h
@@ -16,7 +16,7 @@
 extern "C" {
 #endif
 
-struct VpxInputContext;
+struct AvxInputContext;
 
 struct WebmInputContext {
   void *reader;
@@ -38,7 +38,7 @@
 // TODO(vigneshv): Refactor this function into two smaller functions specific
 // to their task.
 int file_is_webm(struct WebmInputContext *webm_ctx,
-                 struct VpxInputContext *vpx_ctx);
+                 struct AvxInputContext *aom_ctx);
 
 // Reads a WebM Video Frame. Memory for the buffer is created, owned and managed
 // by this function. For the first call, |buffer| should be NULL and
@@ -57,7 +57,7 @@
 
 // Guesses the frame rate of the input file based on the container timestamps.
 int webm_guess_framerate(struct WebmInputContext *webm_ctx,
-                         struct VpxInputContext *vpx_ctx);
+                         struct AvxInputContext *aom_ctx);
 
 // Resets the WebMInputContext.
 void webm_free(struct WebmInputContext *webm_ctx);
diff --git a/webmenc.cc b/webmenc.cc
index 3605d0d..d62bfc1 100644
--- a/webmenc.cc
+++ b/webmenc.cc
@@ -21,10 +21,10 @@
 }  // namespace
 
 void write_webm_file_header(struct WebmOutputContext *webm_ctx,
-                            const vpx_codec_enc_cfg_t *cfg,
-                            const struct vpx_rational *fps,
+                            const aom_codec_enc_cfg_t *cfg,
+                            const struct aom_rational *fps,
                             stereo_format_t stereo_fmt, unsigned int fourcc,
-                            const struct VpxRational *par) {
+                            const struct AvxRational *par) {
   mkvmuxer::MkvWriter *const writer = new mkvmuxer::MkvWriter(webm_ctx->stream);
   mkvmuxer::Segment *const segment = new mkvmuxer::Segment();
   segment->Init(writer);
@@ -34,9 +34,9 @@
   mkvmuxer::SegmentInfo *const info = segment->GetSegmentInfo();
   const uint64_t kTimecodeScale = 1000000;
   info->set_timecode_scale(kTimecodeScale);
-  std::string version = "vpxenc";
+  std::string version = "aomenc";
   if (!webm_ctx->debug) {
-    version.append(std::string(" ") + vpx_codec_version_str());
+    version.append(std::string(" ") + aom_codec_version_str());
   }
   info->set_writing_app(version.c_str());
 
@@ -48,10 +48,8 @@
   video_track->SetStereoMode(stereo_fmt);
   const char *codec_id;
   switch (fourcc) {
-    case VP8_FOURCC: codec_id = "V_VP8"; break;
-    case VP9_FOURCC: codec_id = "V_VP9"; break;
-    case VP10_FOURCC: codec_id = "V_VP10"; break;
-    default: codec_id = "V_VP10"; break;
+    case AV1_FOURCC: codec_id = "V_AV1"; break;
+    default: codec_id = "V_AV1"; break;
   }
   video_track->set_codec_id(codec_id);
   if (par->numerator > 1 || par->denominator > 1) {
@@ -70,8 +68,8 @@
 }
 
 void write_webm_block(struct WebmOutputContext *webm_ctx,
-                      const vpx_codec_enc_cfg_t *cfg,
-                      const vpx_codec_cx_pkt_t *pkt) {
+                      const aom_codec_enc_cfg_t *cfg,
+                      const aom_codec_cx_pkt_t *pkt) {
   mkvmuxer::Segment *const segment =
       reinterpret_cast<mkvmuxer::Segment *>(webm_ctx->segment);
   int64_t pts_ns = pkt->data.frame.pts * 1000000000ll * cfg->g_timebase.num /
@@ -81,7 +79,7 @@
 
   segment->AddFrame(static_cast<uint8_t *>(pkt->data.frame.buf),
                     pkt->data.frame.sz, kVideoTrackNumber, pts_ns,
-                    pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+                    pkt->data.frame.flags & AOM_FRAME_IS_KEY);
 }
 
 void write_webm_file_footer(struct WebmOutputContext *webm_ctx) {
diff --git a/webmenc.h b/webmenc.h
index df46a78..87fe7c6 100644
--- a/webmenc.h
+++ b/webmenc.h
@@ -14,7 +14,7 @@
 #include <stdlib.h>
 
 #include "tools_common.h"
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -38,14 +38,14 @@
 } stereo_format_t;
 
 void write_webm_file_header(struct WebmOutputContext *webm_ctx,
-                            const vpx_codec_enc_cfg_t *cfg,
-                            const struct vpx_rational *fps,
+                            const aom_codec_enc_cfg_t *cfg,
+                            const struct aom_rational *fps,
                             stereo_format_t stereo_fmt, unsigned int fourcc,
-                            const struct VpxRational *par);
+                            const struct AvxRational *par);
 
 void write_webm_block(struct WebmOutputContext *webm_ctx,
-                      const vpx_codec_enc_cfg_t *cfg,
-                      const vpx_codec_cx_pkt_t *pkt);
+                      const aom_codec_enc_cfg_t *cfg,
+                      const aom_codec_cx_pkt_t *pkt);
 
 void write_webm_file_footer(struct WebmOutputContext *webm_ctx);
 
diff --git a/y4menc.c b/y4menc.c
index e26fcaf..6400f6b 100644
--- a/y4menc.c
+++ b/y4menc.c
@@ -12,45 +12,45 @@
 #include "./y4menc.h"
 
 int y4m_write_file_header(char *buf, size_t len, int width, int height,
-                          const struct VpxRational *framerate,
-                          vpx_img_fmt_t fmt, unsigned int bit_depth) {
+                          const struct AvxRational *framerate,
+                          aom_img_fmt_t fmt, unsigned int bit_depth) {
   const char *color;
   switch (bit_depth) {
     case 8:
-      color = fmt == VPX_IMG_FMT_444A
+      color = fmt == AOM_IMG_FMT_444A
                   ? "C444alpha\n"
-                  : fmt == VPX_IMG_FMT_I444 ? "C444\n" : fmt == VPX_IMG_FMT_I422
+                  : fmt == AOM_IMG_FMT_I444 ? "C444\n" : fmt == AOM_IMG_FMT_I422
                                                              ? "C422\n"
                                                              : "C420jpeg\n";
       break;
     case 9:
-      color = fmt == VPX_IMG_FMT_I44416
+      color = fmt == AOM_IMG_FMT_I44416
                   ? "C444p9 XYSCSS=444P9\n"
-                  : fmt == VPX_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n"
+                  : fmt == AOM_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n"
                                               : "C420p9 XYSCSS=420P9\n";
       break;
     case 10:
-      color = fmt == VPX_IMG_FMT_I44416
+      color = fmt == AOM_IMG_FMT_I44416
                   ? "C444p10 XYSCSS=444P10\n"
-                  : fmt == VPX_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n"
+                  : fmt == AOM_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n"
                                               : "C420p10 XYSCSS=420P10\n";
       break;
     case 12:
-      color = fmt == VPX_IMG_FMT_I44416
+      color = fmt == AOM_IMG_FMT_I44416
                   ? "C444p12 XYSCSS=444P12\n"
-                  : fmt == VPX_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n"
+                  : fmt == AOM_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n"
                                               : "C420p12 XYSCSS=420P12\n";
       break;
     case 14:
-      color = fmt == VPX_IMG_FMT_I44416
+      color = fmt == AOM_IMG_FMT_I44416
                   ? "C444p14 XYSCSS=444P14\n"
-                  : fmt == VPX_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n"
+                  : fmt == AOM_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n"
                                               : "C420p14 XYSCSS=420P14\n";
       break;
     case 16:
-      color = fmt == VPX_IMG_FMT_I44416
+      color = fmt == AOM_IMG_FMT_I44416
                   ? "C444p16 XYSCSS=444P16\n"
-                  : fmt == VPX_IMG_FMT_I42216 ? "C422p16 XYSCSS=422P16\n"
+                  : fmt == AOM_IMG_FMT_I42216 ? "C422p16 XYSCSS=422P16\n"
                                               : "C420p16 XYSCSS=420P16\n";
       break;
     default: color = NULL; assert(0);
diff --git a/y4menc.h b/y4menc.h
index bd92e02..89271ca 100644
--- a/y4menc.h
+++ b/y4menc.h
@@ -13,7 +13,7 @@
 
 #include "./tools_common.h"
 
-#include "aom/vpx_decoder.h"
+#include "aom/aom_decoder.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -22,8 +22,8 @@
 #define Y4M_BUFFER_SIZE 128
 
 int y4m_write_file_header(char *buf, size_t len, int width, int height,
-                          const struct VpxRational *framerate,
-                          vpx_img_fmt_t fmt, unsigned int bit_depth);
+                          const struct AvxRational *framerate,
+                          aom_img_fmt_t fmt, unsigned int bit_depth);
 int y4m_write_frame_header(char *buf, size_t len);
 
 #ifdef __cplusplus
diff --git a/y4minput.c b/y4minput.c
index 0a923c1..d9f14bd 100644
--- a/y4minput.c
+++ b/y4minput.c
@@ -14,7 +14,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "y4minput.h"
 
 // Reads 'size' bytes from 'file' into 'buf' with some fault tolerance.
@@ -805,7 +805,7 @@
             "Only progressive scan handled.\n");
     return -1;
   }
-  _y4m->vpx_fmt = VPX_IMG_FMT_I420;
+  _y4m->aom_fmt = AOM_IMG_FMT_I420;
   _y4m->bps = 12;
   _y4m->bit_depth = 8;
   if (strcmp(_y4m->chroma_type, "420") == 0 ||
@@ -831,7 +831,7 @@
     _y4m->convert = y4m_convert_null;
     _y4m->bit_depth = 10;
     _y4m->bps = 15;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
+    _y4m->aom_fmt = AOM_IMG_FMT_I42016;
     if (only_420) {
       fprintf(stderr, "Unsupported conversion from 420p10 to 420jpeg\n");
       return -1;
@@ -849,7 +849,7 @@
     _y4m->convert = y4m_convert_null;
     _y4m->bit_depth = 12;
     _y4m->bps = 18;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
+    _y4m->aom_fmt = AOM_IMG_FMT_I42016;
     if (only_420) {
       fprintf(stderr, "Unsupported conversion from 420p12 to 420jpeg\n");
       return -1;
@@ -897,7 +897,7 @@
           _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
       _y4m->convert = y4m_convert_422_420jpeg;
     } else {
-      _y4m->vpx_fmt = VPX_IMG_FMT_I422;
+      _y4m->aom_fmt = AOM_IMG_FMT_I422;
       _y4m->bps = 16;
       _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
       _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
@@ -910,7 +910,7 @@
   } else if (strcmp(_y4m->chroma_type, "422p10") == 0) {
     _y4m->src_c_dec_h = 2;
     _y4m->src_c_dec_v = 1;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
+    _y4m->aom_fmt = AOM_IMG_FMT_I42216;
     _y4m->bps = 20;
     _y4m->bit_depth = 10;
     _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
@@ -926,7 +926,7 @@
   } else if (strcmp(_y4m->chroma_type, "422p12") == 0) {
     _y4m->src_c_dec_h = 2;
     _y4m->src_c_dec_v = 1;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
+    _y4m->aom_fmt = AOM_IMG_FMT_I42216;
     _y4m->bps = 24;
     _y4m->bit_depth = 12;
     _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
@@ -967,7 +967,7 @@
           _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
       _y4m->convert = y4m_convert_444_420jpeg;
     } else {
-      _y4m->vpx_fmt = VPX_IMG_FMT_I444;
+      _y4m->aom_fmt = AOM_IMG_FMT_I444;
       _y4m->bps = 24;
       _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
       _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
@@ -979,7 +979,7 @@
   } else if (strcmp(_y4m->chroma_type, "444p10") == 0) {
     _y4m->src_c_dec_h = 1;
     _y4m->src_c_dec_v = 1;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
+    _y4m->aom_fmt = AOM_IMG_FMT_I44416;
     _y4m->bps = 30;
     _y4m->bit_depth = 10;
     _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
@@ -994,7 +994,7 @@
   } else if (strcmp(_y4m->chroma_type, "444p12") == 0) {
     _y4m->src_c_dec_h = 1;
     _y4m->src_c_dec_v = 1;
-    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
+    _y4m->aom_fmt = AOM_IMG_FMT_I44416;
     _y4m->bps = 36;
     _y4m->bit_depth = 12;
     _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
@@ -1021,7 +1021,7 @@
       _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
       _y4m->convert = y4m_convert_444_420jpeg;
     } else {
-      _y4m->vpx_fmt = VPX_IMG_FMT_444A;
+      _y4m->aom_fmt = AOM_IMG_FMT_444A;
       _y4m->bps = 32;
       _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
       _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
@@ -1062,7 +1062,7 @@
   free(_y4m->aux_buf);
 }
 
-int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, aom_image_t *_img) {
   char frame[6];
   int pic_sz;
   int c_w;
@@ -1098,11 +1098,11 @@
   /*Now convert the just read frame.*/
   (*_y4m->convert)(_y4m, _y4m->dst_buf, _y4m->aux_buf);
   /*Fill in the frame buffer pointers.
-    We don't use vpx_img_wrap() because it forces padding for odd picture
+    We don't use aom_img_wrap() because it forces padding for odd picture
      sizes, which would require a separate fread call for every row.*/
   memset(_img, 0, sizeof(*_img));
   /*Y4M has the planes in Y'CbCr order, which libaom calls Y, U, and V.*/
-  _img->fmt = _y4m->vpx_fmt;
+  _img->fmt = _y4m->aom_fmt;
   _img->w = _img->d_w = _y4m->pic_w;
   _img->h = _img->d_h = _y4m->pic_h;
   _img->x_chroma_shift = _y4m->dst_c_dec_h >> 1;
@@ -1115,12 +1115,12 @@
   c_w *= bytes_per_sample;
   c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
   c_sz = c_w * c_h;
-  _img->stride[VPX_PLANE_Y] = _img->stride[VPX_PLANE_ALPHA] =
+  _img->stride[AOM_PLANE_Y] = _img->stride[AOM_PLANE_ALPHA] =
       _y4m->pic_w * bytes_per_sample;
-  _img->stride[VPX_PLANE_U] = _img->stride[VPX_PLANE_V] = c_w;
-  _img->planes[VPX_PLANE_Y] = _y4m->dst_buf;
-  _img->planes[VPX_PLANE_U] = _y4m->dst_buf + pic_sz;
-  _img->planes[VPX_PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
-  _img->planes[VPX_PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
+  _img->stride[AOM_PLANE_U] = _img->stride[AOM_PLANE_V] = c_w;
+  _img->planes[AOM_PLANE_Y] = _y4m->dst_buf;
+  _img->planes[AOM_PLANE_U] = _y4m->dst_buf + pic_sz;
+  _img->planes[AOM_PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
+  _img->planes[AOM_PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
   return 1;
 }
diff --git a/y4minput.h b/y4minput.h
index 2037449..ce71253 100644
--- a/y4minput.h
+++ b/y4minput.h
@@ -15,7 +15,7 @@
 #define Y4MINPUT_H_
 
 #include <stdio.h>
-#include "aom/vpx_image.h"
+#include "aom/aom_image.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,7 +51,7 @@
   y4m_convert_func convert;
   unsigned char *dst_buf;
   unsigned char *aux_buf;
-  enum vpx_img_fmt vpx_fmt;
+  enum aom_img_fmt aom_fmt;
   int bps;
   unsigned int bit_depth;
 };
@@ -59,7 +59,7 @@
 int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
                    int only_420);
 void y4m_input_close(y4m_input *_y4m);
-int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *img);
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, aom_image_t *img);
 
 #ifdef __cplusplus
 }  // extern "C"