Port renaming changes from AOMedia

Cherry-picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*

Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
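
A minimal sketch (hypothetical; not the tooling actually used for these commits) of how a mechanical identifier rename of this kind can be reproduced over a checkout. The pair list and the "av1" directory root are illustrative assumptions covering only a subset of the replacements named above; renaming the files themselves (e.g. vp10_*.mk to av1_*.mk) would be a separate "git mv" step.

    #!/usr/bin/env python3
    # Hypothetical helper: apply simple string renames (e.g. "vp10_" -> "av1_",
    # "vpx_" -> "aom_") to C sources, headers, and makefiles under a directory.
    import os

    # Assumed subset of the full mapping used by the commits listed above.
    RENAMES = [("vp10_", "av1_"), ("VP10_", "AV1_"),
               ("vpx_", "aom_"), ("VPX", "AOM")]

    def rewrite(path):
        with open(path, encoding="utf-8", errors="ignore") as f:
            text = f.read()
        new_text = text
        for old, new in RENAMES:
            new_text = new_text.replace(old, new)
        if new_text != text:
            with open(path, "w", encoding="utf-8") as f:
                f.write(new_text)

    for root, _dirs, files in os.walk("av1"):  # assumed tree root
        for name in files:
            if name.endswith((".c", ".h", ".mk")):
                rewrite(os.path.join(root, name))
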
diff --git a/av1/common/alloccommon.c b/av1/common/alloccommon.c
index b6ff12a..eb4f8e6 100644
--- a/av1/common/alloccommon.c
+++ b/av1/common/alloccommon.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/alloccommon.h"
 #include "av1/common/blockd.h"
@@ -17,7 +17,7 @@
 #include "av1/common/entropymv.h"
 #include "av1/common/onyxc_int.h"
 
-void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
+void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
   const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
   const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
 
@@ -30,11 +30,11 @@
   cm->MBs = cm->mb_rows * cm->mb_cols;
 }
 
-static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
+static int alloc_seg_map(AV1_COMMON *cm, int seg_map_size) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
-    cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+    cm->seg_map_array[i] = (uint8_t *)aom_calloc(seg_map_size, 1);
     if (cm->seg_map_array[i] == NULL) return 1;
   }
   cm->seg_map_alloc_size = seg_map_size;
@@ -50,11 +50,11 @@
   return 0;
 }
 
-static void free_seg_map(VP10_COMMON *cm) {
+static void free_seg_map(AV1_COMMON *cm) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
-    vpx_free(cm->seg_map_array[i]);
+    aom_free(cm->seg_map_array[i]);
     cm->seg_map_array[i] = NULL;
   }
 
@@ -65,7 +65,7 @@
   }
 }
 
-void vp10_free_ref_frame_buffers(BufferPool *pool) {
+void av1_free_ref_frame_buffers(BufferPool *pool) {
   int i;
 
   for (i = 0; i < FRAME_BUFFERS; ++i) {
@@ -74,45 +74,45 @@
       pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
       pool->frame_bufs[i].ref_count = 0;
     }
-    vpx_free(pool->frame_bufs[i].mvs);
+    aom_free(pool->frame_bufs[i].mvs);
     pool->frame_bufs[i].mvs = NULL;
-    vpx_free_frame_buffer(&pool->frame_bufs[i].buf);
+    aom_free_frame_buffer(&pool->frame_bufs[i].buf);
   }
 }
 
 #if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(VP10_COMMON *cm) {
-  vpx_free(cm->rst_info.bilateral_level);
+void av1_free_restoration_buffers(AV1_COMMON *cm) {
+  aom_free(cm->rst_info.bilateral_level);
   cm->rst_info.bilateral_level = NULL;
-  vpx_free(cm->rst_info.vfilter);
+  aom_free(cm->rst_info.vfilter);
   cm->rst_info.vfilter = NULL;
-  vpx_free(cm->rst_info.hfilter);
+  aom_free(cm->rst_info.hfilter);
   cm->rst_info.hfilter = NULL;
-  vpx_free(cm->rst_info.wiener_level);
+  aom_free(cm->rst_info.wiener_level);
   cm->rst_info.wiener_level = NULL;
 }
 #endif  // CONFIG_LOOP_RESTORATION
 
-void vp10_free_context_buffers(VP10_COMMON *cm) {
+void av1_free_context_buffers(AV1_COMMON *cm) {
   int i;
   cm->free_mi(cm);
   free_seg_map(cm);
   for (i = 0; i < MAX_MB_PLANE; i++) {
-    vpx_free(cm->above_context[i]);
+    aom_free(cm->above_context[i]);
     cm->above_context[i] = NULL;
   }
-  vpx_free(cm->above_seg_context);
+  aom_free(cm->above_seg_context);
   cm->above_seg_context = NULL;
 #if CONFIG_VAR_TX
-  vpx_free(cm->above_txfm_context);
+  aom_free(cm->above_txfm_context);
   cm->above_txfm_context = NULL;
 #endif
 }
 
-int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
+int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
   int new_mi_size;
 
-  vp10_set_mb_mi(cm, width, height);
+  av1_set_mb_mi(cm, width, height);
   new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
   if (cm->mi_alloc_size < new_mi_size) {
     cm->free_mi(cm);
@@ -134,20 +134,20 @@
     int i;
 
     for (i = 0; i < MAX_MB_PLANE; i++) {
-      vpx_free(cm->above_context[i]);
-      cm->above_context[i] = (ENTROPY_CONTEXT *)vpx_calloc(
+      aom_free(cm->above_context[i]);
+      cm->above_context[i] = (ENTROPY_CONTEXT *)aom_calloc(
           2 * aligned_mi_cols, sizeof(*cm->above_context[0]));
       if (!cm->above_context[i]) goto fail;
     }
 
-    vpx_free(cm->above_seg_context);
-    cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc(
+    aom_free(cm->above_seg_context);
+    cm->above_seg_context = (PARTITION_CONTEXT *)aom_calloc(
         aligned_mi_cols, sizeof(*cm->above_seg_context));
     if (!cm->above_seg_context) goto fail;
 
 #if CONFIG_VAR_TX
-    vpx_free(cm->above_txfm_context);
-    cm->above_txfm_context = (TXFM_CONTEXT *)vpx_calloc(
+    aom_free(cm->above_txfm_context);
+    cm->above_txfm_context = (TXFM_CONTEXT *)aom_calloc(
         aligned_mi_cols, sizeof(*cm->above_txfm_context));
     if (!cm->above_txfm_context) goto fail;
 #endif
@@ -159,27 +159,27 @@
 
 fail:
   // clear the mi_* values to force a realloc on resync
-  vp10_set_mb_mi(cm, 0, 0);
-  vp10_free_context_buffers(cm);
+  av1_set_mb_mi(cm, 0, 0);
+  av1_free_context_buffers(cm);
   return 1;
 }
 
-void vp10_remove_common(VP10_COMMON *cm) {
-  vp10_free_context_buffers(cm);
+void av1_remove_common(AV1_COMMON *cm) {
+  av1_free_context_buffers(cm);
 
-  vpx_free(cm->fc);
+  aom_free(cm->fc);
   cm->fc = NULL;
-  vpx_free(cm->frame_contexts);
+  aom_free(cm->frame_contexts);
   cm->frame_contexts = NULL;
 }
 
-void vp10_init_context_buffers(VP10_COMMON *cm) {
+void av1_init_context_buffers(AV1_COMMON *cm) {
   cm->setup_mi(cm);
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
     memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
 }
 
-void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) {
+void av1_swap_current_and_last_seg_map(AV1_COMMON *cm) {
   // Swap indices.
   const int tmp = cm->seg_map_idx;
   cm->seg_map_idx = cm->prev_seg_map_idx;
diff --git a/av1/common/alloccommon.h b/av1/common/alloccommon.h
index d2d2643..ad0b454 100644
--- a/av1/common/alloccommon.h
+++ b/av1/common/alloccommon.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ALLOCCOMMON_H_
-#define VP10_COMMON_ALLOCCOMMON_H_
+#ifndef AV1_COMMON_ALLOCCOMMON_H_
+#define AV1_COMMON_ALLOCCOMMON_H_
 
 #define INVALID_IDX -1  // Invalid buffer index.
 
@@ -17,29 +17,29 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct BufferPool;
 
-void vp10_remove_common(struct VP10Common *cm);
+void av1_remove_common(struct AV1Common *cm);
 
-int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height);
-void vp10_init_context_buffers(struct VP10Common *cm);
-void vp10_free_context_buffers(struct VP10Common *cm);
+int av1_alloc_context_buffers(struct AV1Common *cm, int width, int height);
+void av1_init_context_buffers(struct AV1Common *cm);
+void av1_free_context_buffers(struct AV1Common *cm);
 
-void vp10_free_ref_frame_buffers(struct BufferPool *pool);
+void av1_free_ref_frame_buffers(struct BufferPool *pool);
 #if CONFIG_LOOP_RESTORATION
-void vp10_free_restoration_buffers(struct VP10Common *cm);
+void av1_free_restoration_buffers(struct AV1Common *cm);
 #endif  // CONFIG_LOOP_RESTORATION
 
-int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height);
-void vp10_free_state_buffers(struct VP10Common *cm);
+int av1_alloc_state_buffers(struct AV1Common *cm, int width, int height);
+void av1_free_state_buffers(struct AV1Common *cm);
 
-void vp10_set_mb_mi(struct VP10Common *cm, int width, int height);
+void av1_set_mb_mi(struct AV1Common *cm, int width, int height);
 
-void vp10_swap_current_and_last_seg_map(struct VP10Common *cm);
+void av1_swap_current_and_last_seg_map(struct AV1Common *cm);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ALLOCCOMMON_H_
+#endif  // AV1_COMMON_ALLOCCOMMON_H_
diff --git a/av1/common/ans.h b/av1/common/ans.h
index c974ada..1a632ee 100644
--- a/av1/common/ans.h
+++ b/av1/common/ans.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ANS_H_
-#define VP10_COMMON_ANS_H_
+#ifndef AV1_COMMON_ANS_H_
+#define AV1_COMMON_ANS_H_
 // An implementation of Asymmetric Numeral Systems
 // http://arxiv.org/abs/1311.2540v2
 
 #include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
 #include "aom_ports/mem_ops.h"
 
@@ -250,9 +250,9 @@
 
 // TODO(aconverse): Replace trees with tokensets.
 static INLINE int uabs_read_tree(struct AnsDecoder *ans,
-                                 const vpx_tree_index *tree,
+                                 const aom_tree_index *tree,
                                  const AnsP8 *probs) {
-  vpx_tree_index i = 0;
+  aom_tree_index i = 0;
 
   while ((i = tree[i + uabs_read(ans, probs[i >> 1])]) > 0) continue;
 
@@ -313,8 +313,8 @@
   adjustment -= out_pdf[0];
   for (i = 0; i < in_syms; ++i) {
     int p = (p1 * src_pdf[i] + round_fact) >> ans_p8_shift;
-    p = VPXMIN(p, (int)rans_precision - in_syms);
-    p = VPXMAX(p, 1);
+    p = AOMMIN(p, (int)rans_precision - in_syms);
+    p = AOMMAX(p, 1);
     out_pdf[i + 1] = p;
     adjustment -= p;
   }
@@ -411,4 +411,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
-#endif  // VP10_COMMON_ANS_H_
+#endif  // AV1_COMMON_ANS_H_
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index 600e66b..fc72c98 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -11,8 +11,8 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
 #include "av1/common/common.h"
 
 static int16_t sinpi_1_9 = 0x14a3;
@@ -139,8 +139,8 @@
   return;
 }
 
-void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
+                            int dest_stride, int tx_type) {
   uint8x8_t d26u8, d27u8;
   int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
   uint32x2_t d26u32, d27u32;
@@ -156,7 +156,7 @@
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+      av1_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index ff5578d..8421926 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -11,8 +11,8 @@
 #include <arm_neon.h>
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
 #include "av1/common/common.h"
 
 static int16_t cospi_2_64 = 16305;
@@ -471,8 +471,8 @@
   return;
 }
 
-void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
+                            int dest_stride, int tx_type) {
   int i;
   uint8_t *d1, *d2;
   uint8x8_t d0u8, d1u8, d2u8, d3u8;
@@ -494,7 +494,7 @@
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+      av1_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/vp10_convolve.c b/av1/common/av1_convolve.c
similarity index 66%
rename from av1/common/vp10_convolve.c
rename to av1/common/av1_convolve.c
index b62bae5..dec6759 100644
--- a/av1/common/vp10_convolve.c
+++ b/av1/common/av1_convolve.c
@@ -1,10 +1,10 @@
 #include <assert.h>
 #include <string.h>
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_convolve.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_convolve.h"
 #include "av1/common/filter.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 
 #define MAX_BLOCK_WIDTH (MAX_SB_SIZE)
@@ -12,10 +12,10 @@
 #define MAX_STEP (32)
 #define MAX_FILTER_TAP (12)
 
-void vp10_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
-                           int dst_stride, int w, int h,
-                           const InterpFilterParams filter_params,
-                           const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst,
+                          int dst_stride, int w, int h,
+                          const InterpFilterParams filter_params,
+                          const int subpel_x_q4, int x_step_q4, int avg) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= filter_size / 2 - 1;
@@ -23,7 +23,7 @@
     int x_q4 = subpel_x_q4;
     for (x = 0; x < w; ++x) {
       const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, x_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -40,10 +40,10 @@
   }
 }
 
-void vp10_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
-                          int dst_stride, int w, int h,
-                          const InterpFilterParams filter_params,
-                          const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
+                         int dst_stride, int w, int h,
+                         const InterpFilterParams filter_params,
+                         const int subpel_y_q4, int y_step_q4, int avg) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= src_stride * (filter_size / 2 - 1);
@@ -52,7 +52,7 @@
     int y_q4 = subpel_y_q4;
     for (y = 0; y < h; ++y) {
       const uint8_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, y_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k)
@@ -93,15 +93,15 @@
   }
 }
 
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                   int dst_stride, int w, int h,
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                  int dst_stride, int w, int h,
 #if CONFIG_DUAL_FILTER
-                   const INTERP_FILTER *interp_filter,
+                  const INTERP_FILTER *interp_filter,
 #else
-                   const INTERP_FILTER interp_filter,
+                  const INTERP_FILTER interp_filter,
 #endif
-                   const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
-                   int y_step_q4, int ref_idx) {
+                  const int subpel_x_q4, int x_step_q4, const int subpel_y_q4,
+                  int y_step_q4, int ref_idx) {
   int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
   int ignore_vert = y_step_q4 == 16 && subpel_y_q4 == 0;
 
@@ -115,25 +115,25 @@
   } else if (ignore_vert) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
     assert(filter_params.taps <= MAX_FILTER_TAP);
-    vp10_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
-                        subpel_x_q4, x_step_q4, ref_idx);
+    av1_convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
+                       subpel_x_q4, x_step_q4, ref_idx);
   } else if (ignore_horiz) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
     assert(filter_params.taps <= MAX_FILTER_TAP);
-    vp10_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
-                       subpel_y_q4, y_step_q4, ref_idx);
+    av1_convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
+                      subpel_y_q4, y_step_q4, ref_idx);
   } else {
     // temp's size is set to (maximum possible intermediate_height) *
     // MAX_BLOCK_WIDTH
@@ -143,9 +143,9 @@
     int temp_stride = MAX_BLOCK_WIDTH;
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params_x =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
     InterpFilterParams filter_params_y =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
     InterpFilterParams filter_params = filter_params_x;
 
     // The filter size implies the required number of reference pixels for
@@ -154,7 +154,7 @@
     int filter_size = filter_params_y.taps;
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
     int filter_size = filter_params.taps;
 #endif
     int intermediate_height =
@@ -162,30 +162,30 @@
 
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
-                        temp, temp_stride, w, intermediate_height,
-                        filter_params, subpel_x_q4, x_step_q4, 0);
+    av1_convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride,
+                       temp, temp_stride, w, intermediate_height, filter_params,
+                       subpel_x_q4, x_step_q4, 0);
 
 #if CONFIG_DUAL_FILTER
     filter_params = filter_params_y;
 #else
-    filter_params = vp10_get_interp_filter_params(interp_filter);
+    filter_params = av1_get_interp_filter_params(interp_filter);
 #endif
     filter_size = filter_params.taps;
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
-                       dst, dst_stride, w, h, filter_params, subpel_y_q4,
-                       y_step_q4, ref_idx);
+    av1_convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
+                      dst, dst_stride, w, h, filter_params, subpel_y_q4,
+                      y_step_q4, ref_idx);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
-                                  uint16_t *dst, int dst_stride, int w, int h,
-                                  const InterpFilterParams filter_params,
-                                  const int subpel_x_q4, int x_step_q4, int avg,
-                                  int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve_horiz_c(const uint16_t *src, int src_stride,
+                                 uint16_t *dst, int dst_stride, int w, int h,
+                                 const InterpFilterParams filter_params,
+                                 const int subpel_x_q4, int x_step_q4, int avg,
+                                 int bd) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= filter_size / 2 - 1;
@@ -193,7 +193,7 @@
     int x_q4 = subpel_x_q4;
     for (x = 0; x < w; ++x) {
       const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *x_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, x_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
@@ -211,11 +211,11 @@
   }
 }
 
-void vp10_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
-                                 uint16_t *dst, int dst_stride, int w, int h,
-                                 const InterpFilterParams filter_params,
-                                 const int subpel_y_q4, int y_step_q4, int avg,
-                                 int bd) {
+void av1_highbd_convolve_vert_c(const uint16_t *src, int src_stride,
+                                uint16_t *dst, int dst_stride, int w, int h,
+                                const InterpFilterParams filter_params,
+                                const int subpel_y_q4, int y_step_q4, int avg,
+                                int bd) {
   int x, y;
   int filter_size = filter_params.taps;
   src -= src_stride * (filter_size / 2 - 1);
@@ -224,7 +224,7 @@
     int y_q4 = subpel_y_q4;
     for (y = 0; y < h; ++y) {
       const uint16_t *const src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *y_filter = vp10_get_interp_filter_subpel_kernel(
+      const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
           filter_params, y_q4 & SUBPEL_MASK);
       int k, sum = 0;
       for (k = 0; k < filter_size; ++k)
@@ -267,16 +267,16 @@
   }
 }
 
-void vp10_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
-                          int dst_stride, int w, int h,
+void av1_highbd_convolve(const uint8_t *src8, int src_stride, uint8_t *dst8,
+                         int dst_stride, int w, int h,
 #if CONFIG_DUAL_FILTER
-                          const INTERP_FILTER *interp_filter,
+                         const INTERP_FILTER *interp_filter,
 #else
-                          const INTERP_FILTER interp_filter,
+                         const INTERP_FILTER interp_filter,
 #endif
-                          const int subpel_x_q4, int x_step_q4,
-                          const int subpel_y_q4, int y_step_q4, int ref_idx,
-                          int bd) {
+                         const int subpel_x_q4, int x_step_q4,
+                         const int subpel_y_q4, int y_step_q4, int ref_idx,
+                         int bd) {
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
@@ -292,25 +292,25 @@
   } else if (ignore_vert) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
-    vp10_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
-                               filter_params, subpel_x_q4, x_step_q4, ref_idx,
-                               bd);
+    av1_highbd_convolve_horiz(src, src_stride, dst, dst_stride, w, h,
+                              filter_params, subpel_x_q4, x_step_q4, ref_idx,
+                              bd);
   } else if (ignore_horiz) {
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
 #endif
-    vp10_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
-                              filter_params, subpel_y_q4, y_step_q4, ref_idx,
-                              bd);
+    av1_highbd_convolve_vert(src, src_stride, dst, dst_stride, w, h,
+                             filter_params, subpel_y_q4, y_step_q4, ref_idx,
+                             bd);
   } else {
     // temp's size is set to (maximum possible intermediate_height) *
     // MAX_BLOCK_WIDTH
@@ -321,21 +321,21 @@
 
 #if CONFIG_DUAL_FILTER
     InterpFilterParams filter_params_x =
-        vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
     InterpFilterParams filter_params_y =
-        vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+        av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
     InterpFilterParams filter_params = filter_params_x;
     int filter_size = filter_params_y.taps;
 #else
     InterpFilterParams filter_params =
-        vp10_get_interp_filter_params(interp_filter);
+        av1_get_interp_filter_params(interp_filter);
     int filter_size = filter_params.taps;
 #endif
 
     int intermediate_height =
         (((h - 1) * y_step_q4 + subpel_y_q4) >> SUBPEL_BITS) + filter_size;
 
-    vp10_highbd_convolve_horiz(
+    av1_highbd_convolve_horiz(
         src - src_stride * (filter_size / 2 - 1), src_stride, temp, temp_stride,
         w, intermediate_height, filter_params, subpel_x_q4, x_step_q4, 0, bd);
 
@@ -345,9 +345,9 @@
     filter_size = filter_params.taps;
     assert(filter_params.taps <= MAX_FILTER_TAP);
 
-    vp10_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
-                              temp_stride, dst, dst_stride, w, h, filter_params,
-                              subpel_y_q4, y_step_q4, ref_idx, bd);
+    av1_highbd_convolve_vert(temp + temp_stride * (filter_size / 2 - 1),
+                             temp_stride, dst, dst_stride, w, h, filter_params,
+                             subpel_y_q4, y_step_q4, ref_idx, bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/av1_convolve.h b/av1/common/av1_convolve.h
new file mode 100644
index 0000000..f082a8a
--- /dev/null
+++ b/av1/common/av1_convolve.h
@@ -0,0 +1,35 @@
+#ifndef AV1_COMMON_AV1_CONVOLVE_H_
+#define AV1_COMMON_AV1_CONVOLVE_H_
+#include "av1/common/filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                  int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                  const INTERP_FILTER *interp_filter,
+#else
+                  const INTERP_FILTER interp_filter,
+#endif
+                  const int subpel_x, int xstep, const int subpel_y, int ystep,
+                  int avg);
+
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
+                         int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                         const INTERP_FILTER *interp_filter,
+#else
+                         const INTERP_FILTER interp_filter,
+#endif
+                         const int subpel_x, int xstep, const int subpel_y,
+                         int ystep, int avg, int bd);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // AV1_COMMON_AV1_CONVOLVE_H_
diff --git a/av1/common/vp10_fwd_txfm.c b/av1/common/av1_fwd_txfm.c
similarity index 92%
rename from av1/common/vp10_fwd_txfm.c
rename to av1/common/av1_fwd_txfm.c
index eb1c018..221f4e1 100644
--- a/av1/common/vp10_fwd_txfm.c
+++ b/av1/common/av1_fwd_txfm.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
 
-void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
   }
 }
 
-void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
@@ -87,8 +87,7 @@
   output[1] = 0;
 }
 
-void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
-                    int stride) {
+void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
   int i, j;
   tran_low_t intermediate[64];
   int pass;
@@ -173,7 +172,7 @@
   }
 }
 
-void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
@@ -183,7 +182,7 @@
   output[1] = 0;
 }
 
-void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -363,7 +362,7 @@
   }
 }
 
-void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 16; ++r)
@@ -386,7 +385,7 @@
   return rv;
 }
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   tran_high_t step[32];
   // Stage 1
   step[0] = input[0] + input[(32 - 1)];
@@ -709,7 +708,7 @@
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
 }
 
-void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -717,7 +716,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
@@ -726,7 +725,7 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -736,7 +735,7 @@
 // Note that although we use dct_32_round in dct32 computation flow,
 // this 2d fdct32x32 for rate-distortion optimization loop is operating
 // within 16 bits precision.
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -744,11 +743,11 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
       //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
-      //           PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c
+      //           PS: also change code in av1_dsp/x86/av1_dct_sse2.c
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
 
@@ -756,12 +755,12 @@
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
-void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 32; ++r)
@@ -771,44 +770,43 @@
   output[1] = 0;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
-                           int stride) {
-  vp10_fdct4x4_c(input, output, stride);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+                          int stride) {
+  av1_fdct4x4_c(input, output, stride);
 }
 
-void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
-                           int stride) {
-  vp10_fdct8x8_c(input, final_output, stride);
+void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+                          int stride) {
+  av1_fdct8x8_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
-                             int stride) {
-  vp10_fdct8x8_1_c(input, final_output, stride);
+void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+                            int stride) {
+  av1_fdct8x8_1_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
-                             int stride) {
-  vp10_fdct16x16_c(input, output, stride);
+void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+                            int stride) {
+  av1_fdct16x16_c(input, output, stride);
 }
 
-void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+                              int stride) {
+  av1_fdct16x16_1_c(input, output, stride);
+}
+
+void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+  av1_fdct32x32_c(input, out, stride);
+}
+
+void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
                                int stride) {
-  vp10_fdct16x16_1_c(input, output, stride);
+  av1_fdct32x32_rd_c(input, out, stride);
 }
 
-void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
-                             int stride) {
-  vp10_fdct32x32_c(input, out, stride);
+void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+                              int stride) {
+  av1_fdct32x32_1_c(input, out, stride);
 }
-
-void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
-                                int stride) {
-  vp10_fdct32x32_rd_c(input, out, stride);
-}
-
-void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
-                               int stride) {
-  vp10_fdct32x32_1_c(input, out, stride);
-}
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/vp10_fwd_txfm.h b/av1/common/av1_fwd_txfm.h
similarity index 71%
rename from av1/common/vp10_fwd_txfm.h
rename to av1/common/av1_fwd_txfm.h
index a0481d3..96d942e 100644
--- a/av1/common/vp10_fwd_txfm.h
+++ b/av1/common/av1_fwd_txfm.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_VP10_FWD_TXFM_H_
-#define VP10_COMMON_VP10_FWD_TXFM_H_
+#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
+#define AV1_COMMON_AV1_FWD_TXFM_H_
 
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/fwd_txfm.h"
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif  // VP10_COMMON_VP10_FWD_TXFM_H_
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif  // AV1_COMMON_AV1_FWD_TXFM_H_
diff --git a/av1/common/vp10_fwd_txfm1d.c b/av1/common/av1_fwd_txfm1d.c
similarity index 98%
rename from av1/common/vp10_fwd_txfm1d.c
rename to av1/common/av1_fwd_txfm1d.c
index 6dff077..3dc960c 100644
--- a/av1/common/vp10_fwd_txfm1d.c
+++ b/av1/common/av1_fwd_txfm1d.c
@@ -9,7 +9,7 @@
  */
 
 #include <stdlib.h>
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
 #define range_check(stage, input, buf, size, bit)                         \
   {                                                                       \
@@ -40,8 +40,8 @@
   }
 #endif
 
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -83,8 +83,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -168,8 +168,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -339,8 +339,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
@@ -700,8 +700,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -765,8 +765,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -880,8 +880,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -1094,8 +1094,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
diff --git a/av1/common/av1_fwd_txfm1d.h b/av1/common/av1_fwd_txfm1d.h
new file mode 100644
index 0000000..7aab70e
--- /dev/null
+++ b/av1/common/av1_fwd_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_FWD_TXFM1D_H_
+#define AV1_FWD_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_fdct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_fdct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct64_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // AV1_FWD_TXFM1D_H_
diff --git a/av1/common/vp10_fwd_txfm2d.c b/av1/common/av1_fwd_txfm2d.c
similarity index 76%
rename from av1/common/vp10_fwd_txfm2d.c
rename to av1/common/av1_fwd_txfm2d.c
index 85c6b68..dc984e1 100644
--- a/av1/common/vp10_fwd_txfm2d.c
+++ b/av1/common/av1_fwd_txfm2d.c
@@ -10,22 +10,22 @@
 
 #include <assert.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
-#include "av1/common/vp10_fwd_txfm2d_cfg.h"
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
 
 static INLINE TxfmFunc fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_fdct4_new;
-    case TXFM_TYPE_DCT8: return vp10_fdct8_new;
-    case TXFM_TYPE_DCT16: return vp10_fdct16_new;
-    case TXFM_TYPE_DCT32: return vp10_fdct32_new;
-    case TXFM_TYPE_ADST4: return vp10_fadst4_new;
-    case TXFM_TYPE_ADST8: return vp10_fadst8_new;
-    case TXFM_TYPE_ADST16: return vp10_fadst16_new;
-    case TXFM_TYPE_ADST32: return vp10_fadst32_new;
+    case TXFM_TYPE_DCT4: return av1_fdct4_new;
+    case TXFM_TYPE_DCT8: return av1_fdct8_new;
+    case TXFM_TYPE_DCT16: return av1_fdct16_new;
+    case TXFM_TYPE_DCT32: return av1_fdct32_new;
+    case TXFM_TYPE_ADST4: return av1_fadst4_new;
+    case TXFM_TYPE_ADST8: return av1_fadst8_new;
+    case TXFM_TYPE_ADST16: return av1_fadst16_new;
+    case TXFM_TYPE_ADST32: return av1_fadst32_new;
     default: assert(0); return NULL;
   }
 }
@@ -76,42 +76,42 @@
   }
 }
 
-void vp10_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd) {
   int32_t txfm_buf[4 * 4];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_4X4);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_4X4);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
+                          int tx_type, int bd) {
   int32_t txfm_buf[8 * 8];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_8X8);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X8);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[16 * 16];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_16X16);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X16);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[32 * 32];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
-                             int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
+                            int tx_type, int bd) {
   int32_t txfm_buf[64 * 64];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
   (void)bd;
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
 }
@@ -150,14 +150,14 @@
 };
 #endif  // CONFIG_EXT_TX
 
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
   cfg.cfg = fwd_txfm_cfg_ls[tx_type][tx_size];
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
   TXFM_2D_FLIP_CFG cfg;
   switch (tx_type) {
     case DCT_DCT:
diff --git a/av1/common/vp10_fwd_txfm2d_cfg.h b/av1/common/av1_fwd_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_fwd_txfm2d_cfg.h
rename to av1/common/av1_fwd_txfm2d_cfg.h
index f780b87..49d324d 100644
--- a/av1/common/vp10_fwd_txfm2d_cfg.h
+++ b/av1/common/av1_fwd_txfm2d_cfg.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_FWD_TXFM2D_CFG_H_
-#define VP10_FWD_TXFM2D_CFG_H_
+#ifndef AV1_FWD_TXFM2D_CFG_H_
+#define AV1_FWD_TXFM2D_CFG_H_
 #include "av1/common/enums.h"
-#include "av1/common/vp10_fwd_txfm1d.h"
+#include "av1/common/av1_fwd_txfm1d.h"
 //  ---------------- config fwd_dct_dct_4 ----------------
 static const int8_t fwd_shift_dct_dct_4[3] = { 2, 0, 0 };
 static const int8_t fwd_stage_range_col_dct_dct_4[4] = { 15, 16, 17, 17 };
@@ -440,4 +440,4 @@
   TXFM_TYPE_ADST32,                 // .txfm_type_col
   TXFM_TYPE_DCT32
 };      // .txfm_type_row
-#endif  // VP10_FWD_TXFM2D_CFG_H_
+#endif  // AV1_FWD_TXFM2D_CFG_H_
diff --git a/av1/common/vp10_inv_txfm.c b/av1/common/av1_inv_txfm.c
similarity index 95%
rename from av1/common/vp10_inv_txfm.c
rename to av1/common/av1_inv_txfm.c
index a74de09..76a49a2 100644
--- a/av1/common/vp10_inv_txfm.c
+++ b/av1/common/av1_inv_txfm.c
@@ -12,10 +12,10 @@
 #include <math.h>
 #include <string.h>
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_inv_txfm.h"
 
-void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -67,8 +67,7 @@
   }
 }
 
-void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
-                          int dest_stride) {
+void av1_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
@@ -94,7 +93,7 @@
   }
 }
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   // stage 1
@@ -114,7 +113,7 @@
   output[3] = WRAPLOW(step[0] - step[3]);
 }
 
-void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -122,7 +121,7 @@
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_idct4_c(input, outptr);
+    av1_idct4_c(input, outptr);
     input += 4;
     outptr += 4;
   }
@@ -130,7 +129,7 @@
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_idct4_c(temp_in, temp_out);
+    av1_idct4_c(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 4));
@@ -138,8 +137,8 @@
   }
 }
 
-void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
-                          int dest_stride) {
+void av1_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+                         int dest_stride) {
   int i;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -155,7 +154,7 @@
   }
 }
 
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -209,7 +208,7 @@
   output[7] = WRAPLOW(step1[0] - step1[7]);
 }
 
-void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -217,7 +216,7 @@
 
   // First transform rows
   for (i = 0; i < 8; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -225,7 +224,7 @@
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -233,7 +232,7 @@
   }
 }
 
-void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -245,7 +244,7 @@
   }
 }
 
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -282,7 +281,7 @@
   output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3));
 }
 
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output) {
   int s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_high_t x0 = input[7];
@@ -359,7 +358,7 @@
   output[7] = WRAPLOW(-x1);
 }
 
-void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -368,7 +367,7 @@
   // First transform rows
   // only first 4 row has non-zero coefs
   for (i = 0; i < 4; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -376,7 +375,7 @@
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -384,7 +383,7 @@
   }
 }
 
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
 
@@ -549,8 +548,8 @@
   output[15] = WRAPLOW(step2[0] - step2[15]);
 }
 
-void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
-                              int stride) {
+void av1_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+                             int stride) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -558,7 +557,7 @@
 
   // First transform rows
   for (i = 0; i < 16; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -566,7 +565,7 @@
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -574,7 +573,7 @@
   }
 }
 
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -745,8 +744,8 @@
   output[15] = WRAPLOW(-x1);
 }
 
-void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
-                             int stride) {
+void av1_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+                            int stride) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -755,7 +754,7 @@
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -763,7 +762,7 @@
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -771,8 +770,7 @@
   }
 }
 
-void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
+void av1_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -784,7 +782,7 @@
   }
 }
 
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
 
@@ -1151,8 +1149,8 @@
   output[31] = WRAPLOW(step1[0] - step1[31]);
 }
 
-void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+                              int stride) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -1170,7 +1168,7 @@
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
 
     if (zero_coeff[0] | zero_coeff[1])
-      vp10_idct32_c(input, outptr);
+      av1_idct32_c(input, outptr);
     else
       memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
@@ -1180,7 +1178,7 @@
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1188,8 +1186,8 @@
   }
 }
 
-void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
-                             int stride) {
+void av1_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+                            int stride) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1198,7 +1196,7 @@
   // Rows
   // only upper-left 8x8 has non-zero coeff
   for (i = 0; i < 8; ++i) {
-    vp10_idct32_c(input, outptr);
+    av1_idct32_c(input, outptr);
     input += 32;
     outptr += 32;
   }
@@ -1206,7 +1204,7 @@
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1214,8 +1212,7 @@
   }
 }
 
-void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
+void av1_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
 
@@ -1229,9 +1226,9 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -1287,8 +1284,8 @@
   }
 }
 
-void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
-                                 int dest_stride, int bd) {
+void av1_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+                                int dest_stride, int bd) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
@@ -1320,7 +1317,7 @@
   }
 }
 
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1341,8 +1338,8 @@
   output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
 }
 
-void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -1351,7 +1348,7 @@
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct4_c(input, outptr, bd);
+    av1_highbd_idct4_c(input, outptr, bd);
     input += 4;
     outptr += 4;
   }
@@ -1359,7 +1356,7 @@
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_highbd_idct4_c(temp_in, temp_out, bd);
+    av1_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1367,8 +1364,8 @@
   }
 }
 
-void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int dest_stride, int bd) {
+void av1_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int dest_stride, int bd) {
   int i;
   tran_high_t a1;
   tran_low_t out =
@@ -1387,7 +1384,7 @@
   }
 }
 
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -1405,7 +1402,7 @@
   step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd);
 
   // stage 2 & stage 3 - even half
-  vp10_highbd_idct4_c(step1, step1, bd);
+  av1_highbd_idct4_c(step1, step1, bd);
 
   // stage 2 - odd half
   step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1432,8 +1429,8 @@
   output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
 }
 
-void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -1442,7 +1439,7 @@
 
   // First transform rows.
   for (i = 0; i < 8; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
@@ -1450,7 +1447,7 @@
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1458,8 +1455,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+void av1_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
@@ -1473,7 +1470,7 @@
   }
 }
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -1511,7 +1508,7 @@
   output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd);
 }
 
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[7];
@@ -1588,8 +1585,8 @@
   output[7] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+void av1_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+                                 int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1599,14 +1596,14 @@
   // First transform rows.
   // Only first 4 row has non-zero coefs.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1614,7 +1611,7 @@
   }
 }
 
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1780,8 +1777,8 @@
   output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
 }
 
-void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -1790,7 +1787,7 @@
 
   // First transform rows.
   for (i = 0; i < 16; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1798,7 +1795,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1806,8 +1803,7 @@
   }
 }
 
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
-                           int bd) {
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -1977,8 +1973,8 @@
   output[15] = HIGHBD_WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+void av1_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1988,7 +1984,7 @@
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1996,7 +1992,7 @@
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2004,8 +2000,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
+void av1_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
@@ -2389,8 +2385,8 @@
   output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
 }
 
-void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
-                                      int stride, int bd) {
+void av1_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+                                     int stride, int bd) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -2427,8 +2423,8 @@
   }
 }
 
-void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+void av1_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int bd) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -2453,8 +2449,8 @@
   }
 }
 
-void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
+void av1_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int bd) {
   int i, j;
   int a1;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -2469,4 +2465,4 @@
     dest += stride;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
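
All of the av1_idctNxN_*_add_c functions renamed above follow the same row/column decomposition: the 1-D kernel is applied to every row of coefficients into a scratch buffer, then to every column, and the result is rounded and added into the prediction with clip_pixel_add (the *_10_add and *_34_add variants simply skip rows that are known to be all zero). A minimal, self-contained sketch of that data flow, using a toy 4-point butterfly in place of the real cospi-based kernel (the rounding shift mirrors the 4x4 case shown above):

#include <stdint.h>
#include <stdio.h>

/* Toy 1-D 4-point transform standing in for av1_idct4_c; the real AV1
 * kernels use cospi constants and WRAPLOW, this shows only the data flow. */
static void toy_idct4(const int32_t *in, int32_t *out) {
  const int32_t a = in[0] + in[2];
  const int32_t b = in[0] - in[2];
  const int32_t c = in[1] + in[3];
  const int32_t d = in[1] - in[3];
  out[0] = a + c;
  out[1] = b + d;
  out[2] = b - c;
  out[3] = a - d;
}

static uint8_t clip_u8(int32_t v) {
  return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

/* Row pass, column pass, then round/shift/add into dest, mirroring the
 * shape of the av1_idct4x4_*_add_c functions (illustrative only). */
static void toy_idct4x4_add(const int32_t *input, uint8_t *dest, int stride) {
  int32_t out[4 * 4], temp_in[4], temp_out[4];
  int i, j;

  // First pass: transform each row of coefficients.
  for (i = 0; i < 4; ++i) toy_idct4(input + 4 * i, out + 4 * i);

  // Second pass: transform each column, then add the rounded result
  // back onto the prediction held in dest.
  for (i = 0; i < 4; ++i) {
    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
    toy_idct4(temp_in, temp_out);
    for (j = 0; j < 4; ++j)
      dest[j * stride + i] =
          clip_u8(dest[j * stride + i] + ((temp_out[j] + 8) >> 4));
  }
}

int main(void) {
  int32_t coeffs[16] = { 64 };  /* DC-only block */
  uint8_t dest[16] = { 0 };
  toy_idct4x4_add(coeffs, dest, 4);
  printf("%u\n", dest[0]);      /* every pixel gains the same DC residue */
  return 0;
}
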
diff --git a/av1/common/vp10_inv_txfm.h b/av1/common/av1_inv_txfm.h
similarity index 76%
rename from av1/common/vp10_inv_txfm.h
rename to av1/common/av1_inv_txfm.h
index b53db48..4295aa0 100644
--- a/av1/common/vp10_inv_txfm.h
+++ b/av1/common/av1_inv_txfm.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_INV_TXFM_H_
-#define VPX_DSP_INV_TXFM_H_
+#ifndef AOM_DSP_INV_TXFM_H_
+#define AOM_DSP_INV_TXFM_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
@@ -41,7 +41,7 @@
   return rv;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
@@ -63,7 +63,7 @@
   tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
   return rv;
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -84,36 +84,36 @@
 // bd of x uses trans_low with 8+x bits, need to remove 24-x bits
 
 #define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) \
   ((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #else  // CONFIG_EMULATE_HARDWARE
 
 #define WRAPLOW(x) ((int32_t)check_range(x))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #endif  // CONFIG_EMULATE_HARDWARE
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
 static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
                                              int bd) {
@@ -129,4 +129,4 @@
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VPX_DSP_INV_TXFM_H_
+#endif  // AOM_DSP_INV_TXFM_H_
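
Apart from the CONFIG_VP9_HIGHBITDEPTH -> CONFIG_AOM_HIGHBITDEPTH guard rename, the WRAPLOW/HIGHBD_WRAPLOW macros above are unchanged: under CONFIG_EMULATE_HARDWARE they truncate an intermediate value to the width a hardware pipeline would carry (16 bits for the 8-bit path, 8 + bd bits for high bit depth) with a shift-left/arithmetic-shift-right pair. A standalone illustration of the same idiom, assuming signed right shift is arithmetic, as the codec code itself does:

#include <stdint.h>
#include <stdio.h>

/* Truncate x to the low 'bits' bits with sign extension, the same
 * (x << k) >> k idiom used by WRAPLOW (bits = 16) and
 * HIGHBD_WRAPLOW (bits = 8 + bd) when CONFIG_EMULATE_HARDWARE is on. */
static int32_t wrap_low(int32_t x, int bits) {
  const int k = 32 - bits;
  return (int32_t)((uint32_t)x << k) >> k;  /* unsigned shift avoids UB */
}

int main(void) {
  printf("%d\n", wrap_low(40000, 16));   /* 40000 wraps to -25536 in 16 bits */
  printf("%d\n", wrap_low(150000, 18));  /* bd = 10: wraps to -112144 in 18 bits */
  return 0;
}
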
diff --git a/av1/common/vp10_inv_txfm1d.c b/av1/common/av1_inv_txfm1d.c
similarity index 98%
rename from av1/common/vp10_inv_txfm1d.c
rename to av1/common/av1_inv_txfm1d.c
index 76fb623..dbb463f 100644
--- a/av1/common/vp10_inv_txfm1d.c
+++ b/av1/common/av1_inv_txfm1d.c
@@ -9,7 +9,7 @@
  */
 
 #include <stdlib.h>
-#include "av1/common/vp10_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm1d.h"
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
 #define range_check(stage, input, buf, size, bit)                         \
   {                                                                       \
@@ -40,8 +40,8 @@
   }
 #endif
 
-void vp10_idct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -83,8 +83,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -168,8 +168,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -339,8 +339,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_idct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_idct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
@@ -700,8 +700,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 4;
   const int32_t *cospi;
 
@@ -765,8 +765,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 8;
   const int32_t *cospi;
 
@@ -880,8 +880,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 16;
   const int32_t *cospi;
 
@@ -1097,8 +1097,8 @@
   range_check(stage, input, bf1, size, stage_range[stage]);
 }
 
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range) {
   const int32_t size = 32;
   const int32_t *cospi;
 
diff --git a/av1/common/av1_inv_txfm1d.h b/av1/common/av1_inv_txfm1d.h
new file mode 100644
index 0000000..5937617
--- /dev/null
+++ b/av1/common/av1_inv_txfm1d.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AV1_INV_TXFM1D_H_
+#define AV1_INV_TXFM1D_H_
+
+#include "av1/common/av1_txfm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_idct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_idct8_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
+                   const int8_t *stage_range);
+void av1_idct16_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct32_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_iadst4_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new(const int32_t *input, int32_t *output,
+                    const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst32_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // AV1_INV_TXFM1D_H_
diff --git a/av1/common/vp10_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
similarity index 79%
rename from av1/common/vp10_inv_txfm2d.c
rename to av1/common/av1_inv_txfm2d.c
index 60606c9..844a38a 100644
--- a/av1/common/vp10_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -8,22 +8,22 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/vp10_inv_txfm1d.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/av1_inv_txfm1d.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 
 static INLINE TxfmFunc inv_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_idct4_new;
-    case TXFM_TYPE_DCT8: return vp10_idct8_new;
-    case TXFM_TYPE_DCT16: return vp10_idct16_new;
-    case TXFM_TYPE_DCT32: return vp10_idct32_new;
-    case TXFM_TYPE_ADST4: return vp10_iadst4_new;
-    case TXFM_TYPE_ADST8: return vp10_iadst8_new;
-    case TXFM_TYPE_ADST16: return vp10_iadst16_new;
-    case TXFM_TYPE_ADST32: return vp10_iadst32_new;
+    case TXFM_TYPE_DCT4: return av1_idct4_new;
+    case TXFM_TYPE_DCT8: return av1_idct8_new;
+    case TXFM_TYPE_DCT16: return av1_idct16_new;
+    case TXFM_TYPE_DCT32: return av1_idct32_new;
+    case TXFM_TYPE_ADST4: return av1_iadst4_new;
+    case TXFM_TYPE_ADST8: return av1_iadst8_new;
+    case TXFM_TYPE_ADST16: return av1_iadst16_new;
+    case TXFM_TYPE_ADST32: return av1_iadst32_new;
     default: assert(0); return NULL;
   }
 }
@@ -62,14 +62,14 @@
 };
 #endif
 
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
   cfg.cfg = inv_txfm_cfg_ls[tx_type][tx_size];
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG vp10_get_inv_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(int tx_type) {
   TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL };
   switch (tx_type) {
     case DCT_DCT:
@@ -130,62 +130,62 @@
   }
 }
 
-void vp10_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
+                              int stride, int tx_type, int bd) {
   int txfm_buf[4 * 4 + 4 + 4];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_4X4);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_4X4);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 4, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
+                              int stride, int tx_type, int bd) {
   int txfm_buf[8 * 8 + 8 + 8];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_8X8);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_8X8);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 8, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[16 * 16 + 16 + 16];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_16X16);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_16X16);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 16, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[32 * 32 + 32 + 32];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, TX_32X32);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
 }
 
-void vp10_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
-                                 int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
+                                int stride, int tx_type, int bd) {
   int txfm_buf[64 * 64 + 64 + 64];
   // output contains the prediction signal which is always positive and smaller
   // than (1 << bd) - 1
   // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
   // int16_t*
-  TXFM_2D_FLIP_CFG cfg = vp10_get_inv_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_64x64_cfg(tx_type);
   inv_txfm2d_add_c(input, (int16_t *)output, stride, &cfg, txfm_buf);
   clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
 }
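
The comment repeated in each av1_inv_txfm2d_add_*_c wrapper above deserves one extra sentence: because bd is at most 12, every prediction sample in the output buffer lies in [0, 4095], which is also representable as a non-negative int16_t, so reinterpreting the uint16_t buffer as int16_t for the intermediate arithmetic and the final clamp_block is lossless. A small check of that range argument (illustrative only, not part of the library):

#include <assert.h>
#include <stdint.h>

int main(void) {
  /* With bd <= 12 every prediction sample is in [0, 4095], inside the
   * non-negative range of int16_t, so viewing the uint16_t output
   * buffer as int16_t (as the wrappers above do) loses nothing. */
  uint16_t buf[1];
  int16_t *view = (int16_t *)buf;  /* signed/unsigned variants may alias */
  for (uint32_t v = 0; v < (1u << 12); ++v) {
    buf[0] = (uint16_t)v;
    assert(*view == (int32_t)v && *view >= 0);
  }
  return 0;
}
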
diff --git a/av1/common/vp10_inv_txfm2d_cfg.h b/av1/common/av1_inv_txfm2d_cfg.h
similarity index 99%
rename from av1/common/vp10_inv_txfm2d_cfg.h
rename to av1/common/av1_inv_txfm2d_cfg.h
index 9bfa420..ee018fb 100644
--- a/av1/common/vp10_inv_txfm2d_cfg.h
+++ b/av1/common/av1_inv_txfm2d_cfg.h
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_INV_TXFM2D_CFG_H_
-#define VP10_INV_TXFM2D_CFG_H_
-#include "av1/common/vp10_inv_txfm1d.h"
+#ifndef AV1_INV_TXFM2D_CFG_H_
+#define AV1_INV_TXFM2D_CFG_H_
+#include "av1/common/av1_inv_txfm1d.h"
 //  ---------------- config inv_dct_dct_4 ----------------
 static const int8_t inv_shift_dct_dct_4[2] = { 0, -4 };
 static const int8_t inv_stage_range_col_dct_dct_4[4] = { 18, 18, 17, 17 };
@@ -441,4 +441,4 @@
   TXFM_TYPE_DCT32
 };  // .txfm_type_row
 
-#endif  // VP10_INV_TXFM2D_CFG_H_
+#endif  // AV1_INV_TXFM2D_CFG_H_
diff --git a/av1/common/vp10_rtcd.c b/av1/common/av1_rtcd.c
similarity index 85%
rename from av1/common/vp10_rtcd.c
rename to av1/common/av1_rtcd.c
index 7fce6b9..fad509c 100644
--- a/av1/common/vp10_rtcd.c
+++ b/av1/common/av1_rtcd.c
@@ -7,12 +7,12 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #define RTCD_C
-#include "./vp10_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./av1_rtcd.h"
+#include "aom_ports/aom_once.h"
 
-void vp10_rtcd() {
+void av1_rtcd() {
   // TODO(JBB): Remove this once, by insuring that both the encoder and
   // decoder setup functions are protected by once();
   once(setup_rtcd_internal);
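
The renamed av1_rtcd() keeps the same one-shot dispatch pattern: once() guarantees setup_rtcd_internal() runs a single time even when both the encoder and the decoder initialization paths call it. A rough, generic sketch of that guard using pthread_once (aom_ports/aom_once.h supplies its own portable once(), so this is only an analogue):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the generated setup_rtcd_internal(): in the real code it
 * fills in function pointers based on detected CPU features. */
static void setup_rtcd_internal(void) {
  puts("rtcd tables initialized");
}

static pthread_once_t rtcd_once = PTHREAD_ONCE_INIT;

/* Same shape as av1_rtcd(): safe to call from both encoder and decoder
 * setup, the body still runs exactly once. */
void av1_rtcd_sketch(void) { pthread_once(&rtcd_once, setup_rtcd_internal); }

int main(void) {
  av1_rtcd_sketch();
  av1_rtcd_sketch();  /* second call is a no-op */
  return 0;
}
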
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
new file mode 100644
index 0000000..c1b0f9e
--- /dev/null
+++ b/av1/common/av1_rtcd_defs.pl
@@ -0,0 +1,912 @@
+sub av1_common_forward_decls() {
+print <<EOF
+/*
+ * AV1
+ */
+
+#include "aom/aom_integer.h"
+#include "av1/common/common.h"
+#include "av1/common/enums.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/filter.h"
+#include "av1/common/av1_txfm.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct aom_variance_vtable;
+struct search_site_config;
+struct mv;
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls qw/av1_common_forward_decls/;
+
+# functions that are 64 bit only.
+$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
+if ($opts{arch} eq "x86_64") {
+  $mmx_x86_64 = 'mmx';
+  $sse2_x86_64 = 'sse2';
+  $ssse3_x86_64 = 'ssse3';
+  $avx_x86_64 = 'avx';
+  $avx2_x86_64 = 'avx2';
+}
+
+#
+# 10/12-tap convolution filters
+#
+add_proto qw/void av1_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_horiz ssse3/;
+
+add_proto qw/void av1_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
+specialize qw/av1_convolve_vert ssse3/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void av1_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+  specialize qw/av1_highbd_convolve_horiz sse4_1/;
+  add_proto qw/void av1_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
+  specialize qw/av1_highbd_convolve_vert sse4_1/;
+}
+
+#
+# dct
+#
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
+
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4/;
+
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8/;
+
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
+
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16/;
+
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
+
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32/;
+
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd/;
+
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
+  } else {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add sse2/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
+
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4 sse2/;
+
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8 sse2/;
+
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
+
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16 sse2/;
+
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
+
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32 sse2/;
+
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
+  }
+} else {
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
+  } else {
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2 neon dspr2/;
+
+    add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x8_32_add/;
+
+    add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x4_32_add/;
+
+    add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x16_128_add/;
+
+    add_proto qw/void av1_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x8_128_add/;
+
+    add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht16x32_512_add/;
+
+    add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht32x16_512_add/;
+
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2 neon dspr2/;
+
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add sse2 dspr2/;
+
+    if (aom_config("CONFIG_EXT_TX") ne "yes") {
+      specialize qw/av1_iht4x4_16_add msa/;
+      specialize qw/av1_iht8x8_64_add msa/;
+      specialize qw/av1_iht16x16_256_add msa/;
+    }
+
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
+
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
+
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
+
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
+
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
+
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
+
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
+
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
+
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
+  }
+}
+
+if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+  add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_nuq/;
+
+  add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_fp_nuq/;
+
+  add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_32x32_nuq/;
+
+  add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+  specialize qw/quantize_32x32_fp_nuq/;
+}
+
+# EXT_INTRA predictor functions
+if (aom_config("CONFIG_EXT_INTRA") eq "yes") {
+  add_proto qw/void av1_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_dc_filter_predictor sse4_1/;
+  add_proto qw/void av1_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_v_filter_predictor sse4_1/;
+  add_proto qw/void av1_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_h_filter_predictor sse4_1/;
+  add_proto qw/void av1_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d45_filter_predictor sse4_1/;
+  add_proto qw/void av1_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d135_filter_predictor sse4_1/;
+  add_proto qw/void av1_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d117_filter_predictor sse4_1/;
+  add_proto qw/void av1_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d153_filter_predictor sse4_1/;
+  add_proto qw/void av1_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d207_filter_predictor sse4_1/;
+  add_proto qw/void av1_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_d63_filter_predictor sse4_1/;
+  add_proto qw/void av1_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
+  specialize qw/av1_tm_filter_predictor sse4_1/;
+  # High bitdepth functions
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    add_proto qw/void av1_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_dc_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_v_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_h_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d45_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d135_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d117_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d153_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d207_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_d63_filter_predictor sse4_1/;
+    add_proto qw/void av1_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
+    specialize qw/av1_highbd_tm_filter_predictor sse4_1/;
+  }
+}
+
+# High bitdepth functions
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  #
+  # Sub Pixel Filters
+  #
+  add_proto qw/void av1_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_copy/;
+
+  add_proto qw/void av1_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_avg/;
+
+  add_proto qw/void av1_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+
+  add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+
+  #
+  # dct
+  #
+  # Note as optimized versions of these functions are added we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  add_proto qw/void av1_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht4x4_16_add/;
+
+  add_proto qw/void av1_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht4x8_32_add/;
+
+  add_proto qw/void av1_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x4_32_add/;
+
+  add_proto qw/void av1_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x16_128_add/;
+
+  add_proto qw/void av1_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x8_128_add/;
+
+  add_proto qw/void av1_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x32_512_add/;
+
+  add_proto qw/void av1_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht32x16_512_add/;
+
+  add_proto qw/void av1_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x8_64_add/;
+
+  add_proto qw/void av1_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x16_256_add/;
+}
+
+#
+# Encoder functions below this point.
+#
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+
+# ENCODEMB INVOKE
+
+if (aom_config("CONFIG_AOM_QM") eq "yes") {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # the transform coefficients are held in 32-bit
+    # values, so the assembler code for  av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    specialize qw/av1_fdct8x8_quant/;
+  } else {
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
+
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+  }
+} else {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+    # The transform coefficients are held in 32-bit
+    # values, so the assembler code for av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp/;
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/;
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant/;
+  } else {
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error sse2 avx2 msa/;
+
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon sse2/;
+
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp neon sse2/, "$ssse3_x86_64";
+
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/, "$ssse3_x86_64";
+
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant sse2 ssse3 neon/;
+  }
+
+}
+
+# fdct functions
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2/;
+
+  add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x8/;
+
+  add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x4/;
+
+  add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x16/;
+
+  add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x8/;
+
+  add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x32/;
+
+  add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x16/;
+
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2/;
+
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2/;
+
+  add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x32/;
+
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4/;
+} else {
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2/;
+
+  add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x8/;
+
+  add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x4/;
+
+  add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x16/;
+
+  add_proto qw/void av1_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x8/;
+
+  add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x32/;
+
+  add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x16/;
+
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2/;
+
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2/;
+
+  if (aom_config("CONFIG_EXT_TX") ne "yes") {
+    specialize qw/av1_fht4x4 msa/;
+    specialize qw/av1_fht8x8 msa/;
+    specialize qw/av1_fht16x16 msa/;
+  }
+
+  add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht32x32/;
+
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4/;
+}
+
+add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
+specialize qw/av1_fwd_idtx/;
+
+# Inverse transform
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note: as optimized versions of these functions are added, we need to add a check to ensure
+  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
+  add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_1_add/;
+
+  add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_16_add/;
+
+  add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_1_add/;
+
+  add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_64_add/;
+
+  add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_12_add/;
+
+  add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_1_add/;
+
+  add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_256_add/;
+
+  add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_10_add/;
+
+  add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1024_add/;
+
+  add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_34_add/;
+
+  add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1_add/;
+
+  add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_1_add/;
+
+  add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_16_add/;
+
+  add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct4x4_1_add/;
+
+  add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct8x8_1_add/;
+
+  add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct16x16_1_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1024_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_34_add/;
+
+  add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1_add/;
+
+  add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_1_add/;
+
+  add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_16_add/;
+
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add/;
+
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add/;
+
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add/;
+
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add/;
+
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add/;
+  } else {
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add sse2/;
+
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add sse2/;
+
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add sse2/;
+
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add sse2/;
+
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add sse2/;
+  }  # CONFIG_EMULATE_HARDWARE
+} else {
+  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
+  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add/;
+
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add/;
+
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add/;
+
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add/;
+
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add/;
+
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add/;
+
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add/;
+
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add/;
+
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add/;
+
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add/;
+
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add/;
+
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
+
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
+  } else {
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add sse2/;
+
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add sse2/;
+
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add sse2/;
+
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add sse2/;
+
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add sse2/;
+
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add sse2/;
+
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add sse2/;
+
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add sse2/;
+
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add sse2/;
+
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add sse2/;
+
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add sse2/;
+
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
+
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
+  }  # CONFIG_EMULATE_HARDWARE
+}  # CONFIG_AOM_HIGHBITDEPTH
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  #fwd txfm
+  add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
+  add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
+
+  #inv txfm
+  add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
+  add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_32x32/;
+  add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  specialize qw/av1_inv_txfm2d_add_64x64/;
+}
+
+#
+# Motion search
+#
+add_proto qw/int av1_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
+specialize qw/av1_full_search_sad sse3 sse4_1/;
+$av1_full_search_sad_sse3=av1_full_search_sadx3;
+$av1_full_search_sad_sse4_1=av1_full_search_sadx8;
+
+add_proto qw/int av1_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_diamond_search_sad/;
+
+add_proto qw/int av1_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_full_range_search/;
+
+add_proto qw/void av1_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+specialize qw/av1_temporal_filter_apply sse2 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+
+  # ENCODEMB INVOKE
+  if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
+    add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_nuq/;
+
+    add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_fp_nuq/;
+
+    add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_32x32_nuq/;
+
+    add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
+    specialize qw/highbd_quantize_32x32_fp_nuq/;
+  }
+
+  add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
+  specialize qw/av1_highbd_block_error sse2/;
+
+  if (aom_config("CONFIG_AOM_QM") eq "yes") {
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+    add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+  } else {
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+    specialize qw/av1_highbd_quantize_fp sse4_1/;
+
+    add_proto qw/void av1_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
+    specialize qw/av1_highbd_quantize_b/;
+  }
+
+  # fdct functions
+  add_proto qw/void av1_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht4x4 sse4_1/;
+
+  add_proto qw/void av1_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht4x8/;
+
+  add_proto qw/void av1_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x4/;
+
+  add_proto qw/void av1_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x16/;
+
+  add_proto qw/void av1_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x8/;
+
+  add_proto qw/void av1_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x32/;
+
+  add_proto qw/void av1_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht32x16/;
+
+  add_proto qw/void av1_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x8/;
+
+  add_proto qw/void av1_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x16/;
+
+  add_proto qw/void av1_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht32x32/;
+
+  add_proto qw/void av1_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_highbd_fwht4x4/;
+
+  add_proto qw/void av1_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+  specialize qw/av1_highbd_temporal_filter_apply/;
+
+}
+# End av1 high bitdepth encoder functions
+
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+  add_proto qw/uint64_t av1_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
+  specialize qw/av1_wedge_sse_from_residuals sse2/;
+  add_proto qw/int av1_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
+  specialize qw/av1_wedge_sign_from_residuals sse2/;
+  add_proto qw/void av1_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
+  specialize qw/av1_wedge_compute_delta_squares sse2/;
+}
+
+}
+# end encoder functions
+1;
diff --git a/av1/common/vp10_txfm.h b/av1/common/av1_txfm.h
similarity index 96%
rename from av1/common/vp10_txfm.h
rename to av1/common/av1_txfm.h
index bfeb3ea..289f953 100644
--- a/av1/common/vp10_txfm.h
+++ b/av1/common/av1_txfm.h
@@ -7,16 +7,16 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VP10_TXFM_H_
-#define VP10_TXFM_H_
+#ifndef AV1_TXFM_H_
+#define AV1_TXFM_H_
 
 #include <assert.h>
 #include <math.h>
 #include <stdio.h>
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 static const int cos_bit_min = 10;
 static const int cos_bit_max = 16;
@@ -198,10 +198,10 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_cfg(int tx_type, int tx_size);
-TXFM_2D_FLIP_CFG vp10_get_fwd_txfm_64x64_cfg(int tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
 #ifdef __cplusplus
 }
 #endif  // __cplusplus
 
-#endif  // VP10_TXFM_H_
+#endif  // AV1_TXFM_H_
diff --git a/av1/common/blockd.c b/av1/common/blockd.c
index ee95271..b13e562 100644
--- a/av1/common/blockd.c
+++ b/av1/common/blockd.c
@@ -14,8 +14,8 @@
 
 #include "av1/common/blockd.h"
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *left_mi, int b) {
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+                                    const MODE_INFO *left_mi, int b) {
   if (b == 0 || b == 2) {
     if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
 
@@ -26,8 +26,8 @@
   }
 }
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                      const MODE_INFO *above_mi, int b) {
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+                                     const MODE_INFO *above_mi, int b) {
   if (b == 0 || b == 1) {
     if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
 
@@ -38,7 +38,7 @@
   }
 }
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -81,18 +81,18 @@
   }
 }
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
-                                    BLOCK_SIZE bsize,
-                                    foreach_transformed_block_visitor visit,
-                                    void *arg) {
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                   BLOCK_SIZE bsize,
+                                   foreach_transformed_block_visitor visit,
+                                   void *arg) {
   int plane;
   for (plane = 0; plane < MAX_MB_PLANE; ++plane)
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
 }
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                       int aoff, int loff) {
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                      int aoff, int loff) {
   ENTROPY_CONTEXT *const a = pd->above_context + aoff;
   ENTROPY_CONTEXT *const l = pd->left_context + loff;
   const int tx_w_in_blocks = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -128,7 +128,7 @@
   }
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
   int i;
 
   for (i = 0; i < MAX_MB_PLANE; i++) {
@@ -151,7 +151,7 @@
 
 // Returns whether filter selection is needed for a given
 // intra prediction angle.
-int vp10_is_intra_filter_switchable(int angle) {
+int av1_is_intra_filter_switchable(int angle) {
   assert(angle > 0 && angle < 270);
   if (angle % 45 == 0) return 0;
   if (angle > 90 && angle < 180) {
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 4dcc1f0..327a8d0 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_BLOCKD_H_
-#define VP10_COMMON_BLOCKD_H_
+#ifndef AV1_COMMON_BLOCKD_H_
+#define AV1_COMMON_BLOCKD_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/mem.h"
 #include "aom_scale/yv12config.h"
 
@@ -39,7 +39,7 @@
 } FRAME_TYPE;
 
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-#define IsInterpolatingFilter(filter) (vp10_is_interpolating_filter(filter))
+#define IsInterpolatingFilter(filter) (av1_is_interpolating_filter(filter))
 #else
 #define IsInterpolatingFilter(filter) (1)
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
@@ -158,11 +158,11 @@
   // Number of base colors for Y (0) and UV (1)
   uint8_t palette_size[2];
 // Value of base colors for Y, U, and V
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
 #else
   uint8_t palette_colors[3 * PALETTE_MAX_SIZE];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   // Only used by encoder to store the color index of the top left pixel.
   // TODO(huisu): move this to encoder
   uint8_t palette_first_color_idx[2];
@@ -260,11 +260,11 @@
   return mbmi->ref_frame[1] > INTRA_FRAME;
 }
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *left_mi, int b);
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
+                                    const MODE_INFO *left_mi, int b);
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                      const MODE_INFO *above_mi, int b);
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
+                                     const MODE_INFO *above_mi, int b);
 
 enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
 
@@ -314,7 +314,7 @@
 
 typedef struct RefBuffer {
   // TODO(dkovalev): idx is not really required and should be removed, now it
-  // is used in vp10_onyxd_if.c
+  // is used in av1_onyxd_if.c
   int idx;
   YV12_BUFFER_CONFIG *buf;
   struct scale_factors sf;
@@ -339,7 +339,7 @@
   int up_available;
   int left_available;
 
-  const vpx_prob (*partition_probs)[PARTITION_TYPES - 1];
+  const aom_prob (*partition_probs)[PARTITION_TYPES - 1];
 
   /* Distance of MB away from frame edges */
   int mb_to_left_edge;
@@ -381,7 +381,7 @@
   uint8_t is_sec_rect;
 #endif
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   /* Bit depth: 8, 10, 12 */
   int bd;
 #endif
@@ -389,7 +389,7 @@
   int lossless[MAX_SEGMENTS];
   int corrupted;
 
-  struct vpx_internal_error_info *error_info;
+  struct aom_internal_error_info *error_info;
 #if CONFIG_GLOBAL_MOTION
   Global_Motion_Params *global_motion;
 #endif  // CONFIG_GLOBAL_MOTION
@@ -419,7 +419,7 @@
 #if CONFIG_SUPERTX
 static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
   return (int)txsize_sqr_map[mbmi->tx_size] >
-         VPXMIN(b_width_log2_lookup[mbmi->sb_type],
+         AOMMIN(b_width_log2_lookup[mbmi->sb_type],
                 b_height_log2_lookup[mbmi->sb_type]);
 }
 #endif  // CONFIG_SUPERTX
@@ -567,7 +567,7 @@
 
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
   if (!is_inter) {
-    return VPXMIN(max_tx_size, largest_tx_size);
+    return AOMMIN(max_tx_size, largest_tx_size);
   } else {
     const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
     if (txsize_sqr_up_map[max_rect_tx_size] <= largest_tx_size) {
@@ -578,7 +578,7 @@
   }
 #else
   (void)is_inter;
-  return VPXMIN(max_tx_size, largest_tx_size);
+  return AOMMIN(max_tx_size, largest_tx_size);
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
 }
 
@@ -606,7 +606,7 @@
   ADST_ADST,  // FILTER_TM
 };
 
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_EXT_TILE
@@ -718,7 +718,7 @@
 #endif  // CONFIG_EXT_TX
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
 
 static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
                                           int xss, int yss) {
@@ -726,7 +726,7 @@
     return TX_4X4;
   } else {
     const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss];
-    return VPXMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
+    return AOMMIN(txsize_sqr_map[y_tx_size], max_txsize_lookup[plane_bsize]);
   }
 }
 
@@ -763,18 +763,18 @@
                                                   BLOCK_SIZE plane_bsize,
                                                   TX_SIZE tx_size, void *arg);
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg);
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
-                                    BLOCK_SIZE bsize,
-                                    foreach_transformed_block_visitor visit,
-                                    void *arg);
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                   BLOCK_SIZE bsize,
+                                   foreach_transformed_block_visitor visit,
+                                   void *arg);
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                       int aoff, int loff);
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                      int aoff, int loff);
 
 #if CONFIG_EXT_INTER
 static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
@@ -830,4 +830,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_BLOCKD_H_
+#endif  // AV1_COMMON_BLOCKD_H_
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index bba40cb..2309391 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -28,9 +28,9 @@
 #define BS (MI_SIZE * MAX_MIB_SIZE)
 
 // Iterate over blocks within a superblock
-static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
-                         const VP10_COMMON *cm, MACROBLOCKD *xd,
-                         MODE_INFO *const *mi_8x8, int xpos, int ypos) {
+static void av1_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
+                        const AV1_COMMON *cm, MACROBLOCKD *xd,
+                        MODE_INFO *const *mi_8x8, int xpos, int ypos) {
   // Temporary buffer (to allow SIMD parallelism)
   uint8_t buf_unaligned[BS * BS + 15];
   uint8_t *buf = (uint8_t *)(((intptr_t)buf_unaligned + 15) & ~15);
@@ -56,7 +56,7 @@
           has_bottom &= y != MAX_MIB_SIZE - 1;
           has_right &= x != MAX_MIB_SIZE - 1;
 #endif
-          vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+          av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
           clpf_block(
               xd->plane[p].dst.buf, CLPF_ALLOW_PIXEL_PARALLELISM
                                         ? buf + y * MI_SIZE * BS + x * MI_SIZE
@@ -74,7 +74,7 @@
       for (x = 0; x < MAX_MIB_SIZE && xpos + x < cm->mi_cols; x++) {
         const MB_MODE_INFO *mbmi =
             &mi_8x8[(ypos + y) * cm->mi_stride + xpos + x]->mbmi;
-        vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+        av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
         if (!mbmi->skip) {
           int i = 0;
           for (i = 0; i<MI_SIZE>> xd->plane[p].subsampling_y; i++)
@@ -89,11 +89,11 @@
 }
 
 // Iterate over the superblocks of an entire frame
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
-                     MACROBLOCKD *xd) {
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+                    MACROBLOCKD *xd) {
   int x, y;
 
   for (y = 0; y < cm->mi_rows; y += MAX_MIB_SIZE)
     for (x = 0; x < cm->mi_cols; x += MAX_MIB_SIZE)
-      vp10_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
+      av1_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
 }
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index 5b9d55b..85f29d9 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -3,8 +3,8 @@
 (Replace with proper AOM header)
 */
 
-#ifndef VP10_COMMON_CLPF_H_
-#define VP10_COMMON_CLPF_H_
+#ifndef AV1_COMMON_CLPF_H_
+#define AV1_COMMON_CLPF_H_
 
 #include "av1/common/reconinter.h"
 
@@ -16,7 +16,7 @@
 #define CLPF_FILTER_ALL_PLANES \
   0  // 1 = filter both luma and chroma, 0 = filter only luma
 
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
-                     MACROBLOCKD *xd);
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
+                    MACROBLOCKD *xd);
 
 #endif
diff --git a/av1/common/common.h b/av1/common/common.h
index 4e30034..c333a17 100644
--- a/av1/common/common.h
+++ b/av1/common/common.h
@@ -8,17 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_COMMON_H_
-#define VP10_COMMON_COMMON_H_
+#ifndef AV1_COMMON_COMMON_H_
+#define AV1_COMMON_COMMON_H_
 
 /* Interface header for common constant data structures and lookup tables */
 
 #include <assert.h>
 
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/bitops.h"
 
 #ifdef __cplusplus
@@ -28,21 +28,21 @@
 #define PI 3.141592653589793238462643383279502884
 
 // Only need this for fixed-size arrays, for structs just assign.
-#define vp10_copy(dest, src)             \
+#define av1_copy(dest, src)              \
   {                                      \
     assert(sizeof(dest) == sizeof(src)); \
     memcpy(dest, src, sizeof(src));      \
   }
 
 // Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n)          \
+#define av1_copy_array(dest, src, n)           \
   {                                            \
     assert(sizeof(*(dest)) == sizeof(*(src))); \
     memcpy(dest, src, n * sizeof(*(src)));     \
   }
 
-#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
+#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
 
 static INLINE int get_unsigned_bits(unsigned int num_values) {
   return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -53,7 +53,7 @@
   do {                                                                      \
     lval = (expr);                                                          \
     if (!lval)                                                              \
-      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,                   \
+      aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,                   \
                          "Failed to allocate " #lval " at %s:%d", __FILE__, \
                          __LINE__);                                         \
   } while (0)
@@ -62,19 +62,19 @@
   do {                                                    \
     lval = (expr);                                        \
     if (!lval)                                            \
-      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+      aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, \
                          "Failed to allocate " #lval);    \
   } while (0)
 #endif
 // TODO(yaowu): validate the usage of these codes or develop new ones.
-#define VP10_SYNC_CODE_0 0x49
-#define VP10_SYNC_CODE_1 0x83
-#define VP10_SYNC_CODE_2 0x43
+#define AV1_SYNC_CODE_0 0x49
+#define AV1_SYNC_CODE_1 0x83
+#define AV1_SYNC_CODE_2 0x43
 
-#define VPX_FRAME_MARKER 0x2
+#define AOM_FRAME_MARKER 0x2
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_H_
+#endif  // AV1_COMMON_COMMON_H_
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index 4348f08..1fdabfa 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_COMMON_DATA_H_
-#define VP10_COMMON_COMMON_DATA_H_
+#ifndef AV1_COMMON_COMMON_DATA_H_
+#define AV1_COMMON_COMMON_DATA_H_
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -98,7 +98,7 @@
 #endif  // CONFIG_EXT_TX
 };
 
-// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize)))
+// AOMMIN(3, AOMMIN(b_width_log2(bsize), b_height_log2(bsize)))
 static const uint8_t size_group_lookup[BLOCK_SIZES] = {
   0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, IF_EXT_PARTITION(3, 3, 3)
 };
@@ -583,4 +583,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_DATA_H_
+#endif  // AV1_COMMON_COMMON_DATA_H_
diff --git a/av1/common/debugmodes.c b/av1/common/debugmodes.c
index 6c958a8..d4169fd 100644
--- a/av1/common/debugmodes.c
+++ b/av1/common/debugmodes.c
@@ -13,7 +13,7 @@
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
 
-static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
+static void log_frame_info(AV1_COMMON *cm, const char *str, FILE *f) {
   fprintf(f, "%s", str);
   fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
           cm->show_frame, cm->base_qindex);
@@ -22,7 +22,7 @@
  * and uses the passed in member offset to print out the value of an integer
  * for each mbmi member value in the mi structure.
  */
-static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
+static void print_mi_data(AV1_COMMON *cm, FILE *file, const char *descriptor,
                           size_t member_offset) {
   int mi_row, mi_col;
   MODE_INFO **mi = cm->mi_grid_visible;
@@ -43,7 +43,7 @@
   fprintf(file, "\n");
 }
 
-void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
+void av1_print_modes_and_motion_vectors(AV1_COMMON *cm, const char *file) {
   int mi_row;
   int mi_col;
   FILE *mvs = fopen(file, "a");
diff --git a/av1/common/dering.c b/av1/common/dering.c
index 7c116a2..97b31af 100644
--- a/av1/common/dering.c
+++ b/av1/common/dering.c
@@ -11,8 +11,8 @@
 #include <string.h>
 #include <math.h>
 
-#include "./vpx_scale_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_scale_rtcd.h"
+#include "aom/aom_integer.h"
 #include "av1/common/dering.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/reconinter.h"
@@ -26,7 +26,7 @@
   return clamp(level, gi, MAX_DERING_LEVEL - 1);
 }
 
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
   int r, c;
   int maxc, maxr;
   int skip = 1;
@@ -44,8 +44,8 @@
   return skip;
 }
 
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                       MACROBLOCKD *xd, int global_level) {
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                      MACROBLOCKD *xd, int global_level) {
   int r, c;
   int sbr, sbc;
   int nhsb, nvsb;
@@ -56,21 +56,21 @@
   int bsize[3];
   int dec[3];
   int pli;
-  int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
+  int coeff_shift = AOMMAX(cm->bit_depth - 8, 0);
   nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
   nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
-  bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
-  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  bskip = aom_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+  av1_setup_dst_planes(xd->plane, frame, 0, 0);
   for (pli = 0; pli < 3; pli++) {
     dec[pli] = xd->plane[pli].subsampling_x;
     bsize[pli] = 8 >> dec[pli];
   }
   stride = bsize[0] * cm->mi_cols;
   for (pli = 0; pli < 3; pli++) {
-    src[pli] = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+    src[pli] = aom_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
     for (r = 0; r < bsize[pli] * cm->mi_rows; ++r) {
       for (c = 0; c < bsize[pli] * cm->mi_cols; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
           src[pli][r * stride + c] = CONVERT_TO_SHORTPTR(
               xd->plane[pli].dst.buf)[r * xd->plane[pli].dst.stride + c];
@@ -78,7 +78,7 @@
 #endif
           src[pli][r * stride + c] =
               xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         }
 #endif
       }
@@ -95,8 +95,8 @@
     for (sbc = 0; sbc < nhsb; sbc++) {
       int level;
       int nhb, nvb;
-      nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
-      nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+      nhb = AOMMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+      nvb = AOMMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
       for (pli = 0; pli < 3; pli++) {
         int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
         int threshold;
@@ -123,7 +123,7 @@
                   coeff_shift);
         for (r = 0; r < bsize[pli] * nvb; ++r) {
           for (c = 0; c < bsize[pli] * nhb; ++c) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             if (cm->use_highbitdepth) {
               CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
               [xd->plane[pli].dst.stride *
@@ -136,7 +136,7 @@
                                          (bsize[pli] * MAX_MIB_SIZE * sbr + r) +
                                      sbc * bsize[pli] * MAX_MIB_SIZE + c] =
                   dst[r * MAX_MIB_SIZE * bsize[pli] + c];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             }
 #endif
           }
@@ -145,7 +145,7 @@
     }
   }
   for (pli = 0; pli < 3; pli++) {
-    vpx_free(src[pli]);
+    aom_free(src[pli]);
   }
-  vpx_free(bskip);
+  aom_free(bskip);
 }
diff --git a/av1/common/dering.h b/av1/common/dering.h
index de59c86..254d34f 100644
--- a/av1/common/dering.h
+++ b/av1/common/dering.h
@@ -1,10 +1,10 @@
-#ifndef VP10_COMMON_DERING_H_
-#define VP10_COMMON_DERING_H_
+#ifndef AV1_COMMON_DERING_H_
+#define AV1_COMMON_DERING_H_
 
 #include "av1/common/od_dering.h"
 #include "av1/common/onyxc_int.h"
-#include "aom/vpx_integer.h"
-#include "./vpx_config.h"
+#include "aom/aom_integer.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -19,14 +19,14 @@
 #define DERING_REFINEMENT_LEVELS 4
 
 int compute_level_from_index(int global_level, int gi);
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col);
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                       MACROBLOCKD *xd, int global_level);
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col);
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                      MACROBLOCKD *xd, int global_level);
 
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
-                       VP10_COMMON *cm, MACROBLOCKD *xd);
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                      AV1_COMMON *cm, MACROBLOCKD *xd);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VP10_COMMON_DERING_H_
+#endif  // AV1_COMMON_DERING_H_
diff --git a/av1/common/divide.c b/av1/common/divide.c
index f0c6730..3c82be8 100644
--- a/av1/common/divide.c
+++ b/av1/common/divide.c
@@ -25,7 +25,7 @@
   }
 }
 */
-const struct fastdiv_elem vp10_fastdiv_tab[256] = {
+const struct fastdiv_elem av1_fastdiv_tab[256] = {
   { 0, 0 },           { 0, 0 },           { 0, 1 },
   { 1431655766, 2 },  { 0, 2 },           { 2576980378u, 3 },
   { 1431655766, 3 },  { 613566757, 3 },   { 0, 3 },
diff --git a/av1/common/divide.h b/av1/common/divide.h
index 7de6c91..b96ad4c 100644
--- a/av1/common/divide.h
+++ b/av1/common/divide.h
@@ -8,15 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_DIVIDE_H_
-#define VP10_COMMON_DIVIDE_H_
+#ifndef AV1_COMMON_DIVIDE_H_
+#define AV1_COMMON_DIVIDE_H_
 // An implementation of the divide-by-multiply algorithm
 // https://gmplib.org/~tege/divcnst-pldi94.pdf
 
 #include <limits.h>
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,14 +27,14 @@
   unsigned shift;
 };
 
-extern const struct fastdiv_elem vp10_fastdiv_tab[256];
+extern const struct fastdiv_elem av1_fastdiv_tab[256];
 
 static INLINE unsigned fastdiv(unsigned x, int y) {
   unsigned t =
-      ((uint64_t)x * vp10_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
-  return (t + x) >> vp10_fastdiv_tab[y].shift;
+      ((uint64_t)x * av1_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
+  return (t + x) >> av1_fastdiv_tab[y].shift;
 }
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
-#endif  // VP10_COMMON_DIVIDE_H_
+#endif  // AV1_COMMON_DIVIDE_H_
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index be96c42..83f8f65 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -12,12 +12,12 @@
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/entropymode.h"
-#include "aom_mem/vpx_mem.h"
-#include "aom/vpx_integer.h"
+#include "aom_mem/aom_mem.h"
+#include "aom/aom_integer.h"
 
 // Unconstrained Node Tree
 /* clang-format off */
-const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   2, 6,                                // 0 = LOW_VAL
   -TWO_TOKEN, 4,                       // 1 = TWO
   -THREE_TOKEN, -FOUR_TOKEN,           // 2 = THREE
@@ -29,30 +29,30 @@
 };
 /* clang-format on */
 
-const vpx_prob vp10_cat1_prob[] = { 159 };
-const vpx_prob vp10_cat2_prob[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
-                                    196, 177, 153, 140, 133, 130, 129 };
-#if CONFIG_VP9_HIGHBITDEPTH
-const vpx_prob vp10_cat1_prob_high10[] = { 159 };
-const vpx_prob vp10_cat2_prob_high10[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high10[] = {
+const aom_prob av1_cat1_prob[] = { 159 };
+const aom_prob av1_cat2_prob[] = { 165, 145 };
+const aom_prob av1_cat3_prob[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+                                   196, 177, 153, 140, 133, 130, 129 };
+#if CONFIG_AOM_HIGHBITDEPTH
+const aom_prob av1_cat1_prob_high10[] = { 159 };
+const aom_prob av1_cat2_prob_high10[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high10[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high10[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high10[] = {
   255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
 };
-const vpx_prob vp10_cat1_prob_high12[] = { 159 };
-const vpx_prob vp10_cat2_prob_high12[] = { 165, 145 };
-const vpx_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
-const vpx_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
-const vpx_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
-                                           254, 252, 249, 243, 230, 196,
-                                           177, 153, 140, 133, 130, 129 };
+const aom_prob av1_cat1_prob_high12[] = { 159 };
+const aom_prob av1_cat2_prob_high12[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high12[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high12[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+                                          254, 252, 249, 243, 230, 196,
+                                          177, 153, 140, 133, 130, 129 };
 #endif
 
 const uint16_t band_count_table[TX_SIZES_ALL][8] = {
@@ -75,7 +75,7 @@
 #endif  // CONFIG_EXT_TX
 };
 
-const uint8_t vp10_coefband_trans_8x8plus[1024] = {
+const uint8_t av1_coefband_trans_8x8plus[1024] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
   // beyond MAXBAND_INDEX+1 all values are filled as 5
   5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -120,18 +120,18 @@
 };
 
 #if CONFIG_EXT_TX
-const uint8_t vp10_coefband_trans_4x8_8x4[32] = {
+const uint8_t av1_coefband_trans_4x8_8x4[32] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
   4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 };
 #endif  // CONFIG_EXT_TX
 
-const uint8_t vp10_coefband_trans_4x4[16] = {
+const uint8_t av1_coefband_trans_4x4[16] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
 };
 
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
-                                                       4, 5, 5, 5, 5, 5 };
+const uint8_t av1_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+                                                      4, 5, 5, 5, 5, 5 };
 
 // Model obtained from a 2-sided zero-centered distribution derived
 // from a Pareto distribution. The cdf of the distribution is:
@@ -145,9 +145,9 @@
 
 // Every odd line in this table can be generated from the even lines
 // by averaging :
-// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
-//                              vp10_pareto8_full[l+1][node] ) >> 1;
-const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
+// av1_pareto8_full[l][node] = (av1_pareto8_full[l-1][node] +
+//                              av1_pareto8_full[l+1][node] ) >> 1;
+const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
   { 3, 86, 128, 6, 86, 23, 88, 29 },
   { 6, 86, 128, 11, 87, 42, 91, 52 },
   { 9, 86, 129, 17, 88, 61, 94, 76 },
@@ -417,7 +417,7 @@
 // beta = 8
 // Values for tokens ONE_TOKEN through CATEGORY6_TOKEN included here.
 // ZERO_TOKEN and EOB_TOKEN are coded as flags outside this coder.
-const AnsP10 vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
+const AnsP10 av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2] = {
   { 4, 4, 4, 4, 8, 15, 30, 57, 103, 795 },
   { 8, 8, 8, 8, 15, 30, 57, 103, 168, 619 },
   { 12, 12, 12, 12, 23, 43, 80, 138, 205, 487 },
@@ -678,7 +678,7 @@
 
 /* clang-format off */
 #if CONFIG_ENTROPY
-const vp10_coeff_probs_model
+const av1_coeff_probs_model
 default_qctx_coef_probs[QCTX_BINS][TX_SIZES][PLANE_TYPES] = {
     {  // Q_Index 0
         {  // TX_SIZE 0
@@ -2450,7 +2450,7 @@
     },
 };
 #else
-static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2534,7 +2534,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2618,7 +2618,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2702,7 +2702,7 @@
   }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
   {  // Y plane
     {  // Intra
       {  // Band 0
@@ -2788,30 +2788,30 @@
 #endif  // CONFIG_ENTROPY
 /* clang-format on */
 
-static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) {
+static void extend_to_full_distribution(aom_prob *probs, aom_prob p) {
   assert(p != 0);
-  memcpy(probs, vp10_pareto8_full[p - 1], MODEL_NODES * sizeof(vpx_prob));
+  memcpy(probs, av1_pareto8_full[p - 1], MODEL_NODES * sizeof(aom_prob));
 }
 
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full) {
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full) {
   if (full != model)
-    memcpy(full, model, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+    memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
 }
 
 #if CONFIG_ANS
-void vp10_build_token_cdfs(const vpx_prob *pdf_model, rans_dec_lut cdf) {
+void av1_build_token_cdfs(const aom_prob *pdf_model, rans_dec_lut cdf) {
   AnsP10 pdf_tab[ENTROPY_TOKENS - 1];
   assert(pdf_model[2] != 0);
   // TODO(aconverse): Investigate making the precision of the zero and EOB tree
   // nodes 10-bits.
   rans_merge_prob8_pdf(pdf_tab, pdf_model[1],
-                       vp10_pareto8_token_probs[pdf_model[2] - 1],
+                       av1_pareto8_token_probs[pdf_model[2] - 1],
                        ENTROPY_TOKENS - 2);
   rans_build_cdf_from_pdf(pdf_tab, cdf);
 }
 
-void vp10_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
+void av1_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
   TX_SIZE t;
   int i, j, k, l;
   for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -2819,42 +2819,42 @@
       for (j = 0; j < REF_TYPES; ++j)
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
-            vp10_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
-                                  fc->coef_cdfs[t][i][j][k][l]);
+            av1_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
+                                 fc->coef_cdfs[t][i][j][k][l]);
 }
 #endif  // CONFIG_ANS
 
-void vp10_default_coef_probs(VP10_COMMON *cm) {
+void av1_default_coef_probs(AV1_COMMON *cm) {
 #if CONFIG_ENTROPY
-  const int index = VPXMIN(
+  const int index = AOMMIN(
       ROUND_POWER_OF_TWO(cm->base_qindex, 8 - QCTX_BIN_BITS), QCTX_BINS - 1);
-  vp10_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
+  av1_copy(cm->fc->coef_probs, default_qctx_coef_probs[index]);
 #else
-  vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
-  vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
-  vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
-  vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
+  av1_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
+  av1_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
+  av1_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
+  av1_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
 #endif  // CONFIG_ENTROPY
 #if CONFIG_ANS
-  vp10_coef_pareto_cdfs(cm->fc);
+  av1_coef_pareto_cdfs(cm->fc);
 #endif  // CONFIG_ANS
 }
 
-static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
+static void adapt_coef_probs(AV1_COMMON *cm, TX_SIZE tx_size,
                              unsigned int count_sat,
                              unsigned int update_factor) {
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
-  vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
+  av1_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
 #if CONFIG_ENTROPY
-  const vp10_coeff_probs_model *const pre_probs =
+  const av1_coeff_probs_model *const pre_probs =
       cm->partial_prob_update
-          ? (const vp10_coeff_probs_model *)cm->starting_coef_probs[tx_size]
+          ? (const av1_coeff_probs_model *)cm->starting_coef_probs[tx_size]
           : pre_fc->coef_probs[tx_size];
 #else
-  const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
+  const av1_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
 #endif  // CONFIG_ENTROPY
-  const vp10_coeff_count_model *const counts =
-      (const vp10_coeff_count_model *)cm->counts.coef[tx_size];
+  const av1_coeff_count_model *const counts =
+      (const av1_coeff_count_model *)cm->counts.coef[tx_size];
   const unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       (const unsigned int(*)[
           REF_TYPES][COEF_BANDS][COEFF_CONTEXTS])cm->counts.eob_branch[tx_size];
@@ -2873,12 +2873,12 @@
           };
           for (m = 0; m < UNCONSTRAINED_NODES; ++m)
             probs[i][j][k][l][m] =
-                vp10_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
-                                 count_sat, update_factor);
+                av1_merge_probs(pre_probs[i][j][k][l][m], branch_ct[m],
+                                count_sat, update_factor);
         }
 }
 
-void vp10_adapt_coef_probs(VP10_COMMON *cm) {
+void av1_adapt_coef_probs(AV1_COMMON *cm) {
   TX_SIZE t;
   unsigned int count_sat, update_factor;
 
@@ -2905,18 +2905,18 @@
   for (t = TX_4X4; t <= TX_32X32; t++)
     adapt_coef_probs(cm, t, count_sat, update_factor);
 #if CONFIG_ANS
-  vp10_coef_pareto_cdfs(cm->fc);
+  av1_coef_pareto_cdfs(cm->fc);
 #endif
 }
 
 #if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(VP10_COMMON *cm, int mi_row, int mi_col) {
+void av1_partial_adapt_probs(AV1_COMMON *cm, int mi_row, int mi_col) {
   (void)mi_row;
   (void)mi_col;
 
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
     cm->partial_prob_update = 1;
-    vp10_adapt_coef_probs(cm);
+    av1_adapt_coef_probs(cm);
   }
 }
 #endif  // CONFIG_ENTROPY
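
The expansion performed by extend_to_full_distribution() and av1_model_to_full_probs() above can be read as a single table lookup keyed by the pivot probability. A minimal sketch follows; model_to_full_sketch is a hypothetical name, while aom_prob, UNCONSTRAINED_NODES, PIVOT_NODE, MODEL_NODES and av1_pareto8_full are the identifiers declared in av1/common/entropy.h after this rename.

#include <assert.h>
#include <string.h>

#include "av1/common/entropy.h"

/* Copy the UNCONSTRAINED_NODES model probabilities verbatim, then let the
 * pivot probability (1..255) select the row of av1_pareto8_full[] that
 * supplies the remaining MODEL_NODES constrained probabilities. */
static void model_to_full_sketch(const aom_prob *model, aom_prob *full) {
  memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
  assert(model[PIVOT_NODE] != 0);
  memcpy(full + UNCONSTRAINED_NODES, av1_pareto8_full[model[PIVOT_NODE] - 1],
         sizeof(aom_prob) * MODEL_NODES);
}

Because every odd row of av1_pareto8_full[] can be generated by averaging its two even neighbours (per the comment above the table), the lookup varies smoothly as the pivot probability changes.
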
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index b0afd46..63b4edd 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPY_H_
-#define VP10_COMMON_ENTROPY_H_
+#ifndef AV1_COMMON_ENTROPY_H_
+#define AV1_COMMON_ENTROPY_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
 
 #if CONFIG_ANS
@@ -51,7 +51,7 @@
 
 #define ENTROPY_NODES 11
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_pt_energy_class[ENTROPY_TOKENS]);
 
 #define CAT1_MIN_VAL 5
 #define CAT2_MIN_VAL 7
@@ -61,50 +61,50 @@
 #define CAT6_MIN_VAL 67
 
 // Extra bit probabilities.
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob[14]);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high10[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high10[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high10[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high10[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high10[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high10[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high12[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high12[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high12[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high12[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high12[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high12[18]);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define EOB_MODEL_TOKEN 3
 
 typedef struct {
-  const vpx_tree_index *tree;
-  const vpx_prob *prob;
+  const aom_tree_index *tree;
+  const aom_prob *prob;
   int len;
   int base_val;
   const int16_t *cost;
-} vp10_extra_bit;
+} av1_extra_bit;
 
 // indexed by token value
-extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS];
-#if CONFIG_VP9_HIGHBITDEPTH
-extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
-extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS];
+#if CONFIG_AOM_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS];
+extern const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS];
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define DCT_MAX_VALUE 16384
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_MAX_VALUE_HIGH10 65536
 #define DCT_MAX_VALUE_HIGH12 262144
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* Coefficients are predicted via a 3-dimensional probability table. */
 
@@ -135,18 +135,18 @@
 // #define ENTROPY_STATS
 
 typedef unsigned int
-    vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+    av1_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
 typedef unsigned int
-    vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
+    av1_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
 
 #define SUBEXP_PARAM 4   /* Subexponential code parameter */
 #define MODULUS_PARAM 13 /* Modulus parameter */
 
-struct VP10Common;
-void vp10_default_coef_probs(struct VP10Common *cm);
-void vp10_adapt_coef_probs(struct VP10Common *cm);
+struct AV1Common;
+void av1_default_coef_probs(struct AV1Common *cm);
+void av1_adapt_coef_probs(struct AV1Common *cm);
 #if CONFIG_ENTROPY
-void vp10_partial_adapt_probs(struct VP10Common *cm, int mi_row, int mi_col);
+void av1_partial_adapt_probs(struct AV1Common *cm, int mi_row, int mi_col);
 #endif  // CONFIG_ENTROPY
 
 // This is the index in the scan order beyond which all coefficients for
@@ -154,11 +154,11 @@
 // This macro is currently unused but may be used by certain implementations
 #define MAXBAND_INDEX 21
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_8x8plus[1024]);
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x8_8x4[32]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x8_8x4[32]);
 #endif  // CONFIG_EXT_TX
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x4[16]);
 
 DECLARE_ALIGNED(16, extern const uint16_t, band_count_table[TX_SIZES_ALL][8]);
 DECLARE_ALIGNED(16, extern const uint16_t,
@@ -166,11 +166,11 @@
 
 static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
   switch (tx_size) {
-    case TX_4X4: return vp10_coefband_trans_4x4;
+    case TX_4X4: return av1_coefband_trans_4x4;
 #if CONFIG_EXT_TX
-    case TX_4X8: return vp10_coefband_trans_4x8_8x4;
+    case TX_4X8: return av1_coefband_trans_4x8_8x4;
 #endif  // CONFIG_EXT_TX
-    default: return vp10_coefband_trans_8x8plus;
+    default: return av1_coefband_trans_8x8plus;
   }
 }
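
A short usage sketch for the band lookup above (band_of and scan_idx are hypothetical names; the tables and get_band_translate() are the ones declared in this header): the per-transform-size table maps a position in scan order to its coefficient band, 0 to 5, with everything past MAXBAND_INDEX + 1 filled as 5.

#include "av1/common/entropy.h"

/* Band of the coefficient at position `scan_idx` in scan order, using the
 * table selected for this transform size (av1_coefband_trans_4x4,
 * av1_coefband_trans_4x8_8x4 under CONFIG_EXT_TX, or
 * av1_coefband_trans_8x8plus). */
static int band_of(TX_SIZE tx_size, int scan_idx) {
  const uint8_t *const band_translate = get_band_translate(tx_size);
  return band_translate[scan_idx];
}
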
 
@@ -185,22 +185,22 @@
 #define PIVOT_NODE 2  // which node is pivot
 
 #define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
-extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
-extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
+extern const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
+extern const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
 #if CONFIG_ANS
 extern const AnsP10
-    vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
+    av1_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
 
 typedef rans_dec_lut coeff_cdf_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
 #endif  // CONFIG_ANS
 
-typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
-                                       [UNCONSTRAINED_NODES];
+typedef aom_prob av1_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+                                      [UNCONSTRAINED_NODES];
 
-typedef unsigned int vp10_coeff_count_model
+typedef unsigned int av1_coeff_count_model
     [REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
 
-void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full);
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full);
 
 typedef char ENTROPY_CONTEXT;
 
@@ -263,7 +263,7 @@
 
 #if CONFIG_ANS
 struct frame_contexts;
-void vp10_coef_pareto_cdfs(struct frame_contexts *fc);
+void av1_coef_pareto_cdfs(struct frame_contexts *fc);
 #endif  // CONFIG_ANS
 
 #if CONFIG_ENTROPY
@@ -283,14 +283,14 @@
 
 #endif  // CONFIG_ENTROPY
 
-static INLINE vpx_prob vp10_merge_probs(vpx_prob pre_prob,
-                                        const unsigned int ct[2],
-                                        unsigned int count_sat,
-                                        unsigned int max_update_factor) {
+static INLINE aom_prob av1_merge_probs(aom_prob pre_prob,
+                                       const unsigned int ct[2],
+                                       unsigned int count_sat,
+                                       unsigned int max_update_factor) {
 #if CONFIG_ENTROPY
-  const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
+  const aom_prob prob = get_binary_prob(ct[0], ct[1]);
   const unsigned int count =
-      VPXMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
+      AOMMIN(ct[0] + ct[1], (unsigned int)(1 << count_sat));
   const unsigned int factor = count << (max_update_factor - count_sat);
   return weighted_prob(pre_prob, prob, factor);
 #else
@@ -298,11 +298,11 @@
 #endif  // CONFIG_ENTROPY
 }
 
-static INLINE vpx_prob vp10_mode_mv_merge_probs(vpx_prob pre_prob,
-                                                const unsigned int ct[2]) {
+static INLINE aom_prob av1_mode_mv_merge_probs(aom_prob pre_prob,
+                                               const unsigned int ct[2]) {
 #if CONFIG_ENTROPY
-  return vp10_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
-                          MODE_MV_MAX_UPDATE_FACTOR_BITS);
+  return av1_merge_probs(pre_prob, ct, MODE_MV_COUNT_SAT_BITS,
+                         MODE_MV_MAX_UPDATE_FACTOR_BITS);
 #else
   return mode_mv_merge_probs(pre_prob, ct);
 #endif  // CONFIG_ENTROPY
@@ -312,4 +312,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPY_H_
+#endif  // AV1_COMMON_ENTROPY_H_
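
The merge helpers renamed above boil down to a saturating-count weighted blend between the previous probability and one estimated from the new counts. The sketch below restates that arithmetic for the default (non-CONFIG_ENTROPY) path; est_prob and merge_sketch are hypothetical names, and the expressions paraphrase merge_probs()/weighted_prob() from aom_dsp/prob.h rather than reproducing them verbatim.

#include <stdint.h>

typedef uint8_t aom_prob;  /* as in aom_dsp/prob.h */

/* Estimate P(bit == 0) in 1/256 units from the counts, clipped to [1, 255]. */
static aom_prob est_prob(unsigned int n0, unsigned int n1) {
  const unsigned int den = n0 + n1;
  unsigned int p;
  if (den == 0) return 128; /* no observations: stay neutral */
  p = (256 * n0 + den / 2) / den;
  if (p < 1) p = 1;
  if (p > 255) p = 255;
  return (aom_prob)p;
}

/* Blend: the more symbols observed (capped at count_sat), the larger the
 * share (factor / 256, at most max_update_factor) given to the new estimate.
 * count_sat is assumed non-zero, as in the callers. */
static aom_prob merge_sketch(aom_prob pre_prob, const unsigned int ct[2],
                             unsigned int count_sat,
                             unsigned int max_update_factor) {
  const unsigned int total = ct[0] + ct[1];
  const unsigned int count = total < count_sat ? total : count_sat;
  const unsigned int factor = max_update_factor * count / count_sat;
  const aom_prob prob = est_prob(ct[0], ct[1]);
  return (aom_prob)((pre_prob * (256 - factor) + prob * factor + 128) >> 8);
}

count_sat bounds how many observed symbols can influence the update and max_update_factor bounds how far one frame's statistics can move the probability; the CONFIG_ENTROPY variant above expresses both as bit counts instead.
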
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 98e26e7..80ed00f 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -8,152 +8,153 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
 #include "av1/common/reconinter.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/seg_common.h"
 
-const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
-    { {
-          // above = dc
-          { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
-          { 92, 45, 102, 136, 116, 180, 74, 90, 100 },  // left = v
-          { 73, 32, 19, 187, 222, 215, 46, 34, 100 },   // left = h
-          { 91, 30, 32, 116, 121, 186, 93, 86, 94 },    // left = d45
-          { 72, 35, 36, 149, 68, 206, 68, 63, 105 },    // left = d135
-          { 73, 31, 28, 138, 57, 124, 55, 122, 151 },   // left = d117
-          { 67, 23, 21, 140, 126, 197, 40, 37, 171 },   // left = d153
-          { 86, 27, 28, 128, 154, 212, 45, 43, 53 },    // left = d207
-          { 74, 32, 27, 107, 86, 160, 63, 134, 102 },   // left = d63
-          { 59, 67, 44, 140, 161, 202, 78, 67, 119 }    // left = tm
-      },
-      {
-          // above = v
-          { 63, 36, 126, 146, 123, 158, 60, 90, 96 },   // left = dc
-          { 43, 46, 168, 134, 107, 128, 69, 142, 92 },  // left = v
-          { 44, 29, 68, 159, 201, 177, 50, 57, 77 },    // left = h
-          { 58, 38, 76, 114, 97, 172, 78, 133, 92 },    // left = d45
-          { 46, 41, 76, 140, 63, 184, 69, 112, 57 },    // left = d135
-          { 38, 32, 85, 140, 46, 112, 54, 151, 133 },   // left = d117
-          { 39, 27, 61, 131, 110, 175, 44, 75, 136 },   // left = d153
-          { 52, 30, 74, 113, 130, 175, 51, 64, 58 },    // left = d207
-          { 47, 35, 80, 100, 74, 143, 64, 163, 74 },    // left = d63
-          { 36, 61, 116, 114, 128, 162, 80, 125, 82 }   // left = tm
-      },
-      {
-          // above = h
-          { 82, 26, 26, 171, 208, 204, 44, 32, 105 },  // left = dc
-          { 55, 44, 68, 166, 179, 192, 57, 57, 108 },  // left = v
-          { 42, 26, 11, 199, 241, 228, 23, 15, 85 },   // left = h
-          { 68, 42, 19, 131, 160, 199, 55, 52, 83 },   // left = d45
-          { 58, 50, 25, 139, 115, 232, 39, 52, 118 },  // left = d135
-          { 50, 35, 33, 153, 104, 162, 64, 59, 131 },  // left = d117
-          { 44, 24, 16, 150, 177, 202, 33, 19, 156 },  // left = d153
-          { 55, 27, 12, 153, 203, 218, 26, 27, 49 },   // left = d207
-          { 53, 49, 21, 110, 116, 168, 59, 80, 76 },   // left = d63
-          { 38, 72, 19, 168, 203, 212, 50, 50, 107 }   // left = tm
-      },
-      {
-          // above = d45
-          { 103, 26, 36, 129, 132, 201, 83, 80, 93 },  // left = dc
-          { 59, 38, 83, 112, 103, 162, 98, 136, 90 },  // left = v
-          { 62, 30, 23, 158, 200, 207, 59, 57, 50 },   // left = h
-          { 67, 30, 29, 84, 86, 191, 102, 91, 59 },    // left = d45
-          { 60, 32, 33, 112, 71, 220, 64, 89, 104 },   // left = d135
-          { 53, 26, 34, 130, 56, 149, 84, 120, 103 },  // left = d117
-          { 53, 21, 23, 133, 109, 210, 56, 77, 172 },  // left = d153
-          { 77, 19, 29, 112, 142, 228, 55, 66, 36 },   // left = d207
-          { 61, 29, 29, 93, 97, 165, 83, 175, 162 },   // left = d63
-          { 47, 47, 43, 114, 137, 181, 100, 99, 95 }   // left = tm
-      },
-      {
-          // above = d135
-          { 69, 23, 29, 128, 83, 199, 46, 44, 101 },   // left = dc
-          { 53, 40, 55, 139, 69, 183, 61, 80, 110 },   // left = v
-          { 40, 29, 19, 161, 180, 207, 43, 24, 91 },   // left = h
-          { 60, 34, 19, 105, 61, 198, 53, 64, 89 },    // left = d45
-          { 52, 31, 22, 158, 40, 209, 58, 62, 89 },    // left = d135
-          { 44, 31, 29, 147, 46, 158, 56, 102, 198 },  // left = d117
-          { 35, 19, 12, 135, 87, 209, 41, 45, 167 },   // left = d153
-          { 55, 25, 21, 118, 95, 215, 38, 39, 66 },    // left = d207
-          { 51, 38, 25, 113, 58, 164, 70, 93, 97 },    // left = d63
-          { 47, 54, 34, 146, 108, 203, 72, 103, 151 }  // left = tm
-      },
-      {
-          // above = d117
-          { 64, 19, 37, 156, 66, 138, 49, 95, 133 },   // left = dc
-          { 46, 27, 80, 150, 55, 124, 55, 121, 135 },  // left = v
-          { 36, 23, 27, 165, 149, 166, 54, 64, 118 },  // left = h
-          { 53, 21, 36, 131, 63, 163, 60, 109, 81 },   // left = d45
-          { 40, 26, 35, 154, 40, 185, 51, 97, 123 },   // left = d135
-          { 35, 19, 34, 179, 19, 97, 48, 129, 124 },   // left = d117
-          { 36, 20, 26, 136, 62, 164, 33, 77, 154 },   // left = d153
-          { 45, 18, 32, 130, 90, 157, 40, 79, 91 },    // left = d207
-          { 45, 26, 28, 129, 45, 129, 49, 147, 123 },  // left = d63
-          { 38, 44, 51, 136, 74, 162, 57, 97, 121 }    // left = tm
-      },
-      {
-          // above = d153
-          { 75, 17, 22, 136, 138, 185, 32, 34, 166 },  // left = dc
-          { 56, 39, 58, 133, 117, 173, 48, 53, 187 },  // left = v
-          { 35, 21, 12, 161, 212, 207, 20, 23, 145 },  // left = h
-          { 56, 29, 19, 117, 109, 181, 55, 68, 112 },  // left = d45
-          { 47, 29, 17, 153, 64, 220, 59, 51, 114 },   // left = d135
-          { 46, 16, 24, 136, 76, 147, 41, 64, 172 },   // left = d117
-          { 34, 17, 11, 108, 152, 187, 13, 15, 209 },  // left = d153
-          { 51, 24, 14, 115, 133, 209, 32, 26, 104 },  // left = d207
-          { 55, 30, 18, 122, 79, 179, 44, 88, 116 },   // left = d63
-          { 37, 49, 25, 129, 168, 164, 41, 54, 148 }   // left = tm
-      },
-      {
-          // above = d207
-          { 82, 22, 32, 127, 143, 213, 39, 41, 70 },   // left = dc
-          { 62, 44, 61, 123, 105, 189, 48, 57, 64 },   // left = v
-          { 47, 25, 17, 175, 222, 220, 24, 30, 86 },   // left = h
-          { 68, 36, 17, 106, 102, 206, 59, 74, 74 },   // left = d45
-          { 57, 39, 23, 151, 68, 216, 55, 63, 58 },    // left = d135
-          { 49, 30, 35, 141, 70, 168, 82, 40, 115 },   // left = d117
-          { 51, 25, 15, 136, 129, 202, 38, 35, 139 },  // left = d153
-          { 68, 26, 16, 111, 141, 215, 29, 28, 28 },   // left = d207
-          { 59, 39, 19, 114, 75, 180, 77, 104, 42 },   // left = d63
-          { 40, 61, 26, 126, 152, 206, 61, 59, 93 }    // left = tm
-      },
-      {
-          // above = d63
-          { 78, 23, 39, 111, 117, 170, 74, 124, 94 },   // left = dc
-          { 48, 34, 86, 101, 92, 146, 78, 179, 134 },   // left = v
-          { 47, 22, 24, 138, 187, 178, 68, 69, 59 },    // left = h
-          { 56, 25, 33, 105, 112, 187, 95, 177, 129 },  // left = d45
-          { 48, 31, 27, 114, 63, 183, 82, 116, 56 },    // left = d135
-          { 43, 28, 37, 121, 63, 123, 61, 192, 169 },   // left = d117
-          { 42, 17, 24, 109, 97, 177, 56, 76, 122 },    // left = d153
-          { 58, 18, 28, 105, 139, 182, 70, 92, 63 },    // left = d207
-          { 46, 23, 32, 74, 86, 150, 67, 183, 88 },     // left = d63
-          { 36, 38, 48, 92, 122, 165, 88, 137, 91 }     // left = tm
-      },
-      {
-          // above = tm
-          { 65, 70, 60, 155, 159, 199, 61, 60, 81 },    // left = dc
-          { 44, 78, 115, 132, 119, 173, 71, 112, 93 },  // left = v
-          { 39, 38, 21, 184, 227, 206, 42, 32, 64 },    // left = h
-          { 58, 47, 36, 124, 137, 193, 80, 82, 78 },    // left = d45
-          { 49, 50, 35, 144, 95, 205, 63, 78, 59 },     // left = d135
-          { 41, 53, 52, 148, 71, 142, 65, 128, 51 },    // left = d117
-          { 40, 36, 28, 143, 143, 202, 40, 55, 137 },   // left = d153
-          { 52, 34, 29, 129, 183, 227, 42, 35, 43 },    // left = d207
-          { 42, 44, 44, 104, 105, 164, 64, 130, 80 },   // left = d63
-          { 43, 81, 53, 140, 169, 204, 68, 84, 72 }     // left = tm
-      } };
+const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = {
+  {
+      // above = dc
+      { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
+      { 92, 45, 102, 136, 116, 180, 74, 90, 100 },  // left = v
+      { 73, 32, 19, 187, 222, 215, 46, 34, 100 },   // left = h
+      { 91, 30, 32, 116, 121, 186, 93, 86, 94 },    // left = d45
+      { 72, 35, 36, 149, 68, 206, 68, 63, 105 },    // left = d135
+      { 73, 31, 28, 138, 57, 124, 55, 122, 151 },   // left = d117
+      { 67, 23, 21, 140, 126, 197, 40, 37, 171 },   // left = d153
+      { 86, 27, 28, 128, 154, 212, 45, 43, 53 },    // left = d207
+      { 74, 32, 27, 107, 86, 160, 63, 134, 102 },   // left = d63
+      { 59, 67, 44, 140, 161, 202, 78, 67, 119 }    // left = tm
+  },
+  {
+      // above = v
+      { 63, 36, 126, 146, 123, 158, 60, 90, 96 },   // left = dc
+      { 43, 46, 168, 134, 107, 128, 69, 142, 92 },  // left = v
+      { 44, 29, 68, 159, 201, 177, 50, 57, 77 },    // left = h
+      { 58, 38, 76, 114, 97, 172, 78, 133, 92 },    // left = d45
+      { 46, 41, 76, 140, 63, 184, 69, 112, 57 },    // left = d135
+      { 38, 32, 85, 140, 46, 112, 54, 151, 133 },   // left = d117
+      { 39, 27, 61, 131, 110, 175, 44, 75, 136 },   // left = d153
+      { 52, 30, 74, 113, 130, 175, 51, 64, 58 },    // left = d207
+      { 47, 35, 80, 100, 74, 143, 64, 163, 74 },    // left = d63
+      { 36, 61, 116, 114, 128, 162, 80, 125, 82 }   // left = tm
+  },
+  {
+      // above = h
+      { 82, 26, 26, 171, 208, 204, 44, 32, 105 },  // left = dc
+      { 55, 44, 68, 166, 179, 192, 57, 57, 108 },  // left = v
+      { 42, 26, 11, 199, 241, 228, 23, 15, 85 },   // left = h
+      { 68, 42, 19, 131, 160, 199, 55, 52, 83 },   // left = d45
+      { 58, 50, 25, 139, 115, 232, 39, 52, 118 },  // left = d135
+      { 50, 35, 33, 153, 104, 162, 64, 59, 131 },  // left = d117
+      { 44, 24, 16, 150, 177, 202, 33, 19, 156 },  // left = d153
+      { 55, 27, 12, 153, 203, 218, 26, 27, 49 },   // left = d207
+      { 53, 49, 21, 110, 116, 168, 59, 80, 76 },   // left = d63
+      { 38, 72, 19, 168, 203, 212, 50, 50, 107 }   // left = tm
+  },
+  {
+      // above = d45
+      { 103, 26, 36, 129, 132, 201, 83, 80, 93 },  // left = dc
+      { 59, 38, 83, 112, 103, 162, 98, 136, 90 },  // left = v
+      { 62, 30, 23, 158, 200, 207, 59, 57, 50 },   // left = h
+      { 67, 30, 29, 84, 86, 191, 102, 91, 59 },    // left = d45
+      { 60, 32, 33, 112, 71, 220, 64, 89, 104 },   // left = d135
+      { 53, 26, 34, 130, 56, 149, 84, 120, 103 },  // left = d117
+      { 53, 21, 23, 133, 109, 210, 56, 77, 172 },  // left = d153
+      { 77, 19, 29, 112, 142, 228, 55, 66, 36 },   // left = d207
+      { 61, 29, 29, 93, 97, 165, 83, 175, 162 },   // left = d63
+      { 47, 47, 43, 114, 137, 181, 100, 99, 95 }   // left = tm
+  },
+  {
+      // above = d135
+      { 69, 23, 29, 128, 83, 199, 46, 44, 101 },   // left = dc
+      { 53, 40, 55, 139, 69, 183, 61, 80, 110 },   // left = v
+      { 40, 29, 19, 161, 180, 207, 43, 24, 91 },   // left = h
+      { 60, 34, 19, 105, 61, 198, 53, 64, 89 },    // left = d45
+      { 52, 31, 22, 158, 40, 209, 58, 62, 89 },    // left = d135
+      { 44, 31, 29, 147, 46, 158, 56, 102, 198 },  // left = d117
+      { 35, 19, 12, 135, 87, 209, 41, 45, 167 },   // left = d153
+      { 55, 25, 21, 118, 95, 215, 38, 39, 66 },    // left = d207
+      { 51, 38, 25, 113, 58, 164, 70, 93, 97 },    // left = d63
+      { 47, 54, 34, 146, 108, 203, 72, 103, 151 }  // left = tm
+  },
+  {
+      // above = d117
+      { 64, 19, 37, 156, 66, 138, 49, 95, 133 },   // left = dc
+      { 46, 27, 80, 150, 55, 124, 55, 121, 135 },  // left = v
+      { 36, 23, 27, 165, 149, 166, 54, 64, 118 },  // left = h
+      { 53, 21, 36, 131, 63, 163, 60, 109, 81 },   // left = d45
+      { 40, 26, 35, 154, 40, 185, 51, 97, 123 },   // left = d135
+      { 35, 19, 34, 179, 19, 97, 48, 129, 124 },   // left = d117
+      { 36, 20, 26, 136, 62, 164, 33, 77, 154 },   // left = d153
+      { 45, 18, 32, 130, 90, 157, 40, 79, 91 },    // left = d207
+      { 45, 26, 28, 129, 45, 129, 49, 147, 123 },  // left = d63
+      { 38, 44, 51, 136, 74, 162, 57, 97, 121 }    // left = tm
+  },
+  {
+      // above = d153
+      { 75, 17, 22, 136, 138, 185, 32, 34, 166 },  // left = dc
+      { 56, 39, 58, 133, 117, 173, 48, 53, 187 },  // left = v
+      { 35, 21, 12, 161, 212, 207, 20, 23, 145 },  // left = h
+      { 56, 29, 19, 117, 109, 181, 55, 68, 112 },  // left = d45
+      { 47, 29, 17, 153, 64, 220, 59, 51, 114 },   // left = d135
+      { 46, 16, 24, 136, 76, 147, 41, 64, 172 },   // left = d117
+      { 34, 17, 11, 108, 152, 187, 13, 15, 209 },  // left = d153
+      { 51, 24, 14, 115, 133, 209, 32, 26, 104 },  // left = d207
+      { 55, 30, 18, 122, 79, 179, 44, 88, 116 },   // left = d63
+      { 37, 49, 25, 129, 168, 164, 41, 54, 148 }   // left = tm
+  },
+  {
+      // above = d207
+      { 82, 22, 32, 127, 143, 213, 39, 41, 70 },   // left = dc
+      { 62, 44, 61, 123, 105, 189, 48, 57, 64 },   // left = v
+      { 47, 25, 17, 175, 222, 220, 24, 30, 86 },   // left = h
+      { 68, 36, 17, 106, 102, 206, 59, 74, 74 },   // left = d45
+      { 57, 39, 23, 151, 68, 216, 55, 63, 58 },    // left = d135
+      { 49, 30, 35, 141, 70, 168, 82, 40, 115 },   // left = d117
+      { 51, 25, 15, 136, 129, 202, 38, 35, 139 },  // left = d153
+      { 68, 26, 16, 111, 141, 215, 29, 28, 28 },   // left = d207
+      { 59, 39, 19, 114, 75, 180, 77, 104, 42 },   // left = d63
+      { 40, 61, 26, 126, 152, 206, 61, 59, 93 }    // left = tm
+  },
+  {
+      // above = d63
+      { 78, 23, 39, 111, 117, 170, 74, 124, 94 },   // left = dc
+      { 48, 34, 86, 101, 92, 146, 78, 179, 134 },   // left = v
+      { 47, 22, 24, 138, 187, 178, 68, 69, 59 },    // left = h
+      { 56, 25, 33, 105, 112, 187, 95, 177, 129 },  // left = d45
+      { 48, 31, 27, 114, 63, 183, 82, 116, 56 },    // left = d135
+      { 43, 28, 37, 121, 63, 123, 61, 192, 169 },   // left = d117
+      { 42, 17, 24, 109, 97, 177, 56, 76, 122 },    // left = d153
+      { 58, 18, 28, 105, 139, 182, 70, 92, 63 },    // left = d207
+      { 46, 23, 32, 74, 86, 150, 67, 183, 88 },     // left = d63
+      { 36, 38, 48, 92, 122, 165, 88, 137, 91 }     // left = tm
+  },
+  {
+      // above = tm
+      { 65, 70, 60, 155, 159, 199, 61, 60, 81 },    // left = dc
+      { 44, 78, 115, 132, 119, 173, 71, 112, 93 },  // left = v
+      { 39, 38, 21, 184, 227, 206, 42, 32, 64 },    // left = h
+      { 58, 47, 36, 124, 137, 193, 80, 82, 78 },    // left = d45
+      { 49, 50, 35, 144, 95, 205, 63, 78, 59 },     // left = d135
+      { 41, 53, 52, 148, 71, 142, 65, 128, 51 },    // left = d117
+      { 40, 36, 28, 143, 143, 202, 40, 55, 137 },   // left = d153
+      { 52, 34, 29, 129, 183, 227, 42, 35, 43 },    // left = d207
+      { 42, 44, 44, 104, 105, 164, 64, 130, 80 },   // left = d63
+      { 43, 81, 53, 140, 169, 204, 68, 84, 72 }     // left = tm
+  }
+};
 
-static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
+static const aom_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
   { 65, 32, 18, 144, 162, 194, 41, 51, 98 },   // block_size < 8x8
   { 132, 68, 18, 165, 217, 196, 45, 40, 78 },  // block_size < 16x16
   { 173, 80, 19, 176, 240, 193, 64, 35, 46 },  // block_size < 32x32
   { 221, 135, 38, 194, 248, 121, 96, 85, 29 }  // block_size >= 32x32
 };
 
-static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
+static const aom_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
   { 120, 7, 76, 176, 208, 126, 28, 54, 103 },   // y = dc
   { 48, 12, 154, 155, 139, 90, 34, 117, 119 },  // y = v
   { 67, 6, 25, 204, 243, 158, 13, 21, 96 },     // y = h
@@ -167,7 +168,7 @@
 };
 
 #if CONFIG_EXT_PARTITION_TYPES
-static const vpx_prob
+static const aom_prob
     default_partition_probs[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1] = {
       // 8x8 -> 4x4
       { 199, 122, 141, 128, 128, 128, 128 },  // a/l both not split
@@ -198,7 +199,7 @@
 #endif                                      // CONFIG_EXT_PARTITION
     };
 #else
-static const vpx_prob
+static const aom_prob
     default_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
       // 8x8 -> 4x4
       { 199, 122, 141 },  // a/l both not split
@@ -231,27 +232,27 @@
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_REF_MV
-static const vpx_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
+static const aom_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
   200, 180, 150, 150, 110, 70, 60,
 };
 
-static const vpx_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
+static const aom_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
   192, 64,
 };
 
-static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
+static const aom_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
   220, 220, 200, 200, 180, 128, 30, 220, 30,
 };
 
-static const vpx_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
+static const aom_prob default_drl_prob[DRL_MODE_CONTEXTS] = { 128, 160, 180,
                                                               128, 160 };
 
 #if CONFIG_EXT_INTER
-static const vpx_prob default_new2mv_prob = 180;
+static const aom_prob default_new2mv_prob = 180;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
 
-static const vpx_prob
+static const aom_prob
     default_inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] = {
 #if CONFIG_EXT_INTER
       // TODO(zoeliu): To adjust the initial default probs
@@ -274,7 +275,7 @@
     };
 
 #if CONFIG_EXT_INTER
-static const vpx_prob default_inter_compound_mode_probs
+static const aom_prob default_inter_compound_mode_probs
     [INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1] = {
       { 2, 173, 68, 192, 64, 192, 128, 180, 180 },   // 0 = both zero mv
       { 7, 145, 160, 192, 64, 192, 128, 180, 180 },  // 1 = 1 zero + 1 predicted
@@ -285,11 +286,11 @@
       { 25, 29, 50, 192, 64, 192, 128, 180, 180 },   // 6 = two intra neighbours
     };
 
-static const vpx_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
+static const aom_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
   208, 208, 208, 208,
 };
 
-static const vpx_prob
+static const aom_prob
     default_interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1] = {
       { 65, 32, 18, 144, 162, 194, 41, 51, 98 },   // block_size < 8x8
       { 132, 68, 18, 165, 217, 196, 45, 40, 78 },  // block_size < 16x16
@@ -297,14 +298,14 @@
       { 221, 135, 38, 194, 248, 121, 96, 85, 29 }  // block_size >= 32x32
     };
 
-static const vpx_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
   208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
 #if CONFIG_EXT_PARTITION
   208, 208, 208
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static const vpx_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
+static const aom_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
   208, 208, 208, 208, 208, 208, 216, 216, 216, 224, 224, 224, 240,
 #if CONFIG_EXT_PARTITION
   255, 255, 255
@@ -314,10 +315,10 @@
 
 // Change this section appropriately once warped motion is supported
 #if CONFIG_OBMC && !CONFIG_WARPED_MOTION
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, -OBMC_CAUSAL
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
       { 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -328,10 +329,10 @@
 
 #elif !CONFIG_OBMC && CONFIG_WARPED_MOTION
 
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, -WARPED_CAUSAL
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255 }, { 255 }, { 255 }, { 151 }, { 153 }, { 144 }, { 178 },
       { 165 }, { 160 }, { 207 }, { 195 }, { 168 }, { 244 },
@@ -342,10 +343,10 @@
 
 #elif CONFIG_OBMC && CONFIG_WARPED_MOTION
 
-const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
+const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)] = {
   -SIMPLE_TRANSLATION, 2, -OBMC_CAUSAL, -WARPED_CAUSAL,
 };
-static const vpx_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
+static const aom_prob default_motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1] =
     {
       { 255, 200 }, { 255, 200 }, { 255, 200 }, { 151, 200 }, { 153, 200 },
       { 144, 200 }, { 178, 200 }, { 165, 200 }, { 160, 200 }, { 207, 200 },
@@ -357,7 +358,7 @@
 #endif  // CONFIG_OBMC || !CONFIG_WARPED_MOTION
 
 /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
+const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
   -DC_PRED,   2,          /* 0 = DC_NODE */
   -TM_PRED,   4,          /* 1 = TM_NODE */
   -V_PRED,    6,          /* 2 = V_NODE */
@@ -369,7 +370,7 @@
   -D153_PRED, -D207_PRED  /* 8 = D153_NODE */
 };
 
-const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
+const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
   -INTER_OFFSET(ZEROMV),    2,
   -INTER_OFFSET(NEARESTMV), 4,
 #if CONFIG_EXT_INTER
@@ -382,7 +383,7 @@
 
 #if CONFIG_EXT_INTER
 /* clang-format off */
-const vpx_tree_index vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
+const aom_tree_index av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)] = {
   -II_DC_PRED, 2,                   /* 0 = II_DC_NODE     */
   -II_TM_PRED, 4,                   /* 1 = II_TM_NODE     */
   -II_V_PRED, 6,                    /* 2 = II_V_NODE      */
@@ -394,7 +395,7 @@
   -II_D153_PRED, -II_D207_PRED      /* 8 = II_D153_NODE   */
 };
 
-const vpx_tree_index vp10_inter_compound_mode_tree
+const aom_tree_index av1_inter_compound_mode_tree
     [TREE_SIZE(INTER_COMPOUND_MODES)] = {
   -INTER_COMPOUND_OFFSET(ZERO_ZEROMV), 2,
   -INTER_COMPOUND_OFFSET(NEAREST_NEARESTMV), 4,
@@ -410,13 +411,13 @@
 /* clang-format on */
 #endif  // CONFIG_EXT_INTER
 
-const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
+const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
   -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
 };
 
 #if CONFIG_EXT_PARTITION_TYPES
 /* clang-format off */
-const vpx_tree_index vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
+const aom_tree_index av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)] = {
   -PARTITION_NONE, 2,
   6, 4,
   8, -PARTITION_SPLIT,
@@ -428,16 +429,16 @@
 /* clang-format on */
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
-static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
+static const aom_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
   9, 102, 187, 225
 };
 
-static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
+static const aom_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
   239, 183, 119, 96, 41
 };
 
 #if CONFIG_EXT_REFS
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][FWD_REFS - 1] = {
   // TODO(zoeliu): To adjust the initial prob values.
   { 33, 16, 16 },
   { 77, 74, 74 },
@@ -445,16 +446,16 @@
   { 172, 170, 170 },
   { 238, 247, 247 }
 };
-static const vpx_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
+static const aom_prob default_comp_bwdref_p[REF_CONTEXTS][BWD_REFS - 1] = {
   { 16 }, { 74 }, { 142 }, { 170 }, { 247 }
 };
 #else
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
+static const aom_prob default_comp_ref_p[REF_CONTEXTS][COMP_REFS - 1] = {
   { 50 }, { 126 }, { 123 }, { 221 }, { 226 }
 };
 #endif  // CONFIG_EXT_REFS
 
-static const vpx_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
+static const aom_prob default_single_ref_p[REF_CONTEXTS][SINGLE_REFS - 1] = {
 #if CONFIG_EXT_REFS
   { 33, 16, 16, 16, 16 },
   { 77, 74, 74, 74, 74 },
@@ -466,14 +467,14 @@
 #endif  // CONFIG_EXT_REFS
 };
 
-const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
+const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)] = {
   -TWO_COLORS,  2, -THREE_COLORS, 4,  -FOUR_COLORS,  6,
   -FIVE_COLORS, 8, -SIX_COLORS,   10, -SEVEN_COLORS, -EIGHT_COLORS,
 };
 
 // TODO(huisu): tune these probs
-const vpx_prob
-    vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+const aom_prob
+    av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
       { 96, 89, 100, 64, 77, 130 },   { 22, 15, 44, 16, 34, 82 },
       { 30, 19, 57, 18, 38, 86 },     { 94, 36, 104, 23, 43, 92 },
       { 116, 76, 107, 46, 65, 105 },  { 112, 82, 94, 40, 70, 112 },
@@ -485,21 +486,20 @@
 #endif  // CONFIG_EXT_PARTITION
     };
 
-const vpx_prob
-    vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] =
-        {
-          { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
-          { 72, 151, 204, 139, 155, 161 },  { 78, 135, 171, 104, 120, 173 },
-          { 59, 92, 131, 78, 92, 142 },     { 75, 118, 149, 84, 90, 128 },
-          { 89, 87, 92, 66, 66, 128 },      { 67, 53, 54, 55, 66, 93 },
-          { 120, 130, 83, 171, 75, 214 },   { 72, 55, 66, 68, 79, 107 },
+const aom_prob
+    av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1] = {
+      { 160, 196, 228, 213, 175, 230 }, { 87, 148, 208, 141, 166, 163 },
+      { 72, 151, 204, 139, 155, 161 },  { 78, 135, 171, 104, 120, 173 },
+      { 59, 92, 131, 78, 92, 142 },     { 75, 118, 149, 84, 90, 128 },
+      { 89, 87, 92, 66, 66, 128 },      { 67, 53, 54, 55, 66, 93 },
+      { 120, 130, 83, 171, 75, 214 },   { 72, 55, 66, 68, 79, 107 },
 #if CONFIG_EXT_PARTITION
-          { 72, 55, 66, 68, 79, 107 },      { 72, 55, 66, 68, 79, 107 },
-          { 72, 55, 66, 68, 79, 107 },
+      { 72, 55, 66, 68, 79, 107 },      { 72, 55, 66, 68, 79, 107 },
+      { 72, 55, 66, 68, 79, 107 },
 #endif  // CONFIG_EXT_PARTITION
-        };
+    };
 
-const vpx_prob vp10_default_palette_y_mode_prob
+const aom_prob av1_default_palette_y_mode_prob
     [PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS] = {
       { 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
       { 240, 180, 100 }, { 240, 180, 100 }, { 240, 180, 100 },
@@ -510,10 +510,10 @@
 #endif  // CONFIG_EXT_PARTITION
     };
 
-const vpx_prob vp10_default_palette_uv_mode_prob[2] = { 253, 229 };
+const aom_prob av1_default_palette_uv_mode_prob[2] = { 253, 229 };
 
-const vpx_tree_index
-    vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
+const aom_tree_index
+    av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)] = {
       { // 2 colors
         -PALETTE_COLOR_ONE, -PALETTE_COLOR_TWO },
       { // 3 colors
@@ -537,7 +537,7 @@
         -PALETTE_COLOR_SEVEN, -PALETTE_COLOR_EIGHT },
     };
 
-const vpx_prob vp10_default_palette_y_color_prob
+const aom_prob av1_default_palette_y_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
       {
           // 2 colors
@@ -674,7 +674,7 @@
       }
     };
 
-const vpx_prob vp10_default_palette_uv_color_prob
+const aom_prob av1_default_palette_uv_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] = {
       {
           // 2 colors
@@ -822,7 +822,7 @@
   9680, 10648, 10890, 13310
 };
 
-const vpx_tree_index vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
+const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)] = {
   {
       // Max tx_size is 8X8
       -TX_4X4, -TX_8X8,
@@ -837,7 +837,7 @@
   },
 };
 
-static const vpx_prob
+static const aom_prob
     default_tx_size_prob[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1] = {
       {
           // Max tx_size is 8X8
@@ -856,8 +856,8 @@
       },
     };
 
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
-                                   int c, int n, int *color_order) {
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+                                  int c, int n, int *color_order) {
   int i, j, max, max_idx, temp;
   int scores[PALETTE_MAX_SIZE + 10];
   int weights[4] = { 3, 2, 3, 2 };
@@ -926,15 +926,15 @@
 }
 
 #if CONFIG_VAR_TX
-static const vpx_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
+static const aom_prob default_txfm_partition_probs[TXFM_PARTITION_CONTEXTS] = {
   192, 128, 64, 192, 128, 64, 192, 128, 64,
 };
 #endif
 
-static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
+static const aom_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
 
 #if CONFIG_EXT_INTERP
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
 #if CONFIG_DUAL_FILTER
       { 235, 192, 128, 128 }, { 36, 243, 208, 128 }, { 34, 16, 128, 128 },
@@ -955,7 +955,7 @@
     };
 #else  // CONFIG_EXT_INTERP
 #if CONFIG_DUAL_FILTER
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
 
@@ -966,7 +966,7 @@
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 10, 3 },
     };
 #else
-static const vpx_prob default_switchable_interp_prob
+static const aom_prob default_switchable_interp_prob
     [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
       { 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
     };
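
The aom_tree_index tables renamed throughout this file (av1_intra_mode_tree, av1_partition_tree, av1_palette_size_tree, av1_tx_size_tree, and so on) share one encoding: a non-negative entry is the offset of the next left/right node pair, and a leaf is stored as the negated symbol value. A minimal walking sketch, where read_bit_fn and ctx stand in for the entropy decoder's boolean reader:

#include "aom_dsp/prob.h"

typedef int (*read_bit_fn)(void *ctx, aom_prob prob_of_zero);

/* One bit per internal node: a 0 bit follows tree[i], a 1 bit follows
 * tree[i + 1], and probs[i >> 1] is the probability of the 0 branch at
 * node i / 2. The walk ends at a negative entry, which is minus the
 * decoded symbol. */
static int read_tree_symbol(const aom_tree_index *tree, const aom_prob *probs,
                            read_bit_fn read_bit, void *ctx) {
  aom_tree_index i = 0;
  do {
    i = tree[i + read_bit(ctx, probs[i >> 1])];
  } while (i > 0);
  return -i;
}

The probability arrays declared alongside each tree (for example default_partition_probs above) hold exactly one entry per internal node, which is why they are sized with "- 1" relative to the symbol count.
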
@@ -975,7 +975,7 @@
 
 #if CONFIG_EXT_TX
 /* clang-format off */
-const vpx_tree_index vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER]
+const aom_tree_index av1_ext_tx_inter_tree[EXT_TX_SETS_INTER]
                                            [TREE_SIZE(TX_TYPES)] = {
  { // ToDo(yaowu): remove unused entry 0.
     0
@@ -1012,7 +1012,7 @@
   }
 };
 
-const vpx_tree_index vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
+const aom_tree_index av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA]
                                            [TREE_SIZE(TX_TYPES)] = {
   {  // ToDo(yaowu): remove unused entry 0.
     0
@@ -1032,7 +1032,7 @@
 };
 /* clang-format on */
 
-static const vpx_prob
+static const aom_prob
     default_inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1] = {
       {
           // ToDo(yaowu): remove unused entry 0.
@@ -1073,7 +1073,7 @@
       }
     };
 
-static const vpx_prob default_intra_ext_tx_prob
+static const aom_prob default_intra_ext_tx_prob
     [EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES - 1] = {
       {
           // ToDo(yaowu): remove unused entry 0.
@@ -1237,41 +1237,41 @@
 #else
 
 /* clang-format off */
-const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
+const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
   -DCT_DCT, 2,
   -ADST_ADST, 4,
   -ADST_DCT, -DCT_ADST
 };
 /* clang-format on */
 
-static const vpx_prob
+static const aom_prob
     default_intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1] = {
       { { 240, 85, 128 }, { 4, 1, 248 }, { 4, 1, 8 }, { 4, 248, 128 } },
       { { 244, 85, 128 }, { 8, 2, 248 }, { 8, 2, 8 }, { 8, 248, 128 } },
       { { 248, 85, 128 }, { 16, 4, 248 }, { 16, 4, 8 }, { 16, 248, 128 } },
     };
 
-static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
+static const aom_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
   { 160, 85, 128 }, { 176, 85, 128 }, { 192, 85, 128 },
 };
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_INTRA
-static const vpx_prob
+static const aom_prob
     default_intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1] = {
       { 98, 63, 60 }, { 98, 82, 80 }, { 94, 65, 103 },
       { 49, 25, 24 }, { 72, 38, 50 },
     };
-static const vpx_prob default_ext_intra_probs[2] = { 230, 230 };
+static const aom_prob default_ext_intra_probs[2] = { 230, 230 };
 
-const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
+const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)] = {
   -INTRA_FILTER_LINEAR,      2, -INTRA_FILTER_8TAP, 4, -INTRA_FILTER_8TAP_SHARP,
   -INTRA_FILTER_8TAP_SMOOTH,
 };
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_SUPERTX
-static const vpx_prob
+static const aom_prob
     default_supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES] = {
       { 1, 160, 160, 170 }, { 1, 200, 200, 210 },
     };
@@ -1283,58 +1283,58 @@
 };
 
 static void init_mode_probs(FRAME_CONTEXT *fc) {
-  vp10_copy(fc->uv_mode_prob, default_uv_probs);
-  vp10_copy(fc->y_mode_prob, default_if_y_probs);
-  vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
-  vp10_copy(fc->partition_prob, default_partition_probs);
-  vp10_copy(fc->intra_inter_prob, default_intra_inter_p);
-  vp10_copy(fc->comp_inter_prob, default_comp_inter_p);
-  vp10_copy(fc->comp_ref_prob, default_comp_ref_p);
+  av1_copy(fc->uv_mode_prob, default_uv_probs);
+  av1_copy(fc->y_mode_prob, default_if_y_probs);
+  av1_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
+  av1_copy(fc->partition_prob, default_partition_probs);
+  av1_copy(fc->intra_inter_prob, default_intra_inter_p);
+  av1_copy(fc->comp_inter_prob, default_comp_inter_p);
+  av1_copy(fc->comp_ref_prob, default_comp_ref_p);
 #if CONFIG_EXT_REFS
-  vp10_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
+  av1_copy(fc->comp_bwdref_prob, default_comp_bwdref_p);
 #endif  // CONFIG_EXT_REFS
-  vp10_copy(fc->single_ref_prob, default_single_ref_p);
-  vp10_copy(fc->tx_size_probs, default_tx_size_prob);
+  av1_copy(fc->single_ref_prob, default_single_ref_p);
+  av1_copy(fc->tx_size_probs, default_tx_size_prob);
 #if CONFIG_VAR_TX
-  vp10_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
+  av1_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
 #endif
-  vp10_copy(fc->skip_probs, default_skip_probs);
+  av1_copy(fc->skip_probs, default_skip_probs);
 #if CONFIG_REF_MV
-  vp10_copy(fc->newmv_prob, default_newmv_prob);
-  vp10_copy(fc->zeromv_prob, default_zeromv_prob);
-  vp10_copy(fc->refmv_prob, default_refmv_prob);
-  vp10_copy(fc->drl_prob, default_drl_prob);
+  av1_copy(fc->newmv_prob, default_newmv_prob);
+  av1_copy(fc->zeromv_prob, default_zeromv_prob);
+  av1_copy(fc->refmv_prob, default_refmv_prob);
+  av1_copy(fc->drl_prob, default_drl_prob);
 #if CONFIG_EXT_INTER
   fc->new2mv_prob = default_new2mv_prob;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
-  vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+  av1_copy(fc->inter_mode_probs, default_inter_mode_probs);
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vp10_copy(fc->motvar_prob, default_motvar_prob);
+  av1_copy(fc->motvar_prob, default_motvar_prob);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 #if CONFIG_EXT_INTER
-  vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
-  vp10_copy(fc->interintra_prob, default_interintra_prob);
-  vp10_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
-  vp10_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
-  vp10_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
+  av1_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
+  av1_copy(fc->interintra_prob, default_interintra_prob);
+  av1_copy(fc->interintra_mode_prob, default_interintra_mode_prob);
+  av1_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
+  av1_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_SUPERTX
-  vp10_copy(fc->supertx_prob, default_supertx_prob);
+  av1_copy(fc->supertx_prob, default_supertx_prob);
 #endif  // CONFIG_SUPERTX
-  vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
-  vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+  av1_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+  av1_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
 #if CONFIG_EXT_INTRA
-  vp10_copy(fc->ext_intra_probs, default_ext_intra_probs);
-  vp10_copy(fc->intra_filter_probs, default_intra_filter_probs);
+  av1_copy(fc->ext_intra_probs, default_ext_intra_probs);
+  av1_copy(fc->intra_filter_probs, default_intra_filter_probs);
 #endif  // CONFIG_EXT_INTRA
-  vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
-  vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
+  av1_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
+  av1_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
 }
 
 #if CONFIG_EXT_INTERP
-const vpx_tree_index
-    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] = {
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+    {
       -EIGHTTAP_REGULAR,
       2,
       4,
@@ -1345,23 +1345,22 @@
       -MULTITAP_SHARP2,
     };
 #else
-const vpx_tree_index vp10_switchable_interp_tree[TREE_SIZE(
-    SWITCHABLE_FILTERS)] = { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH,
-                             -MULTITAP_SHARP };
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] =
+    { -EIGHTTAP_REGULAR, 2, -EIGHTTAP_SMOOTH, -MULTITAP_SHARP };
 #endif  // CONFIG_EXT_INTERP
 
-void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
   int i, j;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
   const FRAME_COUNTS *counts = &cm->counts;
 
   for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-    fc->intra_inter_prob[i] = vp10_mode_mv_merge_probs(
+    fc->intra_inter_prob[i] = av1_mode_mv_merge_probs(
         pre_fc->intra_inter_prob[i], counts->intra_inter[i]);
   for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-    fc->comp_inter_prob[i] = vp10_mode_mv_merge_probs(
-        pre_fc->comp_inter_prob[i], counts->comp_inter[i]);
+    fc->comp_inter_prob[i] = av1_mode_mv_merge_probs(pre_fc->comp_inter_prob[i],
+                                                     counts->comp_inter[i]);
 
 #if CONFIG_EXT_REFS
   for (i = 0; i < REF_CONTEXTS; i++)
@@ -1381,36 +1380,36 @@
 
   for (i = 0; i < REF_CONTEXTS; i++)
     for (j = 0; j < (SINGLE_REFS - 1); j++)
-      fc->single_ref_prob[i][j] = vp10_mode_mv_merge_probs(
+      fc->single_ref_prob[i][j] = av1_mode_mv_merge_probs(
           pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
 
 #if CONFIG_REF_MV
   for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
     fc->newmv_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->newmv_prob[i], counts->newmv_mode[i]);
   for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
-    fc->zeromv_prob[i] = vp10_mode_mv_merge_probs(pre_fc->zeromv_prob[i],
-                                                  counts->zeromv_mode[i]);
+    fc->zeromv_prob[i] =
+        av1_mode_mv_merge_probs(pre_fc->zeromv_prob[i], counts->zeromv_mode[i]);
   for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
     fc->refmv_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->refmv_prob[i], counts->refmv_mode[i]);
 
   for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
     fc->drl_prob[i] =
-        vp10_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
+        av1_mode_mv_merge_probs(pre_fc->drl_prob[i], counts->drl_mode[i]);
 #if CONFIG_EXT_INTER
   fc->new2mv_prob =
-      vp10_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
+      av1_mode_mv_merge_probs(pre_fc->new2mv_prob, counts->new2mv_mode);
 #endif  // CONFIG_EXT_INTER
 #else
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
+    aom_tree_merge_probs(av1_inter_mode_tree, pre_fc->inter_mode_probs[i],
                          counts->inter_mode[i], fc->inter_mode_probs[i]);
 #endif
 
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
   for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
-    vpx_tree_merge_probs(vp10_motvar_tree, pre_fc->motvar_prob[i],
+    aom_tree_merge_probs(av1_motvar_tree, pre_fc->motvar_prob[i],
                          counts->motvar[i], fc->motvar_prob[i]);
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
@@ -1418,7 +1417,7 @@
   for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
     int j;
     for (j = 1; j < TX_SIZES; ++j) {
-      fc->supertx_prob[i][j] = vp10_mode_mv_merge_probs(
+      fc->supertx_prob[i][j] = av1_mode_mv_merge_probs(
           pre_fc->supertx_prob[i][j], counts->supertx[i][j]);
     }
   }
@@ -1426,44 +1425,44 @@
 
 #if CONFIG_EXT_INTER
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
-    vpx_tree_merge_probs(
-        vp10_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
+    aom_tree_merge_probs(
+        av1_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i],
         counts->inter_compound_mode[i], fc->inter_compound_mode_probs[i]);
   for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
     if (is_interintra_allowed_bsize_group(i))
-      fc->interintra_prob[i] = vp10_mode_mv_merge_probs(
+      fc->interintra_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->interintra_prob[i], counts->interintra[i]);
   }
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
-    vpx_tree_merge_probs(
-        vp10_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
+    aom_tree_merge_probs(
+        av1_interintra_mode_tree, pre_fc->interintra_mode_prob[i],
         counts->interintra_mode[i], fc->interintra_mode_prob[i]);
   }
   for (i = 0; i < BLOCK_SIZES; ++i) {
     if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
-      fc->wedge_interintra_prob[i] = vp10_mode_mv_merge_probs(
+      fc->wedge_interintra_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
   }
   for (i = 0; i < BLOCK_SIZES; ++i) {
     if (is_interinter_wedge_used(i))
-      fc->wedge_interinter_prob[i] = vp10_mode_mv_merge_probs(
+      fc->wedge_interinter_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->wedge_interinter_prob[i], counts->wedge_interinter[i]);
   }
 #endif  // CONFIG_EXT_INTER
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
-    vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
                          counts->y_mode[i], fc->y_mode_prob[i]);
 
   if (cm->interp_filter == SWITCHABLE) {
     for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
-      vpx_tree_merge_probs(
-          vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+      aom_tree_merge_probs(
+          av1_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
           counts->switchable_interp[i], fc->switchable_interp_prob[i]);
   }
 }
 
-void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
   int i, j;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -1472,7 +1471,7 @@
   if (cm->tx_mode == TX_MODE_SELECT) {
     for (i = 0; i < TX_SIZES - 1; ++i) {
       for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
-        vpx_tree_merge_probs(vp10_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
+        aom_tree_merge_probs(av1_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
                              counts->tx_size[i][j], fc->tx_size_probs[i][j]);
     }
   }
@@ -1480,21 +1479,21 @@
 #if CONFIG_VAR_TX
   if (cm->tx_mode == TX_MODE_SELECT)
     for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
-      fc->txfm_partition_prob[i] = vp10_mode_mv_merge_probs(
+      fc->txfm_partition_prob[i] = av1_mode_mv_merge_probs(
           pre_fc->txfm_partition_prob[i], counts->txfm_partition[i]);
 #endif
 
   for (i = 0; i < SKIP_CONTEXTS; ++i)
     fc->skip_probs[i] =
-        vp10_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
+        av1_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
 
 #if CONFIG_EXT_TX
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     int s;
     for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
       if (use_inter_ext_tx_for_txsize[s][i]) {
-        vpx_tree_merge_probs(
-            vp10_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
+        aom_tree_merge_probs(
+            av1_ext_tx_inter_tree[s], pre_fc->inter_ext_tx_prob[s][i],
             counts->inter_ext_tx[s][i], fc->inter_ext_tx_prob[s][i]);
       }
     }
@@ -1502,8 +1501,8 @@
       if (use_intra_ext_tx_for_txsize[s][i]) {
         int j;
         for (j = 0; j < INTRA_MODES; ++j)
-          vpx_tree_merge_probs(
-              vp10_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
+          aom_tree_merge_probs(
+              av1_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
               counts->intra_ext_tx[s][i][j], fc->intra_ext_tx_prob[s][i][j]);
       }
     }
@@ -1511,52 +1510,52 @@
 #else
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
-      vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
+      aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
                            counts->intra_ext_tx[i][j],
                            fc->intra_ext_tx_prob[i][j]);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+    aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
                          counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
   }
 #endif  // CONFIG_EXT_TX
 
   if (cm->seg.temporal_update) {
     for (i = 0; i < PREDICTION_PROBS; i++)
-      fc->seg.pred_probs[i] = vp10_mode_mv_merge_probs(
-          pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
+      fc->seg.pred_probs[i] = av1_mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
+                                                      counts->seg.pred[i]);
 
-    vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_mispred, fc->seg.tree_probs);
   } else {
-    vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_total, fc->seg.tree_probs);
   }
 
   for (i = 0; i < INTRA_MODES; ++i)
-    vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
                          counts->uv_mode[i], fc->uv_mode_prob[i]);
 
 #if CONFIG_EXT_PARTITION_TYPES
-  vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[0],
+  aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[0],
                        counts->partition[0], fc->partition_prob[0]);
   for (i = 1; i < PARTITION_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_ext_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_ext_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #else
   for (i = 0; i < PARTITION_CONTEXTS; i++)
-    vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
 #if CONFIG_EXT_INTRA
   for (i = 0; i < PLANE_TYPES; ++i) {
-    fc->ext_intra_probs[i] = vp10_mode_mv_merge_probs(
-        pre_fc->ext_intra_probs[i], counts->ext_intra[i]);
+    fc->ext_intra_probs[i] = av1_mode_mv_merge_probs(pre_fc->ext_intra_probs[i],
+                                                     counts->ext_intra[i]);
   }
 
   for (i = 0; i < INTRA_FILTERS + 1; ++i)
-    vpx_tree_merge_probs(vp10_intra_filter_tree, pre_fc->intra_filter_probs[i],
+    aom_tree_merge_probs(av1_intra_filter_tree, pre_fc->intra_filter_probs[i],
                          counts->intra_filter[i], fc->intra_filter_probs[i]);
 #endif  // CONFIG_EXT_INTRA
 }
@@ -1579,13 +1578,13 @@
   lf->mode_deltas[1] = 0;
 }
 
-void vp10_setup_past_independence(VP10_COMMON *cm) {
+void av1_setup_past_independence(AV1_COMMON *cm) {
   // Reset the segment feature data to the default stats:
   // Features disabled, 0, with delta coding (Default state).
   struct loopfilter *const lf = &cm->lf;
 
   int i;
-  vp10_clearall_segfeatures(&cm->seg);
+  av1_clearall_segfeatures(&cm->seg);
   cm->seg.abs_delta = SEGMENT_DELTADATA;
 
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
@@ -1595,8 +1594,8 @@
     memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
 
   // Reset the mode ref deltas for loop filter
-  vp10_zero(lf->last_ref_deltas);
-  vp10_zero(lf->last_mode_deltas);
+  av1_zero(lf->last_ref_deltas);
+  av1_zero(lf->last_mode_deltas);
   set_default_lf_deltas(lf);
 
   // To force update of the sharpness
@@ -1608,9 +1607,9 @@
   }
 #endif  // CONFIG_LOOP_RESTORATION
 
-  vp10_default_coef_probs(cm);
+  av1_default_coef_probs(cm);
   init_mode_probs(cm->fc);
-  vp10_init_mv_probs(cm);
+  av1_init_mv_probs(cm);
   cm->fc->initialized = 1;
 
   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
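
Note: the renamed av1_mode_mv_merge_probs() / aom_tree_merge_probs() calls throughout this file all perform the same per-frame adaptation: the previous frame context's probability is pulled toward the probability implied by this frame's counts, with a weight that grows with the number of observations and saturates. A simplified stand-alone sketch of that update is below; the saturation point (20 counts) and maximum step (128/256) mirror libvpx-style defaults and are illustrative, not a byte-exact copy of the library routine.

    #include <stdint.h>

    typedef uint8_t aom_prob;  /* probability that the "0" branch is taken, in [1, 255] */

    /* Simplified stand-in for av1_mode_mv_merge_probs(pre_prob, ct):
     * blend the prior probability with the one suggested by the counts. */
    static aom_prob merge_prob_sketch(aom_prob pre_prob, unsigned int ct0,
                                      unsigned int ct1) {
      const unsigned int den = ct0 + ct1;
      /* probability implied by the counts alone, kept inside [1, 255] */
      unsigned int obs = den ? (255 * ct0 + den / 2) / den : 128;
      const unsigned int count = den < 20 ? den : 20;  /* saturate the weight */
      const unsigned int factor = 128 * count / 20;    /* 0..128 out of 256 */
      if (obs == 0) obs = 1;
      return (aom_prob)((pre_prob * (256 - factor) + obs * factor + 128) >> 8);
    }
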
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 4616aa2..e437b3f 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPYMODE_H_
-#define VP10_COMMON_ENTROPYMODE_H_
+#ifndef AV1_COMMON_ENTROPYMODE_H_
+#define AV1_COMMON_ENTROPYMODE_H_
 
 #include "av1/common/entropy.h"
 #include "av1/common/entropymv.h"
 #include "av1/common/filter.h"
 #include "av1/common/seg_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -36,7 +36,7 @@
 #define PALETTE_Y_MODE_CONTEXTS 3
 #define PALETTE_MAX_BLOCK_SIZE (64 * 64)
 
-struct VP10Common;
+struct AV1Common;
 
 struct seg_counts {
   unsigned int tree_total[MAX_SEGMENTS];
@@ -45,58 +45,58 @@
 };
 
 typedef struct frame_contexts {
-  vpx_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
-  vpx_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+  aom_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
+  aom_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
 #if CONFIG_EXT_PARTITION_TYPES
-  vpx_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
+  aom_prob partition_prob[PARTITION_CONTEXTS][EXT_PARTITION_TYPES - 1];
 #else
-  vpx_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+  aom_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
 #endif
-  vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
 #if CONFIG_ANS
   coeff_cdf_model coef_cdfs[TX_SIZES][PLANE_TYPES];
 #endif
-  vpx_prob
+  aom_prob
       switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
                                                          1];
 
 #if CONFIG_REF_MV
-  vpx_prob newmv_prob[NEWMV_MODE_CONTEXTS];
-  vpx_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
-  vpx_prob refmv_prob[REFMV_MODE_CONTEXTS];
-  vpx_prob drl_prob[DRL_MODE_CONTEXTS];
+  aom_prob newmv_prob[NEWMV_MODE_CONTEXTS];
+  aom_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
+  aom_prob refmv_prob[REFMV_MODE_CONTEXTS];
+  aom_prob drl_prob[DRL_MODE_CONTEXTS];
 
 #if CONFIG_EXT_INTER
-  vpx_prob new2mv_prob;
+  aom_prob new2mv_prob;
 #endif  // CONFIG_EXT_INTER
 #endif  // CONFIG_REF_MV
 
-  vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
+  aom_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
 #if CONFIG_EXT_INTER
-  vpx_prob
+  aom_prob
       inter_compound_mode_probs[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1];
-  vpx_prob interintra_prob[BLOCK_SIZE_GROUPS];
-  vpx_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
-  vpx_prob wedge_interintra_prob[BLOCK_SIZES];
-  vpx_prob wedge_interinter_prob[BLOCK_SIZES];
+  aom_prob interintra_prob[BLOCK_SIZE_GROUPS];
+  aom_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
+  aom_prob wedge_interintra_prob[BLOCK_SIZES];
+  aom_prob wedge_interinter_prob[BLOCK_SIZES];
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vpx_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
+  aom_prob motvar_prob[BLOCK_SIZES][MOTION_VARIATIONS - 1];
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
-  vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
-  vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
-  vpx_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
+  aom_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+  aom_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+  aom_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS - 1];
 #if CONFIG_EXT_REFS
-  vpx_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
-  vpx_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
+  aom_prob comp_ref_prob[REF_CONTEXTS][FWD_REFS - 1];
+  aom_prob comp_bwdref_prob[REF_CONTEXTS][BWD_REFS - 1];
 #else
-  vpx_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
+  aom_prob comp_ref_prob[REF_CONTEXTS][COMP_REFS - 1];
 #endif  // CONFIG_EXT_REFS
-  vpx_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
+  aom_prob tx_size_probs[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES - 1];
 #if CONFIG_VAR_TX
-  vpx_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
+  aom_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
 #endif
-  vpx_prob skip_probs[SKIP_CONTEXTS];
+  aom_prob skip_probs[SKIP_CONTEXTS];
 #if CONFIG_REF_MV
   nmv_context nmvc[NMV_CONTEXTS];
 #else
@@ -104,24 +104,24 @@
 #endif
   int initialized;
 #if CONFIG_EXT_TX
-  vpx_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
-  vpx_prob
+  aom_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
+  aom_prob
       intra_ext_tx_prob[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES][TX_TYPES -
                                                                       1];
 #else
-  vpx_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
-  vpx_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
+  aom_prob intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1];
+  aom_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1];
 #endif  // CONFIG_EXT_TX
 #if CONFIG_SUPERTX
-  vpx_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
+  aom_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES];
 #endif  // CONFIG_SUPERTX
   struct segmentation_probs seg;
 #if CONFIG_EXT_INTRA
-  vpx_prob ext_intra_probs[PLANE_TYPES];
-  vpx_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
+  aom_prob ext_intra_probs[PLANE_TYPES];
+  aom_prob intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1];
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_GLOBAL_MOTION
-  vpx_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
+  aom_prob global_motion_types_prob[GLOBAL_MOTION_TYPES - 1];
 #endif  // CONFIG_GLOBAL_MOTION
 } FRAME_CONTEXT;
 
@@ -136,7 +136,7 @@
 #else
   unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
 #endif
-  vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
+  av1_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
   unsigned int
       eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
   unsigned int
@@ -207,61 +207,60 @@
 #endif  // CONFIG_EXT_INTRA
 } FRAME_COUNTS;
 
-extern const vpx_prob
-    vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
-extern const vpx_prob vp10_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
-                                                      [PALETTE_Y_MODE_CONTEXTS];
-extern const vpx_prob vp10_default_palette_uv_mode_prob[2];
-extern const vpx_prob
-    vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob
-    vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
-extern const vpx_prob vp10_default_palette_y_color_prob
+extern const aom_prob
+    av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+extern const aom_prob av1_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
+                                                     [PALETTE_Y_MODE_CONTEXTS];
+extern const aom_prob av1_default_palette_uv_mode_prob[2];
+extern const aom_prob
+    av1_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob
+    av1_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+extern const aom_prob av1_default_palette_y_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
-extern const vpx_prob vp10_default_palette_uv_color_prob
+extern const aom_prob av1_default_palette_uv_color_prob
     [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
 
-extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
-extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
+extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
 #if CONFIG_EXT_INTER
-extern const vpx_tree_index
-    vp10_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
-extern const vpx_tree_index
-    vp10_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
+extern const aom_tree_index
+    av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
+extern const aom_tree_index
+    av1_inter_compound_mode_tree[TREE_SIZE(INTER_COMPOUND_MODES)];
 #endif  // CONFIG_EXT_INTER
-extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
+extern const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)];
 #if CONFIG_EXT_PARTITION_TYPES
-extern const vpx_tree_index
-    vp10_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
+extern const aom_tree_index
+    av1_ext_partition_tree[TREE_SIZE(EXT_PARTITION_TYPES)];
 #endif
-extern const vpx_tree_index
-    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
-extern const vpx_tree_index vp10_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
-extern const vpx_tree_index
-    vp10_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
-extern const vpx_tree_index
-    vp10_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
+extern const aom_tree_index
+    av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
+extern const aom_tree_index av1_palette_size_tree[TREE_SIZE(PALETTE_SIZES)];
+extern const aom_tree_index
+    av1_palette_color_tree[PALETTE_MAX_SIZE - 1][TREE_SIZE(PALETTE_COLORS)];
+extern const aom_tree_index av1_tx_size_tree[TX_SIZES - 1][TREE_SIZE(TX_SIZES)];
 #if CONFIG_EXT_INTRA
-extern const vpx_tree_index vp10_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
+extern const aom_tree_index av1_intra_filter_tree[TREE_SIZE(INTRA_FILTERS)];
 #endif  // CONFIG_EXT_INTRA
 #if CONFIG_EXT_TX
-extern const vpx_tree_index
-    vp10_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
-extern const vpx_tree_index
-    vp10_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+    av1_ext_tx_inter_tree[EXT_TX_SETS_INTER][TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index
+    av1_ext_tx_intra_tree[EXT_TX_SETS_INTRA][TREE_SIZE(TX_TYPES)];
 #else
-extern const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)];
 #endif  // CONFIG_EXT_TX
 #if CONFIG_OBMC || CONFIG_WARPED_MOTION
-extern const vpx_tree_index vp10_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
+extern const aom_tree_index av1_motvar_tree[TREE_SIZE(MOTION_VARIATIONS)];
 #endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
 
-void vp10_setup_past_independence(struct VP10Common *cm);
+void av1_setup_past_independence(struct AV1Common *cm);
 
-void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
-void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
+void av1_adapt_intra_frame_probs(struct AV1Common *cm);
+void av1_adapt_inter_frame_probs(struct AV1Common *cm);
 
-static INLINE int vp10_ceil_log2(int n) {
+static INLINE int av1_ceil_log2(int n) {
   int i = 1, p = 2;
   while (p < n) {
     i++;
@@ -270,11 +269,11 @@
   return i;
 }
 
-int vp10_get_palette_color_context(const uint8_t *color_map, int cols, int r,
-                                   int c, int n, int *color_order);
+int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
+                                  int c, int n, int *color_order);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMODE_H_
+#endif  // AV1_COMMON_ENTROPYMODE_H_
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index f3dba3f..5abc252 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -14,12 +14,12 @@
 // Integer pel reference mv threshold for use of high-precision 1/8 mv
 #define COMPANDED_MVREF_THRESH 8
 
-const vpx_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
+const aom_tree_index av1_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
   -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
 };
 
 /* clang-format off */
-const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
+const aom_tree_index av1_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
   -MV_CLASS_0, 2,
   -MV_CLASS_1, 4,
   6, 8,
@@ -33,12 +33,12 @@
 };
 /* clang-format on */
 
-const vpx_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
+const aom_tree_index av1_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
   -0, -1,
 };
 
-const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
-                                                                4,  -2, -3 };
+const aom_tree_index av1_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
+                                                               4,  -2, -3 };
 
 static const nmv_context default_nmv_context = {
 #if CONFIG_REF_MV
@@ -115,12 +115,12 @@
 };
 
 #if CONFIG_GLOBAL_MOTION
-const vpx_tree_index
-    vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
+const aom_tree_index
+    av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)] = {
       -GLOBAL_ZERO, 2, -GLOBAL_TRANSLATION, 4, -GLOBAL_ROTZOOM, -GLOBAL_AFFINE
     };
 
-static const vpx_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
+static const aom_prob default_global_motion_types_prob[GLOBAL_MOTION_TYPES -
                                                        1] = { 224, 128, 128 };
 #endif  // CONFIG_GLOBAL_MOTION
 
@@ -128,7 +128,7 @@
   return c ? CLASS0_SIZE << (c + 2) : 0;
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset) {
   const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
                               ? MV_CLASS_10
                               : (MV_CLASS_TYPE)log_in_base_2[z >> 3];
@@ -138,7 +138,7 @@
 
 // TODO(jingning): This idle function is intentionally left as is for
 // experimental purpose.
-int vp10_use_mv_hp(const MV *ref) {
+int av1_use_mv_hp(const MV *ref) {
   (void)ref;
   return 1;
 }
@@ -151,7 +151,7 @@
   comp_counts->sign[s] += incr;
   z = (s ? -v : v) - 1; /* magnitude - 1 */
 
-  c = vp10_get_mv_class(z, &o);
+  c = av1_get_mv_class(z, &o);
   comp_counts->classes[c] += incr;
 
   d = (o >> 3);     /* int mv data */
@@ -171,9 +171,9 @@
   }
 }
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
+void av1_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
   if (counts != NULL) {
-    const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
+    const MV_JOINT_TYPE j = av1_get_mv_joint(mv);
 
 #if CONFIG_REF_MV
     ++counts->zero_rmv[j == MV_JOINT_ZERO];
@@ -189,7 +189,7 @@
   }
 }
 
-void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
+void av1_adapt_mv_probs(AV1_COMMON *cm, int allow_hp) {
   int i, j;
 #if CONFIG_REF_MV
   int idx;
@@ -199,10 +199,10 @@
         &cm->frame_contexts[cm->frame_context_idx].nmvc[idx];
     const nmv_context_counts *counts = &cm->counts.mv[idx];
 
-    vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+    aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
                          fc->joints);
 #if CONFIG_REF_MV
-    fc->zero_rmv = vp10_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
+    fc->zero_rmv = av1_mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
 #endif
 
     for (i = 0; i < 2; ++i) {
@@ -210,25 +210,25 @@
       const nmv_component *pre_comp = &pre_fc->comps[i];
       const nmv_component_counts *c = &counts->comps[i];
 
-      comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
-      vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+      comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+      aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
                            comp->classes);
-      vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+      aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
                            comp->class0);
 
       for (j = 0; j < MV_OFFSET_BITS; ++j)
-        comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+        comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
 
       for (j = 0; j < CLASS0_SIZE; ++j)
-        vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+        aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
                              c->class0_fp[j], comp->class0_fp[j]);
 
-      vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+      aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
 
       if (allow_hp) {
         comp->class0_hp =
-            vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
-        comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+            av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+        comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
       }
     }
   }
@@ -237,7 +237,7 @@
   const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
   const nmv_context_counts *counts = &cm->counts.mv;
 
-  vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+  aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
                        fc->joints);
 
   for (i = 0; i < 2; ++i) {
@@ -245,31 +245,31 @@
     const nmv_component *pre_comp = &pre_fc->comps[i];
     const nmv_component_counts *c = &counts->comps[i];
 
-    comp->sign = vp10_mode_mv_merge_probs(pre_comp->sign, c->sign);
-    vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+    comp->sign = av1_mode_mv_merge_probs(pre_comp->sign, c->sign);
+    aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
                          comp->classes);
-    vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+    aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
                          comp->class0);
 
     for (j = 0; j < MV_OFFSET_BITS; ++j)
-      comp->bits[j] = vp10_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+      comp->bits[j] = av1_mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
 
     for (j = 0; j < CLASS0_SIZE; ++j)
-      vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+      aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
                            c->class0_fp[j], comp->class0_fp[j]);
 
-    vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+    aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
 
     if (allow_hp) {
       comp->class0_hp =
-          vp10_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
-      comp->hp = vp10_mode_mv_merge_probs(pre_comp->hp, c->hp);
+          av1_mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+      comp->hp = av1_mode_mv_merge_probs(pre_comp->hp, c->hp);
     }
   }
 #endif
 }
 
-void vp10_init_mv_probs(VP10_COMMON *cm) {
+void av1_init_mv_probs(AV1_COMMON *cm) {
 #if CONFIG_REF_MV
   int i;
   for (i = 0; i < NMV_CONTEXTS; ++i) cm->fc->nmvc[i] = default_nmv_context;
@@ -277,6 +277,6 @@
   cm->fc->nmvc = default_nmv_context;
 #endif
 #if CONFIG_GLOBAL_MOTION
-  vp10_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
+  av1_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
 #endif  // CONFIG_GLOBAL_MOTION
 }
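
Note: the av1_mv_joint_tree / av1_mv_class_tree tables above follow the aom_tree_index convention from aom_dsp/prob.h: the tree is stored as pairs of entries, an entry greater than zero is the index of the next pair, and any other entry is a leaf storing the symbol negated. A stand-alone sketch of a walk down such a tree follows; the mv_joint_tree copy is lifted from the table in this file, while the bit-array driver is only for illustration.

    #include <stdio.h>

    typedef signed char tree_index;  /* mirrors aom_tree_index */

    enum { MV_JOINT_ZERO, MV_JOINT_HNZVZ, MV_JOINT_HZVNZ, MV_JOINT_HNZVNZ };

    static const tree_index mv_joint_tree[] = {
      -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
    };

    /* Follow a bit string (0 = first entry of the pair, 1 = second) to a leaf. */
    static int tree_decode(const tree_index *tree, const int *bits, int nbits) {
      tree_index i = 0;
      for (int b = 0; b < nbits; ++b) {
        i = tree[i + bits[b]];
        if (i <= 0) return -i;  /* leaf: symbol is the negated entry */
      }
      return -1;  /* not enough bits to reach a leaf */
    }

    int main(void) {
      const int bits[] = { 1, 1, 0 };
      printf("%d\n", tree_decode(mv_joint_tree, bits, 3));  /* 2 == MV_JOINT_HZVNZ */
      return 0;
    }
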
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index c809a67..c6e0855 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENTROPYMV_H_
-#define VP10_COMMON_ENTROPYMV_H_
+#ifndef AV1_COMMON_ENTROPYMV_H_
+#define AV1_COMMON_ENTROPYMV_H_
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "aom_dsp/prob.h"
 
@@ -21,12 +21,12 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-void vp10_init_mv_probs(struct VP10Common *cm);
+void av1_init_mv_probs(struct AV1Common *cm);
 
-void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp);
-int vp10_use_mv_hp(const MV *ref);
+void av1_adapt_mv_probs(struct AV1Common *cm, int usehp);
+int av1_use_mv_hp(const MV *ref);
 
 #define MV_UPDATE_PROB 252
 
@@ -76,31 +76,31 @@
 #define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
 #define MV_LOW (-(1 << MV_IN_USE_BITS))
 
-extern const vpx_tree_index vp10_mv_joint_tree[];
-extern const vpx_tree_index vp10_mv_class_tree[];
-extern const vpx_tree_index vp10_mv_class0_tree[];
-extern const vpx_tree_index vp10_mv_fp_tree[];
+extern const aom_tree_index av1_mv_joint_tree[];
+extern const aom_tree_index av1_mv_class_tree[];
+extern const aom_tree_index av1_mv_class0_tree[];
+extern const aom_tree_index av1_mv_fp_tree[];
 
 typedef struct {
-  vpx_prob sign;
-  vpx_prob classes[MV_CLASSES - 1];
-  vpx_prob class0[CLASS0_SIZE - 1];
-  vpx_prob bits[MV_OFFSET_BITS];
-  vpx_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
-  vpx_prob fp[MV_FP_SIZE - 1];
-  vpx_prob class0_hp;
-  vpx_prob hp;
+  aom_prob sign;
+  aom_prob classes[MV_CLASSES - 1];
+  aom_prob class0[CLASS0_SIZE - 1];
+  aom_prob bits[MV_OFFSET_BITS];
+  aom_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
+  aom_prob fp[MV_FP_SIZE - 1];
+  aom_prob class0_hp;
+  aom_prob hp;
 } nmv_component;
 
 typedef struct {
-  vpx_prob joints[MV_JOINTS - 1];
+  aom_prob joints[MV_JOINTS - 1];
 #if CONFIG_REF_MV
-  vpx_prob zero_rmv;
+  aom_prob zero_rmv;
 #endif
   nmv_component comps[2];
 } nmv_context;
 
-static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
+static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {
   if (mv->row == 0) {
     return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
   } else {
@@ -108,7 +108,7 @@
   }
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset);
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset);
 
 typedef struct {
   unsigned int sign[2];
@@ -129,15 +129,15 @@
   nmv_component_counts comps[2];
 } nmv_context_counts;
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
+void av1_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
 
 #if CONFIG_GLOBAL_MOTION
-extern const vpx_tree_index
-    vp10_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
+extern const aom_tree_index
+    av1_global_motion_types_tree[TREE_SIZE(GLOBAL_MOTION_TYPES)];
 #endif  // CONFIG_GLOBAL_MOTION
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMV_H_
+#endif  // AV1_COMMON_ENTROPYMV_H_
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 8cdec8e..899c8b9 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ENUMS_H_
-#define VP10_COMMON_ENUMS_H_
+#ifndef AV1_COMMON_ENUMS_H_
+#define AV1_COMMON_ENUMS_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -211,20 +211,20 @@
 #endif                  // CONFIG_EXT_TX
 
 typedef enum {
-  VPX_LAST_FLAG = 1 << 0,
+  AOM_LAST_FLAG = 1 << 0,
 #if CONFIG_EXT_REFS
-  VPX_LAST2_FLAG = 1 << 1,
-  VPX_LAST3_FLAG = 1 << 2,
-  VPX_GOLD_FLAG = 1 << 3,
-  VPX_BWD_FLAG = 1 << 4,
-  VPX_ALT_FLAG = 1 << 5,
-  VPX_REFFRAME_ALL = (1 << 6) - 1
+  AOM_LAST2_FLAG = 1 << 1,
+  AOM_LAST3_FLAG = 1 << 2,
+  AOM_GOLD_FLAG = 1 << 3,
+  AOM_BWD_FLAG = 1 << 4,
+  AOM_ALT_FLAG = 1 << 5,
+  AOM_REFFRAME_ALL = (1 << 6) - 1
 #else
-  VPX_GOLD_FLAG = 1 << 1,
-  VPX_ALT_FLAG = 1 << 2,
-  VPX_REFFRAME_ALL = (1 << 3) - 1
+  AOM_GOLD_FLAG = 1 << 1,
+  AOM_ALT_FLAG = 1 << 2,
+  AOM_REFFRAME_ALL = (1 << 3) - 1
 #endif  // CONFIG_EXT_REFS
-} VPX_REFFRAME;
+} AOM_REFFRAME;
 
 typedef enum { PLANE_TYPE_Y = 0, PLANE_TYPE_UV = 1, PLANE_TYPES } PLANE_TYPE;
 
@@ -435,4 +435,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENUMS_H_
+#endif  // AV1_COMMON_ENUMS_H_
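
Note: the renamed AOM_*_FLAG values remain one-hot bit flags, so reference-frame sets are still composed and tested with bitwise operators. A tiny sketch using the non-CONFIG_EXT_REFS values from the hunk above (the ref_frame_flags variable name is only illustrative):

    enum {
      AOM_LAST_FLAG = 1 << 0,
      AOM_GOLD_FLAG = 1 << 1,
      AOM_ALT_FLAG = 1 << 2,
      AOM_REFFRAME_ALL = (1 << 3) - 1
    };

    /* Allow prediction from LAST and ALTREF only; GOLDEN stays disabled. */
    static int allows_golden(void) {
      const int ref_frame_flags = AOM_LAST_FLAG | AOM_ALT_FLAG;
      return (ref_frame_flags & AOM_GOLD_FLAG) != 0;  /* evaluates to 0 here */
    }
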
diff --git a/av1/common/filter.c b/av1/common/filter.c
index 46eca5d..4881642 100644
--- a/av1/common/filter.c
+++ b/av1/common/filter.c
@@ -186,7 +186,7 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if CONFIG_EXT_INTRA
-const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS] = {
+const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS] = {
   bilinear_filters,         // INTRA_FILTER_LINEAR
   sub_pel_filters_8,        // INTRA_FILTER_8TAP
   sub_pel_filters_8sharp,   // INTRA_FILTER_8TAP_SHARP
@@ -196,7 +196,7 @@
 
 #if CONFIG_EXT_INTERP
 static const InterpFilterParams
-    vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+    av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
       { (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_10sharp, 10, SUBPEL_SHIFTS },
@@ -206,7 +206,7 @@
     };
 #else
 static const InterpFilterParams
-    vp10_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
+    av1_interp_filter_params_list[SWITCHABLE_FILTERS + 1] = {
       { (const int16_t *)sub_pel_filters_8, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8smooth, SUBPEL_TAPS, SUBPEL_SHIFTS },
       { (const int16_t *)sub_pel_filters_8sharp, SUBPEL_TAPS, SUBPEL_SHIFTS },
@@ -215,32 +215,31 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if USE_TEMPORALFILTER_12TAP
-static const InterpFilterParams vp10_interp_temporalfilter_12tap = {
+static const InterpFilterParams av1_interp_temporalfilter_12tap = {
   (const int16_t *)sub_pel_filters_temporalfilter_12, 12, SUBPEL_SHIFTS
 };
 #endif  // USE_TEMPORALFILTER_12TAP
 
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
     const INTERP_FILTER interp_filter) {
 #if USE_TEMPORALFILTER_12TAP
   if (interp_filter == TEMPORALFILTER_12TAP)
-    return vp10_interp_temporalfilter_12tap;
+    return av1_interp_temporalfilter_12tap;
 #endif  // USE_TEMPORALFILTER_12TAP
-  return vp10_interp_filter_params_list[interp_filter];
+  return av1_interp_filter_params_list[interp_filter];
 }
 
-const int16_t *vp10_get_interp_filter_kernel(
-    const INTERP_FILTER interp_filter) {
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter) {
 #if USE_TEMPORALFILTER_12TAP
   if (interp_filter == TEMPORALFILTER_12TAP)
-    return vp10_interp_temporalfilter_12tap.filter_ptr;
+    return av1_interp_temporalfilter_12tap.filter_ptr;
 #endif  // USE_TEMPORALFILTER_12TAP
-  return (const int16_t *)vp10_interp_filter_params_list[interp_filter]
+  return (const int16_t *)av1_interp_filter_params_list[interp_filter]
       .filter_ptr;
 }
 
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
-                                                     int index) {
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+                                                    int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSSE3
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
     return &sub_pel_filters_12sharp_signal_dir[index][0];
@@ -259,7 +258,7 @@
   return NULL;
 }
 
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSSE3
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
@@ -279,8 +278,8 @@
   return NULL;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index) {
 #if CONFIG_EXT_INTERP && HAVE_SSE4_1
   if (p.filter_ptr == (const int16_t *)sub_pel_filters_12sharp) {
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 39fad23..c5a8521 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_FILTER_H_
-#define VP10_COMMON_FILTER_H_
+#ifndef AV1_COMMON_FILTER_H_
+#define AV1_COMMON_FILTER_H_
 
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_filter.h"
 #include "aom_ports/mem.h"
 
 #ifdef __cplusplus
@@ -65,7 +65,7 @@
   INTRA_FILTERS,
 } INTRA_FILTER;
 
-extern const InterpKernel *vp10_intra_filter_kernels[INTRA_FILTERS];
+extern const InterpKernel *av1_intra_filter_kernels[INTRA_FILTERS];
 #endif  // CONFIG_EXT_INTRA
 
 typedef struct InterpFilterParams {
@@ -74,26 +74,26 @@
   uint16_t subpel_shifts;
 } InterpFilterParams;
 
-InterpFilterParams vp10_get_interp_filter_params(
+InterpFilterParams av1_get_interp_filter_params(
     const INTERP_FILTER interp_filter);
 
-const int16_t *vp10_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
+const int16_t *av1_get_interp_filter_kernel(const INTERP_FILTER interp_filter);
 
-static INLINE const int16_t *vp10_get_interp_filter_subpel_kernel(
+static INLINE const int16_t *av1_get_interp_filter_subpel_kernel(
     const InterpFilterParams filter_params, const int subpel) {
   return filter_params.filter_ptr + filter_params.taps * subpel;
 }
 
-static INLINE int vp10_is_interpolating_filter(
+static INLINE int av1_is_interpolating_filter(
     const INTERP_FILTER interp_filter) {
-  const InterpFilterParams ip = vp10_get_interp_filter_params(interp_filter);
+  const InterpFilterParams ip = av1_get_interp_filter_params(interp_filter);
   return (ip.filter_ptr[ip.taps / 2 - 1] == 128);
 }
 
 #if USE_TEMPORALFILTER_12TAP
 extern const int8_t sub_pel_filters_temporalfilter_12_signal_dir[15][2][16];
 extern const int8_t sub_pel_filters_temporalfilter_12_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 extern const int16_t
     sub_pel_filters_temporalfilter_12_highbd_ver_signal_dir[15][6][8];
 #endif
@@ -104,24 +104,24 @@
 extern const int8_t sub_pel_filters_10sharp_signal_dir[15][2][16];
 extern const int8_t sub_pel_filters_12sharp_ver_signal_dir[15][6][16];
 extern const int8_t sub_pel_filters_10sharp_ver_signal_dir[15][6][16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 extern const int16_t sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8];
 extern const int16_t sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8];
 #endif
 #endif
 
 typedef const int8_t (*SubpelFilterCoeffs)[16];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef const int16_t (*HbdSubpelFilterCoeffs)[8];
 #endif
 
-SubpelFilterCoeffs vp10_get_subpel_filter_signal_dir(const InterpFilterParams p,
-                                                     int index);
+SubpelFilterCoeffs av1_get_subpel_filter_signal_dir(const InterpFilterParams p,
+                                                    int index);
 
-SubpelFilterCoeffs vp10_get_subpel_filter_ver_signal_dir(
+SubpelFilterCoeffs av1_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index);
-#if CONFIG_VP9_HIGHBITDEPTH
-HbdSubpelFilterCoeffs vp10_hbd_get_subpel_filter_ver_signal_dir(
+#if CONFIG_AOM_HIGHBITDEPTH
+HbdSubpelFilterCoeffs av1_hbd_get_subpel_filter_ver_signal_dir(
     const InterpFilterParams p, int index);
 #endif
 
@@ -129,4 +129,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FILTER_H_
+#endif  // AV1_COMMON_FILTER_H_
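
Note: av1_get_interp_filter_subpel_kernel() above locates a sub-pel phase inside a flat filter table as filter_ptr + taps * subpel. A self-contained sketch of that addressing follows; the 4-tap, 4-phase bilinear table is made up for the example and is not the library's coefficient data (each row simply sums to 128, the property av1_is_interpolating_filter() checks for the centre tap).

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
      const int16_t *filter_ptr;
      uint16_t taps;
      uint16_t subpel_shifts;
    } InterpFilterParamsSketch;

    /* Illustrative 4-phase, 4-tap table; each row sums to 128. */
    static const int16_t bilinear_4tap[4][4] = {
      { 0, 128, 0, 0 }, { 0, 96, 32, 0 }, { 0, 64, 64, 0 }, { 0, 32, 96, 0 },
    };

    static const int16_t *get_subpel_kernel(InterpFilterParamsSketch p, int subpel) {
      return p.filter_ptr + p.taps * subpel;  /* same arithmetic as the header */
    }

    int main(void) {
      const InterpFilterParamsSketch p = { &bilinear_4tap[0][0], 4, 4 };
      const int16_t *k = get_subpel_kernel(p, 2);  /* phase 2 -> { 0, 64, 64, 0 } */
      printf("%d %d %d %d\n", k[0], k[1], k[2], k[3]);
      return 0;
    }
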
diff --git a/av1/common/frame_buffers.c b/av1/common/frame_buffers.c
index 5c736a9..89f4e4f 100644
--- a/av1/common/frame_buffers.c
+++ b/av1/common/frame_buffers.c
@@ -11,34 +11,34 @@
 #include <assert.h>
 
 #include "av1/common/frame_buffers.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
 
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
   assert(list != NULL);
-  vp10_free_internal_frame_buffers(list);
+  av1_free_internal_frame_buffers(list);
 
   list->num_internal_frame_buffers =
-      VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
-  list->int_fb = (InternalFrameBuffer *)vpx_calloc(
+      AOM_MAXIMUM_REF_BUFFERS + AOM_MAXIMUM_WORK_BUFFERS;
+  list->int_fb = (InternalFrameBuffer *)aom_calloc(
       list->num_internal_frame_buffers, sizeof(*list->int_fb));
   return (list->int_fb == NULL);
 }
 
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list) {
   int i;
 
   assert(list != NULL);
 
   for (i = 0; i < list->num_internal_frame_buffers; ++i) {
-    vpx_free(list->int_fb[i].data);
+    aom_free(list->int_fb[i].data);
     list->int_fb[i].data = NULL;
   }
-  vpx_free(list->int_fb);
+  aom_free(list->int_fb);
   list->int_fb = NULL;
 }
 
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                          vpx_codec_frame_buffer_t *fb) {
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+                         aom_codec_frame_buffer_t *fb) {
   int i;
   InternalFrameBufferList *const int_fb_list =
       (InternalFrameBufferList *)cb_priv;
@@ -53,7 +53,7 @@
 
   if (int_fb_list->int_fb[i].size < min_size) {
     int_fb_list->int_fb[i].data =
-        (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size);
+        (uint8_t *)aom_realloc(int_fb_list->int_fb[i].data, min_size);
     if (!int_fb_list->int_fb[i].data) return -1;
 
     // This memset is needed for fixing valgrind error from C loop filter
@@ -72,7 +72,7 @@
   return 0;
 }
 
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) {
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
   InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
   (void)cb_priv;
   if (int_fb) int_fb->in_use = 0;
diff --git a/av1/common/frame_buffers.h b/av1/common/frame_buffers.h
index 6667132..63253be 100644
--- a/av1/common/frame_buffers.h
+++ b/av1/common/frame_buffers.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_FRAME_BUFFERS_H_
-#define VP10_COMMON_FRAME_BUFFERS_H_
+#ifndef AV1_COMMON_FRAME_BUFFERS_H_
+#define AV1_COMMON_FRAME_BUFFERS_H_
 
-#include "aom/vpx_frame_buffer.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_frame_buffer.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -30,24 +30,24 @@
 } InternalFrameBufferList;
 
 // Initializes |list|. Returns 0 on success.
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list);
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Free any data allocated to the frame buffers.
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Callback used by libaom to request an external frame buffer. |cb_priv|
 // Callback private data, which points to an InternalFrameBufferList.
 // |min_size| is the minimum size in bytes needed to decode the next frame.
 // |fb| pointer to the frame buffer.
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                          vpx_codec_frame_buffer_t *fb);
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
+                         aom_codec_frame_buffer_t *fb);
 
 // Callback used by libaom when there are no references to the frame buffer.
 // |cb_priv| is not used. |fb| pointer to the frame buffer.
-int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb);
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FRAME_BUFFERS_H_
+#endif  // AV1_COMMON_FRAME_BUFFERS_H_
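
Note: a usage sketch for the renamed internal frame-buffer helpers declared above, assuming av1/common/frame_buffers.h is on the include path. As the header comments state, cb_priv for the get/release callbacks points at the InternalFrameBufferList itself; the 4096-byte size below is arbitrary.

    #include "av1/common/frame_buffers.h"

    static int frame_buffer_demo(void) {
      InternalFrameBufferList list = { 0 };
      aom_codec_frame_buffer_t fb = { 0 };

      if (av1_alloc_internal_frame_buffers(&list)) return -1;  /* returns 0 on success */
      if (av1_get_frame_buffer(&list, 4096, &fb)) return -1;   /* hand out a buffer >= 4096 bytes */
      /* ... decode into fb.data ... */
      av1_release_frame_buffer(&list, &fb);                    /* mark the buffer unused again */
      av1_free_internal_frame_buffers(&list);
      return 0;
    }
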
diff --git a/av1/common/idct.c b/av1/common/idct.c
index 83b44d5..536e346 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -10,19 +10,19 @@
 
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/blockd.h"
 #include "av1/common/enums.h"
 #include "av1/common/idct.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
 int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
                  const TX_SIZE tx_size) {
   (void)tx_type;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     return txsize_sqr_up_map[tx_size] == TX_32X32;
   }
@@ -70,7 +70,7 @@
   // Note overall scaling factor is 4 times orthogonal
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_iidtx4_c(const tran_low_t *input, tran_low_t *output,
                             int bd) {
   int i;
@@ -113,10 +113,10 @@
     inputhalf[i] =
         HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[i] * Sqrt2), bd);
   }
-  vpx_highbd_idct16_c(inputhalf, output + 16, bd);
+  aom_highbd_idct16_c(inputhalf, output + 16, bd);
   // Note overall scaling factor is 4 times orthogonal
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Inverse identity transform and add.
 static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
@@ -177,7 +177,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_idst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
@@ -255,7 +255,7 @@
 }
 
 void highbd_idst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
-  // vp9_highbd_igentx16(input, output, bd, Tx16);
+  // av1_highbd_igentx16(input, output, bd, Tx16);
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -474,11 +474,11 @@
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_TX
 
-void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_4[] = {
     { idct4_c, idct4_c },    // DCT_DCT
     { iadst4_c, idct4_c },   // ADST_DCT
@@ -541,8 +541,8 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_4x8[] = {
     { idct8_c, idct4_c },    // DCT_DCT
     { iadst8_c, idct4_c },   // ADST_DCT
@@ -594,8 +594,8 @@
   }
 }
 
-void vp10_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_8x4[] = {
     { idct4_c, idct8_c },    // DCT_DCT
     { iadst4_c, idct8_c },   // ADST_DCT
@@ -647,8 +647,8 @@
   }
 }
 
-void vp10_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                            int tx_type) {
+void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                           int tx_type) {
   static const transform_2d IHT_8x16[] = {
     { idct16_c, idct8_c },    // DCT_DCT
     { iadst16_c, idct8_c },   // ADST_DCT
@@ -700,8 +700,8 @@
   }
 }
 
-void vp10_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                            int tx_type) {
+void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                           int tx_type) {
   static const transform_2d IHT_16x8[] = {
     { idct8_c, idct16_c },    // DCT_DCT
     { iadst8_c, idct16_c },   // ADST_DCT
@@ -753,8 +753,8 @@
   }
 }
 
-void vp10_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_16x32[] = {
     { idct32_c, idct16_c },         // DCT_DCT
     { ihalfright32_c, idct16_c },   // ADST_DCT
@@ -806,8 +806,8 @@
   }
 }
 
-void vp10_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_32x16[] = {
     { idct16_c, idct32_c },         // DCT_DCT
     { iadst16_c, idct32_c },        // ADST_DCT
@@ -860,8 +860,8 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                          int tx_type) {
+void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                         int tx_type) {
   static const transform_2d IHT_8[] = {
     { idct8_c, idct8_c },    // DCT_DCT
     { iadst8_c, idct8_c },   // ADST_DCT
@@ -923,8 +923,8 @@
   }
 }
 
-void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   static const transform_2d IHT_16[] = {
     { idct16_c, idct16_c },    // DCT_DCT
     { iadst16_c, idct16_c },   // ADST_DCT
@@ -987,8 +987,8 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
-                              int stride, int tx_type) {
+void av1_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+                             int tx_type) {
   static const transform_2d IHT_32[] = {
     { idct32_c, idct32_c },              // DCT_DCT
     { ihalfright32_c, idct32_c },        // ADST_DCT
@@ -1048,82 +1048,82 @@
 #endif  // CONFIG_EXT_TX
 
 // idct
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   if (eob > 1)
-    vpx_idct4x4_16_add(input, dest, stride);
+    aom_idct4x4_16_add(input, dest, stride);
   else
-    vpx_idct4x4_1_add(input, dest, stride);
+    aom_idct4x4_1_add(input, dest, stride);
 }
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   if (eob > 1)
-    vpx_iwht4x4_16_add(input, dest, stride);
+    aom_iwht4x4_16_add(input, dest, stride);
   else
-    vpx_iwht4x4_1_add(input, dest, stride);
+    aom_iwht4x4_1_add(input, dest, stride);
 }
 
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob) {
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   if (eob == 1)
     // DC only DCT coefficient
-    vpx_idct8x8_1_add(input, dest, stride);
+    aom_idct8x8_1_add(input, dest, stride);
   else if (eob <= 12)
-    vpx_idct8x8_12_add(input, dest, stride);
+    aom_idct8x8_12_add(input, dest, stride);
   else
-    vpx_idct8x8_64_add(input, dest, stride);
+    aom_idct8x8_64_add(input, dest, stride);
 }
 
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob) {
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob) {
   /* The calculation can be simplified if there are not many non-zero dct
    * coefficients. Use eobs to separate different cases. */
   if (eob == 1) /* DC only DCT coefficient. */
-    vpx_idct16x16_1_add(input, dest, stride);
+    aom_idct16x16_1_add(input, dest, stride);
   else if (eob <= 10)
-    vpx_idct16x16_10_add(input, dest, stride);
+    aom_idct16x16_10_add(input, dest, stride);
   else
-    vpx_idct16x16_256_add(input, dest, stride);
+    aom_idct16x16_256_add(input, dest, stride);
 }
 
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob) {
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob) {
   if (eob == 1)
-    vpx_idct32x32_1_add(input, dest, stride);
+    aom_idct32x32_1_add(input, dest, stride);
   else if (eob <= 34)
     // non-zero coeff only in upper-left 8x8
-    vpx_idct32x32_34_add(input, dest, stride);
+    aom_idct32x32_34_add(input, dest, stride);
   else
-    vpx_idct32x32_1024_add(input, dest, stride);
+    aom_idct32x32_1024_add(input, dest, stride);
 }
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type, int lossless) {
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_iwht4x4_add(input, dest, stride, eob);
+    av1_iwht4x4_add(input, dest, stride, eob);
     return;
   }
 
   switch (tx_type) {
-    case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct4x4_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
-    case FLIPADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+    case FLIPADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
     case V_DCT:
     case H_DCT:
     case V_ADST:
@@ -1131,7 +1131,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht4x4_16_add_c(input, dest, stride, tx_type);
+      av1_iht4x4_16_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 4, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1140,56 +1140,56 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
+  (void)eob;
+  av1_iht4x8_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
+  (void)eob;
+  av1_iht8x4_32_add(input, dest, stride, tx_type);
+}
+
+void av1_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht4x8_32_add(input, dest, stride, tx_type);
+  av1_iht8x16_128_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht8x4_32_add(input, dest, stride, tx_type);
+  av1_iht16x8_128_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
                             int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht8x16_128_add(input, dest, stride, tx_type);
+  av1_iht16x32_512_add(input, dest, stride, tx_type);
 }
 
-void vp10_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
                             int eob, TX_TYPE tx_type) {
   (void)eob;
-  vp10_iht16x8_128_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
-  (void)eob;
-  vp10_iht16x32_512_add(input, dest, stride, tx_type);
-}
-
-void vp10_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
-  (void)eob;
-  vp10_iht32x16_512_add(input, dest, stride, tx_type);
+  av1_iht32x16_512_add(input, dest, stride, tx_type);
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct8x8_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
-    case FLIPADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    case FLIPADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
     case V_DCT:
     case H_DCT:
     case V_ADST:
@@ -1197,7 +1197,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht8x8_64_add_c(input, dest, stride, tx_type);
+      av1_iht8x8_64_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 8, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1205,20 +1205,20 @@
   }
 }
 
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct16x16_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht16x16_256_add(input, dest, stride, tx_type); break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
     case DCT_FLIPADST:
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_iht16x16_256_add(input, dest, stride, tx_type);
+      av1_iht16x16_256_add(input, dest, stride, tx_type);
       break;
     case V_DCT:
     case H_DCT:
@@ -1227,7 +1227,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_iht16x16_256_add_c(input, dest, stride, tx_type);
+      av1_iht16x16_256_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 16, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1235,10 +1235,10 @@
   }
 }
 
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type) {
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct32x32_add(input, dest, stride, eob); break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
     case DCT_ADST:
@@ -1254,7 +1254,7 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_iht32x32_1024_add_c(input, dest, stride, tx_type);
+      av1_iht32x32_1024_add_c(input, dest, stride, tx_type);
       break;
     case IDTX: inv_idtx_add_c(input, dest, stride, 32, tx_type); break;
 #endif  // CONFIG_EXT_TX
@@ -1262,27 +1262,27 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4[] = {
-    { vpx_highbd_idct4_c, vpx_highbd_idct4_c },    // DCT_DCT
-    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },   // ADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },   // DCT_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // ADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },   // FLIPADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c },  // FLIPADST_ADST
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
     { highbd_iidtx4_c, highbd_iidtx4_c },          // IDTX
-    { vpx_highbd_idct4_c, highbd_iidtx4_c },       // V_DCT
-    { highbd_iidtx4_c, vpx_highbd_idct4_c },       // H_DCT
-    { vpx_highbd_iadst4_c, highbd_iidtx4_c },      // V_ADST
-    { highbd_iidtx4_c, vpx_highbd_iadst4_c },      // H_ADST
-    { vpx_highbd_iadst4_c, highbd_iidtx4_c },      // V_FLIPADST
-    { highbd_iidtx4_c, vpx_highbd_iadst4_c },      // H_FLIPADST
+    { aom_highbd_idct4_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_FLIPADST
 #endif                                             // CONFIG_EXT_TX
   };
 
@@ -1330,25 +1330,25 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4x8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct4_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct4_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst4_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // ADST_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_idct4_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst4_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst4_c },  // FLIPADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx4_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx4_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct4_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx4_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst4_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx4_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst4_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_FLIPADST
   };
   const int n = 4;
   const int n2 = 8;
@@ -1388,25 +1388,25 @@
   }
 }
 
-void vp10_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x4[] = {
-    { vpx_highbd_idct4_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst4_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // ADST_ADST
-    { vpx_highbd_iadst4_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct4_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx4_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct4_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx4_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst4_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx4_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst4_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx4_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct4_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_FLIPADST
   };
   const int n = 4;
   const int n2 = 8;
@@ -1446,25 +1446,25 @@
   }
 }
 
-void vp10_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int tx_type, int bd) {
+void av1_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // ADST_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx16_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx16_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct16_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_FLIPADST
   };
   const int n = 8;
   const int n2 = 16;
@@ -1503,25 +1503,25 @@
   }
 }
 
-void vp10_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int tx_type, int bd) {
+void av1_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
+                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct16_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst16_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // ADST_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst16_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx16_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx16_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct16_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst16_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst16_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_FLIPADST
   };
   const int n = 8;
   const int n2 = 16;
@@ -1560,25 +1560,25 @@
   }
 }
 
-void vp10_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x32[] = {
-    { vpx_highbd_idct32_c, vpx_highbd_idct16_c },     // DCT_DCT
-    { highbd_ihalfright32_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct32_c, vpx_highbd_iadst16_c },    // DCT_ADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // ADST_ADST
-    { highbd_ihalfright32_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct32_c, vpx_highbd_iadst16_c },    // DCT_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { highbd_ihalfright32_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct16_c },     // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_ADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_ADST
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx32_c, highbd_iidtx16_c },           // IDTX
-    { vpx_highbd_idct32_c, highbd_iidtx16_c },        // V_DCT
-    { highbd_iidtx32_c, vpx_highbd_idct16_c },        // H_DCT
+    { aom_highbd_idct32_c, highbd_iidtx16_c },        // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct16_c },        // H_DCT
     { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx32_c, vpx_highbd_iadst16_c },       // H_ADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_ADST
     { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx32_c, vpx_highbd_iadst16_c },       // H_FLIPADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_FLIPADST
   };
   const int n = 16;
   const int n2 = 32;
@@ -1617,24 +1617,24 @@
   }
 }
 
-void vp10_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32x16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct32_c },     // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct32_c },    // ADST_DCT
-    { vpx_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_idct32_c },    // FLIPADST_DCT
-    { vpx_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct32_c },     // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx32_c },           // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx32_c },        // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct32_c },        // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx32_c },       // V_ADST
+    { aom_highbd_idct16_c, highbd_iidtx32_c },        // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct32_c },        // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_ADST
     { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx32_c },       // V_FLIPADST
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_FLIPADST
     { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_FLIPADST
   };
   const int n = 16;
@@ -1675,26 +1675,26 @@
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int tx_type, int bd) {
+void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+                                int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8[] = {
-    { vpx_highbd_idct8_c, vpx_highbd_idct8_c },    // DCT_DCT
-    { vpx_highbd_iadst8_c, vpx_highbd_idct8_c },   // ADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst8_c },   // DCT_ADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst8_c, vpx_highbd_idct8_c },   // FLIPADST_DCT
-    { vpx_highbd_idct8_c, vpx_highbd_iadst8_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c },  // FLIPADST_ADST
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
     { highbd_iidtx8_c, highbd_iidtx8_c },          // IDTX
-    { vpx_highbd_idct8_c, highbd_iidtx8_c },       // V_DCT
-    { highbd_iidtx8_c, vpx_highbd_idct8_c },       // H_DCT
-    { vpx_highbd_iadst8_c, highbd_iidtx8_c },      // V_ADST
-    { highbd_iidtx8_c, vpx_highbd_iadst8_c },      // H_ADST
-    { vpx_highbd_iadst8_c, highbd_iidtx8_c },      // V_FLIPADST
-    { highbd_iidtx8_c, vpx_highbd_iadst8_c },      // H_FLIPADST
+    { aom_highbd_idct8_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_FLIPADST
 #endif                                             // CONFIG_EXT_TX
   };
 
@@ -1741,26 +1741,26 @@
   }
 }
 
-void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int tx_type, int bd) {
+void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16[] = {
-    { vpx_highbd_idct16_c, vpx_highbd_idct16_c },    // DCT_DCT
-    { vpx_highbd_iadst16_c, vpx_highbd_idct16_c },   // ADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst16_c },   // DCT_ADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { vpx_highbd_iadst16_c, vpx_highbd_idct16_c },   // FLIPADST_DCT
-    { vpx_highbd_idct16_c, vpx_highbd_iadst16_c },   // DCT_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // FLIPADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // ADST_FLIPADST
-    { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c },  // FLIPADST_ADST
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
     { highbd_iidtx16_c, highbd_iidtx16_c },          // IDTX
-    { vpx_highbd_idct16_c, highbd_iidtx16_c },       // V_DCT
-    { highbd_iidtx16_c, vpx_highbd_idct16_c },       // H_DCT
-    { vpx_highbd_iadst16_c, highbd_iidtx16_c },      // V_ADST
-    { highbd_iidtx16_c, vpx_highbd_iadst16_c },      // H_ADST
-    { vpx_highbd_iadst16_c, highbd_iidtx16_c },      // V_FLIPADST
-    { highbd_iidtx16_c, vpx_highbd_iadst16_c },      // H_FLIPADST
+    { aom_highbd_idct16_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_FLIPADST
 #endif                                               // CONFIG_EXT_TX
   };
 
@@ -1808,21 +1808,21 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int tx_type, int bd) {
+void av1_highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32[] = {
-    { vpx_highbd_idct32_c, vpx_highbd_idct32_c },      // DCT_DCT
-    { highbd_ihalfright32_c, vpx_highbd_idct32_c },    // ADST_DCT
-    { vpx_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct32_c },      // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_ADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_ADST
-    { highbd_ihalfright32_c, vpx_highbd_idct32_c },    // FLIPADST_DCT
-    { vpx_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_ADST
     { highbd_iidtx32_c, highbd_iidtx32_c },            // IDTX
-    { vpx_highbd_idct32_c, highbd_iidtx32_c },         // V_DCT
-    { highbd_iidtx32_c, vpx_highbd_idct32_c },         // H_DCT
+    { aom_highbd_idct32_c, highbd_iidtx32_c },         // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct32_c },         // H_DCT
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_ADST
     { highbd_iidtx32_c, highbd_ihalfright32_c },       // H_ADST
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_FLIPADST
@@ -1872,73 +1872,73 @@
 #endif  // CONFIG_EXT_TX
 
 // idct
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   if (eob > 1)
-    vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
+    aom_highbd_idct4x4_16_add(input, dest, stride, bd);
   else
-    vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
+    aom_highbd_idct4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   if (eob > 1)
-    vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
+    aom_highbd_iwht4x4_16_add(input, dest, stride, bd);
   else
-    vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
+    aom_highbd_iwht4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd) {
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   // DC only DCT coefficient
   if (eob == 1) {
-    vpx_highbd_idct8x8_1_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_1_add(input, dest, stride, bd);
   } else if (eob <= 10) {
-    vpx_highbd_idct8x8_10_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_10_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct8x8_64_add(input, dest, stride, bd);
+    aom_highbd_idct8x8_64_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd) {
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd) {
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to separate different cases.
   // DC only DCT coefficient.
   if (eob == 1) {
-    vpx_highbd_idct16x16_1_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_1_add(input, dest, stride, bd);
   } else if (eob <= 10) {
-    vpx_highbd_idct16x16_10_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_10_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct16x16_256_add(input, dest, stride, bd);
+    aom_highbd_idct16x16_256_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd) {
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd) {
   // Non-zero coeff only in upper-left 8x8
   if (eob == 1) {
-    vpx_highbd_idct32x32_1_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_1_add(input, dest, stride, bd);
   } else if (eob <= 34) {
-    vpx_highbd_idct32x32_34_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_34_add(input, dest, stride, bd);
   } else {
-    vpx_highbd_idct32x32_1024_add(input, dest, stride, bd);
+    aom_highbd_idct32x32_1024_add(input, dest, stride, bd);
   }
 }
 
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type,
-                                  int lossless) {
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type,
+                                 int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd);
+    av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
     return;
   }
 
@@ -1947,8 +1947,8 @@
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -1956,8 +1956,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -1966,7 +1966,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 4, tx_type, bd);
@@ -1977,60 +1977,57 @@
 }
 
 #if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
+  (void)eob;
+  av1_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
+  (void)eob;
+  av1_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+}
+
+void av1_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht4x8_32_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht8x4_32_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
                                    int stride, int eob, int bd,
                                    TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht8x16_128_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
 }
 
-void vp10_highbd_inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
                                    int stride, int eob, int bd,
                                    TX_TYPE tx_type) {
   (void)eob;
-  vp10_highbd_iht16x8_128_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
-  (void)eob;
-  vp10_highbd_iht16x32_512_add_c(input, dest, stride, tx_type, bd);
-}
-
-void vp10_highbd_inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
-  (void)eob;
-  vp10_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
+  av1_highbd_iht32x16_512_add_c(input, dest, stride, tx_type, bd);
 }
 #endif  // CONFIG_EXT_TX
 
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd,
-                                  TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -2038,8 +2035,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
-                              bd);
+      av1_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride, tx_type,
+                             bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -2048,7 +2045,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht8x8_64_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 8, tx_type, bd);
@@ -2058,17 +2055,17 @@
   }
 }
 
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                tx_type, bd);
+      av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               tx_type, bd);
       break;
 #if CONFIG_EXT_TX
     case FLIPADST_DCT:
@@ -2076,8 +2073,8 @@
     case FLIPADST_FLIPADST:
     case ADST_FLIPADST:
     case FLIPADST_ADST:
-      vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                tx_type, bd);
+      av1_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               tx_type, bd);
       break;
     case V_DCT:
     case H_DCT:
@@ -2086,7 +2083,7 @@
     case V_FLIPADST:
     case H_FLIPADST:
       // Use C version since DST only exists in C code
-      vp10_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht16x16_256_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 16, tx_type, bd);
@@ -2096,14 +2093,14 @@
   }
 }
 
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type) {
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type) {
   (void)eob;
   switch (tx_type) {
     case DCT_DCT:
-      vp10_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
-                                DCT_DCT, bd);
+      av1_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
+                               DCT_DCT, bd);
       break;
 #if CONFIG_EXT_TX
     case ADST_DCT:
@@ -2120,7 +2117,7 @@
     case H_ADST:
     case V_FLIPADST:
     case H_FLIPADST:
-      vp10_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
+      av1_highbd_iht32x32_1024_add_c(input, dest, stride, tx_type, bd);
       break;
     case IDTX:
       highbd_inv_idtx_add_c(input, dest, stride, 32, tx_type, bd);
@@ -2129,7 +2126,7 @@
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                   INV_TXFM_PARAM *inv_txfm_param) {
@@ -2140,45 +2137,39 @@
 
   switch (tx_size) {
     case TX_32X32:
-      vp10_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_32x32(input, dest, stride, eob, tx_type);
       break;
     case TX_16X16:
-      vp10_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x16(input, dest, stride, eob, tx_type);
       break;
-    case TX_8X8:
-      vp10_inv_txfm_add_8x8(input, dest, stride, eob, tx_type);
-      break;
+    case TX_8X8: av1_inv_txfm_add_8x8(input, dest, stride, eob, tx_type); break;
 #if CONFIG_EXT_TX
-    case TX_4X8:
-      vp10_inv_txfm_add_4x8(input, dest, stride, eob, tx_type);
-      break;
-    case TX_8X4:
-      vp10_inv_txfm_add_8x4(input, dest, stride, eob, tx_type);
-      break;
+    case TX_4X8: av1_inv_txfm_add_4x8(input, dest, stride, eob, tx_type); break;
+    case TX_8X4: av1_inv_txfm_add_8x4(input, dest, stride, eob, tx_type); break;
     case TX_8X16:
-      vp10_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_8x16(input, dest, stride, eob, tx_type);
       break;
     case TX_16X8:
-      vp10_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x8(input, dest, stride, eob, tx_type);
       break;
     case TX_16X32:
-      vp10_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_16x32(input, dest, stride, eob, tx_type);
       break;
     case TX_32X16:
-      vp10_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
+      av1_inv_txfm_add_32x16(input, dest, stride, eob, tx_type);
       break;
 #endif  // CONFIG_EXT_TX
     case TX_4X4:
-      // this is like vp10_short_idct4x4 but has a special case around eob<=1
+      // this is like av1_short_idct4x4 but has a special case around eob<=1
       // which is significant (not just an optimization) for the lossless
       // case.
-      vp10_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
+      av1_inv_txfm_add_4x4(input, dest, stride, eob, tx_type, lossless);
       break;
     default: assert(0 && "Invalid transform size"); break;
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                          INV_TXFM_PARAM *inv_txfm_param) {
   const TX_TYPE tx_type = inv_txfm_param->tx_type;
@@ -2189,42 +2180,42 @@
 
   switch (tx_size) {
     case TX_32X32:
-      vp10_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_32x32(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X16:
-      vp10_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x16(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X8:
-      vp10_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x8(input, dest, stride, eob, bd, tx_type);
       break;
 #if CONFIG_EXT_TX
     case TX_4X8:
-      vp10_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_4x8(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X4:
-      vp10_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x4(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_8X16:
-      vp10_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_8x16(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X8:
-      vp10_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x8(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_16X32:
-      vp10_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_16x32(input, dest, stride, eob, bd, tx_type);
       break;
     case TX_32X16:
-      vp10_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
+      av1_highbd_inv_txfm_add_32x16(input, dest, stride, eob, bd, tx_type);
       break;
 #endif  // CONFIG_EXT_TX
     case TX_4X4:
-      // this is like vp10_short_idct4x4 but has a special case around eob<=1
+      // this is like av1_short_idct4x4 but has a special case around eob<=1
       // which is significant (not just an optimization) for the lossless
       // case.
-      vp10_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
-                                   lossless);
+      av1_highbd_inv_txfm_add_4x4(input, dest, stride, eob, bd, tx_type,
+                                  lossless);
       break;
     default: assert(0 && "Invalid transform size"); break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
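
The renamed wrappers in idct.c keep the eob-threshold dispatch from the vp10 code: a DC-only block (eob == 1) takes the cheapest kernel, a sparse block takes a reduced kernel, and only dense blocks pay for the full inverse transform. A minimal sketch of that pattern for the 8x8 case, assuming the usual AV1 headers are in scope; the helper name below is illustrative only and is not something this patch adds:

#include "./aom_config.h"
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"

/* Sketch: pick the cheapest inverse 8x8 kernel the eob allows.
 * idct8x8_dispatch_sketch is a hypothetical name, not in the tree. */
static void idct8x8_dispatch_sketch(const tran_low_t *input, uint8_t *dest,
                                    int stride, int eob) {
  if (eob == 1) {
    aom_idct8x8_1_add(input, dest, stride);   /* DC-only block */
  } else if (eob <= 12) {
    aom_idct8x8_12_add(input, dest, stride);  /* sparse: reduced kernel */
  } else {
    aom_idct8x8_64_add(input, dest, stride);  /* dense: full inverse */
  }
}

The 16x16 and 32x32 wrappers above follow the same pattern with eob thresholds of 10 and 34 respectively.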
diff --git a/av1/common/idct.h b/av1/common/idct.h
index 9b3be62..58ee0c7 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -8,12 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_IDCT_H_
-#define VP10_COMMON_IDCT_H_
+#ifndef AV1_COMMON_IDCT_H_
+#define AV1_COMMON_IDCT_H_
 
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/blockd.h"
 #include "av1/common/common.h"
 #include "av1/common/enums.h"
@@ -30,7 +30,7 @@
   TX_SIZE tx_size;
   int eob;
   int lossless;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int bd;
 #endif
 } INV_TXFM_PARAM;
@@ -41,78 +41,78 @@
   transform_1d cols, rows;  // vertical and horizontal
 } transform_2d;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
 typedef struct {
   highbd_transform_1d cols, rows;  // vertical and horizontal
 } highbd_transform_2d;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define MAX_TX_SCALE 1
 int get_tx_scale(const MACROBLOCKD *const xd, const TX_TYPE tx_type,
                  const TX_SIZE tx_size);
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                      int eob);
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob);
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
-                        int eob);
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                     int eob);
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob);
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+                       int eob);
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type, int lossless);
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type, int lossless);
 #if CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
 #endif  // CONFIG_EXT_TX
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
-                           int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                          int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type);
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, TX_TYPE tx_type);
 void inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                   INV_TXFM_PARAM *inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                             int eob, int bd);
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd);
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                               int stride, int eob, int bd);
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type,
-                                  int lossless);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+                            int eob, int bd);
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd);
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+                              int stride, int eob, int bd);
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type,
+                                 int lossless);
 #if CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
 #endif  // CONFIG_EXT_TX
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                                  int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                                    int stride, int eob, int bd,
-                                    TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+                                 int stride, int eob, int bd, TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type);
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+                                   int stride, int eob, int bd,
+                                   TX_TYPE tx_type);
 void highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                          INV_TXFM_PARAM *inv_txfm_param);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_IDCT_H_
+#endif  // AV1_COMMON_IDCT_H_
diff --git a/av1/common/intra_filters.h b/av1/common/intra_filters.h
index 021fb8e..350f7ca 100644
--- a/av1/common/intra_filters.h
+++ b/av1/common/intra_filters.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_INTRA_FILTERS_H_
-#define VP10_COMMON_INTRA_FILTERS_H_
+#ifndef AV1_COMMON_INTRA_FILTERS_H_
+#define AV1_COMMON_INTRA_FILTERS_H_
 
 #define FILTER_INTRA_PREC_BITS (10)
 
@@ -64,4 +64,4 @@
   },
 };
 
-#endif  // VP10_COMMON_INTRA_FILTERS_H_
+#endif  // AV1_COMMON_INTRA_FILTERS_H_
diff --git a/av1/common/loopfilter.c b/av1/common/loopfilter.c
index e4636a5..906223f 100644
--- a/av1/common/loopfilter.c
+++ b/av1/common/loopfilter.c
@@ -10,14 +10,14 @@
 
 #include <math.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/loopfilter.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/reconinter.h"
 #include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/seg_common.h"
@@ -241,7 +241,7 @@
 static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                 const MB_MODE_INFO *mbmi) {
 #if CONFIG_SUPERTX
-  const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+  const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx);
   assert(
       IMPLIES(supertx_enabled(mbmi), mbmi->segment_id_supertx != MAX_SEGMENTS));
   assert(IMPLIES(supertx_enabled(mbmi),
@@ -252,7 +252,7 @@
   return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
 }
 
-void vp10_loop_filter_init(VP10_COMMON *cm) {
+void av1_loop_filter_init(AV1_COMMON *cm) {
   loop_filter_info_n *lfi = &cm->lf_info;
   struct loopfilter *lf = &cm->lf;
   int lvl;
@@ -266,7 +266,7 @@
     memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
 }
 
-void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
+void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl) {
   int seg_id;
   // n_shift is the multiplier for lf_deltas
   // the multiplier is 1 for when filter_lvl is between 0 and 31;
@@ -341,52 +341,52 @@
     if (mask & 1) {
       if ((mask_16x16_0 | mask_16x16_1) & 1) {
         if ((mask_16x16_0 & mask_16x16_1) & 1) {
-          vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr);
         } else if (mask_16x16_0 & 1) {
-          vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                               lfi1->hev_thr);
         }
       }
 
       if ((mask_8x8_0 | mask_8x8_1) & 1) {
         if ((mask_8x8_0 & mask_8x8_1) & 1) {
-          vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_8x8_0 & 1) {
-          vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
 
       if ((mask_4x4_0 | mask_4x4_1) & 1) {
         if ((mask_4x4_0 & mask_4x4_1) & 1) {
-          vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_4x4_0 & 1) {
-          vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
+          aom_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
 
       if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
         if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
-          vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                   lfi1->hev_thr);
         } else if (mask_4x4_int_0 & 1) {
-          vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                              lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
+          aom_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
         }
       }
@@ -405,7 +405,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert_row2(
     int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
     unsigned int mask_8x8_l, unsigned int mask_4x4_l,
@@ -434,55 +434,55 @@
     if (mask & 1) {
       if ((mask_16x16_0 | mask_16x16_1) & 1) {
         if ((mask_16x16_0 & mask_16x16_1) & 1) {
-          vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                           lfi0->hev_thr, bd);
         } else if (mask_16x16_0 & 1) {
-          vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                      lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                      lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_8x8_0 | mask_8x8_1) & 1) {
         if ((mask_8x8_0 & mask_8x8_1) & 1) {
-          vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_8x8_0 & 1) {
-          vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_4x4_0 | mask_4x4_1) & 1) {
         if ((mask_4x4_0 & mask_4x4_1) & 1) {
-          vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_4x4_0 & 1) {
-          vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
 
       if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
         if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
-          vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                          lfi1->hev_thr, bd);
         } else if (mask_4x4_int_0 & 1) {
-          vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+          aom_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
+          aom_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
         }
       }
@@ -500,7 +500,7 @@
     mask_4x4_int_1 >>= 1;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void filter_selectively_horiz(
     uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
@@ -517,11 +517,11 @@
     if (mask & 1) {
       if (mask_16x16 & 1) {
         if ((mask_16x16 & 3) == 3) {
-          vpx_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr);
           count = 2;
         } else {
-          vpx_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr);
         }
       } else if (mask_8x8 & 1) {
@@ -529,28 +529,28 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, lfin->mblim, lfin->lim,
                                     lfin->hev_thr);
 
           if ((mask_4x4_int & 3) == 3) {
-            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                       lfi->lim, lfi->hev_thr, lfin->mblim,
                                       lfin->lim, lfin->hev_thr);
           } else {
             if (mask_4x4_int & 1)
-              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr);
             else if (mask_4x4_int & 2)
-              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                    lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+          aom_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
-            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr);
         }
       } else if (mask_4x4 & 1) {
@@ -558,31 +558,31 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, lfin->mblim, lfin->lim,
                                     lfin->hev_thr);
           if ((mask_4x4_int & 3) == 3) {
-            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                       lfi->lim, lfi->hev_thr, lfin->mblim,
                                       lfin->lim, lfin->hev_thr);
           } else {
             if (mask_4x4_int & 1)
-              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr);
             else if (mask_4x4_int & 2)
-              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                    lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+          aom_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
-            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr);
         }
       } else if (mask_4x4_int & 1) {
-        vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+        aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                              lfi->hev_thr);
       }
     }
@@ -595,7 +595,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_horiz(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -611,11 +611,11 @@
     if (mask & 1) {
       if (mask_16x16 & 1) {
         if ((mask_16x16 & 3) == 3) {
-          vpx_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim,
                                             lfi->hev_thr, bd);
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, bd);
         }
       } else if (mask_8x8 & 1) {
@@ -623,30 +623,30 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, lfin->mblim, lfin->lim,
                                            lfin->hev_thr, bd);
 
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(
+            aom_highbd_lpf_horizontal_4_dual(
                 s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                 lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
-              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                           lfi->lim, lfi->hev_thr, bd);
             } else if (mask_4x4_int & 2) {
-              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);
             }
           }
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, bd);
 
           if (mask_4x4_int & 1) {
-            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                         lfi->lim, lfi->hev_thr, bd);
           }
         }
@@ -655,34 +655,34 @@
           // Next block's thresholds.
           const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);
 
-          vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, lfin->mblim, lfin->lim,
                                            lfin->hev_thr, bd);
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(
+            aom_highbd_lpf_horizontal_4_dual(
                 s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                 lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
-              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                           lfi->lim, lfi->hev_thr, bd);
             } else if (mask_4x4_int & 2) {
-              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);
             }
           }
           count = 2;
         } else {
-          vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
+          aom_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, bd);
 
           if (mask_4x4_int & 1) {
-            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                         lfi->lim, lfi->hev_thr, bd);
           }
         }
       } else if (mask_4x4_int & 1) {
-        vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+        aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr, bd);
       }
     }
@@ -694,7 +694,7 @@
     mask_4x4_int >>= count;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // This function ors into the current lfm structure, where to do loop
 // filters for the specific mi we are looking at. It uses information
@@ -833,9 +833,9 @@
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
-                     MODE_INFO **mi, const int mode_info_stride,
-                     LOOP_FILTER_MASK *lfm) {
+void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
+                    MODE_INFO **mi, const int mode_info_stride,
+                    LOOP_FILTER_MASK *lfm) {
   int idx_32, idx_16, idx_8;
   const loop_filter_info_n *const lfi_n = &cm->lf_info;
   MODE_INFO **mip = mi;
@@ -861,13 +861,13 @@
   const int shift_32_uv[] = { 0, 2, 8, 10 };
   const int shift_16_uv[] = { 0, 1, 4, 5 };
   int i;
-  const int max_rows = VPXMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
-  const int max_cols = VPXMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
+  const int max_rows = AOMMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
+  const int max_cols = AOMMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);
 #if CONFIG_EXT_PARTITION
   assert(0 && "Not yet updated");
 #endif  // CONFIG_EXT_PARTITION
 
-  vp10_zero(*lfm);
+  av1_zero(*lfm);
   assert(mip[0] != NULL);
 
   // TODO(jimbankoski): Try moving most of the following code into decode
@@ -1123,15 +1123,15 @@
 
     if (mask & 1) {
       if (mask_16x16 & 1) {
-        vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_8x8 & 1) {
-        vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_4x4 & 1) {
-        vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+        aom_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       }
     }
     if (mask_4x4_int & 1)
-      vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
+      aom_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
     s += 8;
     lfl += 1;
     mask_16x16 >>= 1;
@@ -1141,7 +1141,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -1154,18 +1154,18 @@
 
     if (mask & 1) {
       if (mask_16x16 & 1) {
-        vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                    bd);
       } else if (mask_8x8 & 1) {
-        vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                   bd);
       } else if (mask_4x4 & 1) {
-        vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+        aom_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                                   bd);
       }
     }
     if (mask_4x4_int & 1)
-      vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
+      aom_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, bd);
     s += 8;
     lfl += 1;
@@ -1175,11 +1175,11 @@
     mask_4x4_int >>= 1;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_filter_block_plane_non420(VP10_COMMON *cm,
-                                    struct macroblockd_plane *plane,
-                                    MODE_INFO **mib, int mi_row, int mi_col) {
+void av1_filter_block_plane_non420(AV1_COMMON *cm,
+                                   struct macroblockd_plane *plane,
+                                   MODE_INFO **mib, int mi_row, int mi_col) {
   const int ss_x = plane->subsampling_x;
   const int ss_y = plane->subsampling_y;
   const int row_step = 1 << ss_y;
@@ -1254,17 +1254,17 @@
 
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
       tx_size_r =
-          VPXMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
-      tx_size_c = VPXMIN(txsize_vert_map[tx_size],
+          AOMMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
+      tx_size_c = AOMMIN(txsize_vert_map[tx_size],
                          cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
 
       cm->above_txfm_context[mi_col + c] = txsize_horz_map[tx_size];
       cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] =
           txsize_vert_map[tx_size];
 #else
-      tx_size_r = VPXMIN(tx_size, cm->above_txfm_context[mi_col + c]);
+      tx_size_r = AOMMIN(tx_size, cm->above_txfm_context[mi_col + c]);
       tx_size_c =
-          VPXMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
+          AOMMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
 
       cm->above_txfm_context[mi_col + c] = tx_size;
       cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] = tx_size;
@@ -1333,7 +1333,7 @@
 
     // Disable filtering on the leftmost column
     border_mask = ~(mi_col == 0);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1350,7 +1350,7 @@
     filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
                             mask_8x8_c & border_mask, mask_4x4_c & border_mask,
                             mask_4x4_int[r], &cm->lf_info, &lfl[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += MI_SIZE * dst->stride;
     mib += row_step * cm->mi_stride;
   }
@@ -1374,7 +1374,7 @@
       mask_8x8_r = mask_8x8[r];
       mask_4x4_r = mask_4x4[r];
     }
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                       dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1389,14 +1389,14 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfl[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += MI_SIZE * dst->stride;
   }
 }
 
-void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss00(AV1_COMMON *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r;
@@ -1415,7 +1415,7 @@
     unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert_row2(
           plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1430,7 +1430,7 @@
     filter_selectively_vert_row2(
         plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
         mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += 2 * MI_SIZE * dst->stride;
     mask_16x16 >>= 2 * MI_SIZE;
     mask_8x8 >>= 2 * MI_SIZE;
@@ -1460,7 +1460,7 @@
       mask_4x4_r = mask_4x4 & 0xff;
     }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1475,7 +1475,7 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
                              &lfm->lfl_y[r][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += MI_SIZE * dst->stride;
     mask_16x16 >>= MI_SIZE;
@@ -1485,9 +1485,9 @@
   }
 }
 
-void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm) {
+void av1_filter_block_plane_ss11(AV1_COMMON *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r, c;
@@ -1514,7 +1514,7 @@
       unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         highbd_filter_selectively_vert_row2(
             plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1530,7 +1530,7 @@
       filter_selectively_vert_row2(
           plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
           mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r >> 1][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       dst->buf += 2 * MI_SIZE * dst->stride;
       mask_16x16 >>= MI_SIZE;
@@ -1565,7 +1565,7 @@
       mask_4x4_r = mask_4x4 & 0xf;
     }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1580,7 +1580,7 @@
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfm->lfl_uv[r >> 1][0]);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += MI_SIZE * dst->stride;
     mask_16x16 >>= MI_SIZE / 2;
@@ -1590,9 +1590,9 @@
   }
 }
 
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
-                           struct macroblockd_plane planes[MAX_MB_PLANE],
-                           int start, int stop, int y_only) {
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
+                          struct macroblockd_plane planes[MAX_MB_PLANE],
+                          int start, int stop, int y_only) {
 #if CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   int mi_row, mi_col;
@@ -1608,11 +1608,11 @@
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += cm->mib_size) {
       int plane;
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       for (plane = 0; plane < num_planes; ++plane)
-        vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
-                                       mi_col);
+        av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+                                      mi_col);
     }
   }
 #else
@@ -1635,23 +1635,23 @@
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
       int plane;
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                           mi_row, mi_col);
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+                                          mi_row, mi_col);
             break;
         }
       }
@@ -1660,9 +1660,9 @@
 #endif  // CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
 }
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                            MACROBLOCKD *xd, int frame_filter_level, int y_only,
-                            int partial_frame) {
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                           MACROBLOCKD *xd, int frame_filter_level, int y_only,
+                           int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
   if (!frame_filter_level) return;
   start_mi_row = 0;
@@ -1670,17 +1670,16 @@
   if (partial_frame && cm->mi_rows > 8) {
     start_mi_row = cm->mi_rows >> 1;
     start_mi_row &= 0xfffffff8;
-    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+    mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
-  vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
 }
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm,
-    const struct macroblockd_plane planes[MAX_MB_PLANE]) {
+    struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]) {
   lf_data->frame_buffer = frame_buffer;
   lf_data->cm = cm;
   lf_data->start = 0;
@@ -1689,9 +1688,9 @@
   memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
 }
 
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
   (void)unused;
-  vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
-                        lf_data->start, lf_data->stop, lf_data->y_only);
+  av1_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+                       lf_data->start, lf_data->stop, lf_data->y_only);
   return 1;
 }
diff --git a/av1/common/loopfilter.h b/av1/common/loopfilter.h
index b85ed04..d3377e2 100644
--- a/av1/common/loopfilter.h
+++ b/av1/common/loopfilter.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_H_
-#define VP10_COMMON_LOOPFILTER_H_
+#ifndef AV1_COMMON_LOOPFILTER_H_
+#define AV1_COMMON_LOOPFILTER_H_
 
 #include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/blockd.h"
 #include "av1/common/restoration.h"
@@ -89,49 +89,49 @@
 } LOOP_FILTER_MASK;
 
 /* assorted loopfilter functions which get used elsewhere */
-struct VP10Common;
+struct AV1Common;
 struct macroblockd;
-struct VP10LfSyncData;
+struct AV1LfSyncData;
 
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
-                     const int mi_col, MODE_INFO **mi_8x8,
-                     const int mode_info_stride, LOOP_FILTER_MASK *lfm);
+void av1_setup_mask(struct AV1Common *const cm, const int mi_row,
+                    const int mi_col, MODE_INFO **mi_8x8,
+                    const int mode_info_stride, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss00(struct AV1Common *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
-                                  struct macroblockd_plane *const plane,
-                                  int mi_row, LOOP_FILTER_MASK *lfm);
+void av1_filter_block_plane_ss11(struct AV1Common *const cm,
+                                 struct macroblockd_plane *const plane,
+                                 int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_non420(struct VP10Common *cm,
-                                    struct macroblockd_plane *plane,
-                                    MODE_INFO **mi_8x8, int mi_row, int mi_col);
+void av1_filter_block_plane_non420(struct AV1Common *cm,
+                                   struct macroblockd_plane *plane,
+                                   MODE_INFO **mi_8x8, int mi_row, int mi_col);
 
-void vp10_loop_filter_init(struct VP10Common *cm);
+void av1_loop_filter_init(struct AV1Common *cm);
 
 // Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(),
-// vp10_loop_filter_frame()
+// This should be called before av1_loop_filter_rows(),
+// av1_loop_filter_frame()
 // calls this function directly.
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
+void av1_loop_filter_frame_init(struct AV1Common *cm, int default_filt_lvl);
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
-                            struct macroblockd *mbd, int filter_level,
-                            int y_only, int partial_frame);
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                           struct macroblockd *mbd, int filter_level,
+                           int y_only, int partial_frame);
 
 // Apply the loop filter to [start, stop) macro block rows in frame_buffer.
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
-                           struct VP10Common *cm,
-                           struct macroblockd_plane planes[MAX_MB_PLANE],
-                           int start, int stop, int y_only);
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
+                          struct AV1Common *cm,
+                          struct macroblockd_plane planes[MAX_MB_PLANE],
+                          int start, int stop, int y_only);
 
 typedef struct LoopFilterWorkerData {
   YV12_BUFFER_CONFIG *frame_buffer;
-  struct VP10Common *cm;
+  struct AV1Common *cm;
   struct macroblockd_plane planes[MAX_MB_PLANE];
 
   int start;
@@ -139,14 +139,14 @@
   int y_only;
 } LFWorkerData;
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
+    struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
 
 // Operates on the rows described by 'lf_data'.
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_H_
+#endif  // AV1_COMMON_LOOPFILTER_H_
diff --git a/av1/common/mips/dspr2/itrans16_dspr2.c b/av1/common/mips/dspr2/itrans16_dspr2.c
index c0b9b2a..9e63d4d 100644
--- a/av1/common/mips/dspr2/itrans16_dspr2.c
+++ b/av1/common/mips/dspr2/itrans16_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "av1/common/idct.h"
@@ -21,8 +21,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
-                                 int tx_type) {
+void av1_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+                                int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *outptr = out;
@@ -90,7 +90,7 @@
                                            dest[j * pitch + i]);
       }
     } break;
-    default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans4_dspr2.c b/av1/common/mips/dspr2/itrans4_dspr2.c
index dcb28c9..61fc0e7 100644
--- a/av1/common/mips/dspr2/itrans4_dspr2.c
+++ b/av1/common/mips/dspr2/itrans4_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "av1/common/idct.h"
@@ -21,8 +21,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
-                              int dest_stride, int tx_type) {
+void av1_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+                             int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
   int16_t *outptr = out;
@@ -36,11 +36,11 @@
 
   switch (tx_type) {
     case DCT_DCT:  // DCT in both horizontal and vertical
-      vpx_idct4_rows_dspr2(input, outptr);
-      vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+      aom_idct4_rows_dspr2(input, outptr);
+      aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
       break;
     case ADST_DCT:  // ADST in vertical, DCT in horizontal
-      vpx_idct4_rows_dspr2(input, outptr);
+      aom_idct4_rows_dspr2(input, outptr);
 
       outptr = out;
 
@@ -66,7 +66,7 @@
           temp_in[i * 4 + j] = out[j * 4 + i];
         }
       }
-      vpx_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
+      aom_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
       break;
     case ADST_ADST:  // ADST in both directions
       for (i = 0; i < 4; ++i) {
@@ -84,7 +84,7 @@
               ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans8_dspr2.c b/av1/common/mips/dspr2/itrans8_dspr2.c
index 761d6f0..fe99f31 100644
--- a/av1/common/mips/dspr2/itrans8_dspr2.c
+++ b/av1/common/mips/dspr2/itrans8_dspr2.c
@@ -11,8 +11,8 @@
 #include <assert.h>
 #include <stdio.h>
 
-#include "./vpx_config.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
 #include "av1/common/common.h"
 #include "av1/common/blockd.h"
 #include "aom_dsp/mips/inv_txfm_dspr2.h"
@@ -20,8 +20,8 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
-                              int dest_stride, int tx_type) {
+void av1_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+                             int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
   int16_t *outptr = out;
@@ -78,7 +78,7 @@
               ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/msa/idct16x16_msa.c b/av1/common/mips/msa/idct16x16_msa.c
index baa3a97..e5a68fa 100644
--- a/av1/common/mips/msa/idct16x16_msa.c
+++ b/av1/common/mips/msa/idct16x16_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
-                               int32_t dst_stride, int32_t tx_type) {
+void av1_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+                              int32_t dst_stride, int32_t tx_type) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *out_ptr = &out[0];
@@ -24,13 +24,13 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+        aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
       }
       break;
@@ -38,12 +38,12 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
-        vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+        aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
       }
       break;
@@ -51,13 +51,13 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+        aom_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
       }
       break;
@@ -65,12 +65,12 @@
       /* transform rows */
       for (i = 0; i < 2; ++i) {
         /* process 16 * 8 block */
-        vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
+        aom_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7)));
       }
 
       /* transform columns */
       for (i = 0; i < 2; ++i) {
-        vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
+        aom_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
       }
       break;
diff --git a/av1/common/mips/msa/idct4x4_msa.c b/av1/common/mips/msa/idct4x4_msa.c
index 0620df7..7b4ba12 100644
--- a/av1/common/mips/msa/idct4x4_msa.c
+++ b/av1/common/mips/msa/idct4x4_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
-                            int32_t dst_stride, int32_t tx_type) {
+void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
   /* load vector elements of 4x4 block */
@@ -24,31 +24,31 @@
   switch (tx_type) {
     case DCT_DCT:
       /* DCT in horizontal */
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* DCT in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_DCT:
       /* DCT in horizontal */
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* ADST in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* DCT in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     case ADST_ADST:
       /* ADST in horizontal */
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       /* ADST in vertical */
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-      VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      AOM_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
     default: assert(0); break;
   }
diff --git a/av1/common/mips/msa/idct8x8_msa.c b/av1/common/mips/msa/idct8x8_msa.c
index 5c62c4a..ce61676 100644
--- a/av1/common/mips/msa/idct8x8_msa.c
+++ b/av1/common/mips/msa/idct8x8_msa.c
@@ -13,8 +13,8 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
-                            int32_t dst_stride, int32_t tx_type) {
+void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   /* load vector elements of 8x8 block */
@@ -26,42 +26,42 @@
   switch (tx_type) {
     case DCT_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       /* DCT in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       break;
     case ADST_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       /* ADST in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       /* DCT in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+      AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                      in4, in5, in6, in7);
       break;
     case ADST_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       /* ADST in vertical */
       TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                          in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+      AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
                 in5, in6, in7);
       break;
     default: assert(0); break;
@@ -72,7 +72,7 @@
   SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
diff --git a/av1/common/mv.h b/av1/common/mv.h
index dba3336..4908d74 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_MV_H_
-#define VP10_COMMON_MV_H_
+#ifndef AV1_COMMON_MV_H_
+#define AV1_COMMON_MV_H_
 
 #include "av1/common/common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 #if CONFIG_GLOBAL_MOTION
 #include "av1/common/warped_motion.h"
 #endif  // CONFIG_GLOBAL_MOTION
@@ -146,4 +146,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MV_H_
+#endif  // AV1_COMMON_MV_H_
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 836b065..e14df3c 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -146,7 +146,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_row_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_row_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int row_offset,
                              CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -164,7 +164,7 @@
           xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       const int len =
-          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
+          AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[candidate->sb_type]);
 
       newmv_count += add_ref_mv_candidate(
           candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -178,7 +178,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_col_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_col_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int col_offset,
                              CANDIDATE_MV *ref_mv_stack, uint8_t *refmv_count) {
@@ -196,7 +196,7 @@
           xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       const int len =
-          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
+          AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[candidate->sb_type]);
 
       newmv_count += add_ref_mv_candidate(
           candidate_mi, candidate, rf, refmv_count, ref_mv_stack,
@@ -210,7 +210,7 @@
   return newmv_count;
 }
 
-static uint8_t scan_blk_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static uint8_t scan_blk_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              const int mi_row, const int mi_col, int block,
                              const MV_REFERENCE_FRAME rf[2], int row_offset,
                              int col_offset, CANDIDATE_MV *ref_mv_stack,
@@ -288,7 +288,7 @@
 
   for (rf = 0; rf < 2; ++rf) {
     if (candidate->ref_frame[rf] == ref_frame) {
-      const int list_range = VPXMIN(refmv_count, MAX_MV_REF_CANDIDATES);
+      const int list_range = AOMMIN(refmv_count, MAX_MV_REF_CANDIDATES);
 
       const int_mv pred_mv = candidate->mv[rf];
       for (idx = 0; idx < list_range; ++idx)
@@ -304,7 +304,7 @@
   }
 }
 
-static void setup_ref_mv_list(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                               MV_REFERENCE_FRAME ref_frame,
                               uint8_t *refmv_count, CANDIDATE_MV *ref_mv_stack,
                               int_mv *mv_ref_list, int block, int mi_row,
@@ -320,11 +320,11 @@
           ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
           : NULL;
 
-  int bs = VPXMAX(xd->n8_w, xd->n8_h);
+  int bs = AOMMAX(xd->n8_w, xd->n8_h);
   int has_tr = has_top_right(xd, mi_row, mi_col, bs);
 
   MV_REFERENCE_FRAME rf[2];
-  vp10_set_ref_frame(rf, ref_frame);
+  av1_set_ref_frame(rf, ref_frame);
 
   mode_context[ref_frame] = 0;
   *refmv_count = 0;
@@ -502,7 +502,7 @@
                    xd->n8_h << 3, xd);
     }
   } else {
-    for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
+    for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
       mv_ref_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
       clamp_mv_ref(&mv_ref_list[idx].as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
     }
@@ -512,7 +512,7 @@
 
 // This function searches the neighbourhood of a given MB/SB
 // to try and find candidate reference vectors.
-static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                              int_mv *mv_ref_list, int block, int mi_row,
                              int mi_col, find_mv_refs_sync sync,
@@ -648,10 +648,10 @@
 
 #if CONFIG_EXT_INTER
 // This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
-                            MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
-                            int block, int mi_row, int mi_col,
-                            int16_t *mode_context) {
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+                           MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+                           int block, int mi_row, int mi_col,
+                           int16_t *mode_context) {
   int i, refmv_count = 0;
   const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
   int context_counter = 0;
@@ -691,26 +691,26 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
 #if CONFIG_REF_MV
-                       uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+                      uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
 #if CONFIG_EXT_INTER
-                       int16_t *compound_mode_context,
+                      int16_t *compound_mode_context,
 #endif  // CONFIG_EXT_INTER
 #endif
-                       int_mv *mv_ref_list, int mi_row, int mi_col,
-                       find_mv_refs_sync sync, void *const data,
-                       int16_t *mode_context) {
+                      int_mv *mv_ref_list, int mi_row, int mi_col,
+                      find_mv_refs_sync sync, void *const data,
+                      int16_t *mode_context) {
 #if CONFIG_REF_MV
   int idx, all_zero = 1;
 #endif
 #if CONFIG_EXT_INTER
-  vp10_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
+  av1_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col,
 #if CONFIG_REF_MV
-                         compound_mode_context);
+                        compound_mode_context);
 #else
-                         mode_context);
+                        mode_context);
 #endif  // CONFIG_REF_MV
   find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col, sync,
                    data, NULL);
@@ -730,8 +730,8 @@
 #endif
 }
 
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
-                            int_mv *near_mv) {
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                           int_mv *near_mv) {
   int i;
   // Make sure all the candidates are properly clamped etc
   for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
@@ -741,16 +741,16 @@
   *near_mv = mvlist[1];
 }
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
-                                    int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+                                   int ref, int mi_row, int mi_col,
 #if CONFIG_REF_MV
-                                    CANDIDATE_MV *ref_mv_stack,
-                                    uint8_t *ref_mv_count,
+                                   CANDIDATE_MV *ref_mv_stack,
+                                   uint8_t *ref_mv_count,
 #endif
 #if CONFIG_EXT_INTER
-                                    int_mv *mv_list,
+                                   int_mv *mv_list,
 #endif  // CONFIG_EXT_INTER
-                                    int_mv *nearest_mv, int_mv *near_mv) {
+                                   int_mv *nearest_mv, int_mv *near_mv) {
 #if !CONFIG_EXT_INTER
   int_mv mv_list[MAX_MV_REF_CANDIDATES];
 #endif  // !CONFIG_EXT_INTER
@@ -789,7 +789,7 @@
     clamp_mv_ref(&ref_mv_stack[idx].this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3,
                  xd);
 
-  for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
+  for (idx = 0; idx < AOMMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
     mv_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
 #endif
 
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index babd4f0..b65509a 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -7,8 +7,8 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef VP10_COMMON_MVREF_COMMON_H_
-#define VP10_COMMON_MVREF_COMMON_H_
+#ifndef AV1_COMMON_MVREF_COMMON_H_
+#define AV1_COMMON_MVREF_COMMON_H_
 
 #include "av1/common/onyxc_int.h"
 #include "av1/common/blockd.h"
@@ -340,7 +340,7 @@
 }
 
 static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
-  const int use_hp = allow_hp && vp10_use_mv_hp(mv);
+  const int use_hp = allow_hp && av1_use_mv_hp(mv);
   if (!use_hp) {
     if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
     if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
@@ -348,8 +348,8 @@
 }
 
 #if CONFIG_REF_MV
-static INLINE int vp10_nmv_ctx(const uint8_t ref_mv_count,
-                               const CANDIDATE_MV *ref_mv_stack) {
+static INLINE int av1_nmv_ctx(const uint8_t ref_mv_count,
+                              const CANDIDATE_MV *ref_mv_stack) {
 #if CONFIG_EXT_INTER
   return 0;
 #endif
@@ -365,7 +365,7 @@
   return 0;
 }
 
-static INLINE int8_t vp10_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
+static INLINE int8_t av1_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
   if (rf[1] > INTRA_FRAME) {
     return TOTAL_REFS_PER_FRAME + FWD_RF_OFFSET(rf[0]) +
            BWD_RF_OFFSET(rf[1]) * FWD_REFS;
@@ -386,8 +386,8 @@
 #endif
 };
 
-static INLINE void vp10_set_ref_frame(MV_REFERENCE_FRAME *rf,
-                                      int8_t ref_frame_type) {
+static INLINE void av1_set_ref_frame(MV_REFERENCE_FRAME *rf,
+                                     int8_t ref_frame_type) {
   if (ref_frame_type >= TOTAL_REFS_PER_FRAME) {
     rf[0] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][0];
     rf[1] = ref_frame_map[ref_frame_type - TOTAL_REFS_PER_FRAME][1];
@@ -399,7 +399,7 @@
   }
 }
 
-static INLINE int16_t vp10_mode_context_analyzer(
+static INLINE int16_t av1_mode_context_analyzer(
     const int16_t *const mode_context, const MV_REFERENCE_FRAME *const rf,
     BLOCK_SIZE bsize, int block) {
   int16_t mode_ctx = 0;
@@ -420,8 +420,8 @@
     return mode_context[rf[0]];
 }
 
-static INLINE uint8_t vp10_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
-                                   int ref_idx) {
+static INLINE uint8_t av1_drl_ctx(const CANDIDATE_MV *ref_mv_stack,
+                                  int ref_idx) {
   if (ref_mv_stack[ref_idx].weight >= REF_CAT_LEVEL &&
       ref_mv_stack[ref_idx + 1].weight >= REF_CAT_LEVEL) {
     if (ref_mv_stack[ref_idx].weight == ref_mv_stack[ref_idx + 1].weight)
@@ -447,45 +447,45 @@
 #endif
 
 typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
 #if CONFIG_REF_MV
-                       uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
+                      uint8_t *ref_mv_count, CANDIDATE_MV *ref_mv_stack,
 #if CONFIG_EXT_INTER
-                       int16_t *compound_mode_context,
+                      int16_t *compound_mode_context,
 #endif  // CONFIG_EXT_INTER
 #endif
-                       int_mv *mv_ref_list, int mi_row, int mi_col,
-                       find_mv_refs_sync sync, void *const data,
-                       int16_t *mode_context);
+                      int_mv *mv_ref_list, int mi_row, int mi_col,
+                      find_mv_refs_sync sync, void *const data,
+                      int16_t *mode_context);
 
 // Check a list of motion vectors by SAD score, using a number of rows of pixels
 // above and a number of columns of pixels to the left, to select the one with the
 // best score to use as the reference motion vector.
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
-                            int_mv *near_mv);
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                           int_mv *near_mv);
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
-                                    int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
+                                   int ref, int mi_row, int mi_col,
 #if CONFIG_REF_MV
-                                    CANDIDATE_MV *ref_mv_stack,
-                                    uint8_t *ref_mv_count,
+                                   CANDIDATE_MV *ref_mv_stack,
+                                   uint8_t *ref_mv_count,
 #endif
 #if CONFIG_EXT_INTER
-                                    int_mv *mv_list,
+                                   int_mv *mv_list,
 #endif  // CONFIG_EXT_INTER
-                                    int_mv *nearest_mv, int_mv *near_mv);
+                                   int_mv *nearest_mv, int_mv *near_mv);
 
 #if CONFIG_EXT_INTER
 // This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
-                            MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
-                            int block, int mi_row, int mi_col,
-                            int16_t *mode_context);
+void av1_update_mv_context(const MACROBLOCKD *xd, MODE_INFO *mi,
+                           MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
+                           int block, int mi_row, int mi_col,
+                           int16_t *mode_context);
 #endif  // CONFIG_EXT_INTER
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MVREF_COMMON_H_
+#endif  // AV1_COMMON_MVREF_COMMON_H_
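
For reference on the precision drop in lower_mv_precision() above: when high-precision motion vectors are not in use, odd (1/8-pel) row/col values are nudged one step toward zero so that only 1/4-pel values remain, and av1_find_best_ref_mvs() then hands back the leading clamped candidates as the nearest/near pair. A minimal standalone sketch of that rounding, using a stand-in MV type rather than the codec's own struct:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int16_t row, col; } SketchMV; /* stand-in for the codec's MV */

    /* Same arithmetic as lower_mv_precision() with use_hp == 0: odd (1/8-pel)
     * components move one step toward zero, leaving 1/4-pel values. */
    static void lower_precision(SketchMV *mv) {
      if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
      if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
    }

    int main(void) {
      SketchMV cand[2] = { { -13, 7 }, { 4, -6 } }; /* hypothetical candidates */
      int i;
      for (i = 0; i < 2; ++i) lower_precision(&cand[i]);
      /* prints nearest=(-12,6) near=(4,-6) */
      printf("nearest=(%d,%d) near=(%d,%d)\n", cand[0].row, cand[0].col,
             cand[1].row, cand[1].col);
      return 0;
    }
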
diff --git a/av1/common/odintrin.h b/av1/common/odintrin.h
index 87b1a36..8e9b3e4 100644
--- a/av1/common/odintrin.h
+++ b/av1/common/odintrin.h
@@ -1,9 +1,9 @@
-#ifndef VP10_COMMON_ODINTRIN_H_
-#define VP10_COMMON_ODINTRIN_H_
+#ifndef AV1_COMMON_ODINTRIN_H_
+#define AV1_COMMON_ODINTRIN_H_
 
 #include "av1/common/enums.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_ports/bitops.h"
 
 /*Smallest blocks are 4x4*/
@@ -33,7 +33,7 @@
 #define OD_DIVU(_x, _d) \
   (((_d) < OD_DIVU_DMAX) ? (OD_DIVU_SMALL((_x), (_d))) : ((_x) / (_d)))
 
-#define OD_MINI VPXMIN
+#define OD_MINI AOMMIN
 #define OD_CLAMPI(min, val, max) clamp((val), (min), (max))
 
 #define OD_CLZ0 (1)
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 55a8112..d3bc820 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_ONYXC_INT_H_
-#define VP10_COMMON_ONYXC_INT_H_
+#ifndef AV1_COMMON_ONYXC_INT_H_
+#define AV1_COMMON_ONYXC_INT_H_
 
-#include "./vpx_config.h"
-#include "aom/internal/vpx_codec_internal.h"
-#include "aom_util/vpx_thread.h"
-#include "./vp10_rtcd.h"
+#include "./aom_config.h"
+#include "aom/internal/aom_codec_internal.h"
+#include "aom_util/aom_thread.h"
+#include "./av1_rtcd.h"
 #include "av1/common/alloccommon.h"
 #include "av1/common/loopfilter.h"
 #include "av1/common/entropymv.h"
@@ -87,14 +87,14 @@
   MV_REF *mvs;
   int mi_rows;
   int mi_cols;
-  vpx_codec_frame_buffer_t raw_frame_buffer;
+  aom_codec_frame_buffer_t raw_frame_buffer;
   YV12_BUFFER_CONFIG buf;
 
   // The following variables will only be used in frame parallel decode.
 
   // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
   // that no FrameWorker owns, or is decoding, this buffer.
-  VPxWorker *frame_worker_owner;
+  AVxWorker *frame_worker_owner;
 
   // row and col indicate which position frame has been decoded to in real
   // pixel unit. They are reset to -1 when decoding begins and set to INT_MAX
@@ -114,8 +114,8 @@
   // Private data associated with the frame buffer callbacks.
   void *cb_priv;
 
-  vpx_get_frame_buffer_cb_fn_t get_fb_cb;
-  vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+  aom_get_frame_buffer_cb_fn_t get_fb_cb;
+  aom_release_frame_buffer_cb_fn_t release_fb_cb;
 
   RefCntBuffer frame_bufs[FRAME_BUFFERS];
 
@@ -123,9 +123,9 @@
   InternalFrameBufferList int_frame_buffers;
 } BufferPool;
 
-typedef struct VP10Common {
-  struct vpx_internal_error_info error;
-  vpx_color_space_t color_space;
+typedef struct AV1Common {
+  struct aom_internal_error_info error;
+  aom_color_space_t color_space;
   int color_range;
   int width;
   int height;
@@ -140,7 +140,7 @@
   int subsampling_x;
   int subsampling_y;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   // Marks if we need to use 16bit frame buffers (1: yes, 0: no).
   int use_highbitdepth;
 #endif
@@ -247,9 +247,9 @@
   MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */
 
   // Separate mi functions between encoder and decoder.
-  int (*alloc_mi)(struct VP10Common *cm, int mi_size);
-  void (*free_mi)(struct VP10Common *cm);
-  void (*setup_mi)(struct VP10Common *cm);
+  int (*alloc_mi)(struct AV1Common *cm, int mi_size);
+  void (*free_mi)(struct AV1Common *cm);
+  void (*setup_mi)(struct AV1Common *cm);
 
   // Grid of pointers to 8x8 MODE_INFO structs.  Any 8x8 not in the visible
   // area will be NULL.
@@ -307,7 +307,7 @@
 #if CONFIG_ENTROPY
   // The initial probabilities for a frame, before any subframe backward update,
   // and after forward update.
-  vp10_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
   // Number of subframe backward updates already done
   uint8_t coef_probs_update_idx;
   // Signal if the backward update is subframe or end-of-frame
@@ -319,9 +319,9 @@
   unsigned int current_video_frame;
   BITSTREAM_PROFILE profile;
 
-  // VPX_BITS_8 in profile 0 or 1, VPX_BITS_10 or VPX_BITS_12 in profile 2 or 3.
-  vpx_bit_depth_t bit_depth;
-  vpx_bit_depth_t dequant_bit_depth;  // bit_depth of current dequantizer
+  // AOM_BITS_8 in profile 0 or 1, AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
+  aom_bit_depth_t bit_depth;
+  aom_bit_depth_t dequant_bit_depth;  // bit_depth of current dequantizer
 
   int error_resilient_mode;
 
@@ -336,8 +336,8 @@
 
   // Private data associated with the frame buffer callbacks.
   void *cb_priv;
-  vpx_get_frame_buffer_cb_fn_t get_fb_cb;
-  vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+  aom_get_frame_buffer_cb_fn_t get_fb_cb;
+  aom_release_frame_buffer_cb_fn_t release_fb_cb;
 
   // Handles memory for the codec.
   InternalFrameBufferList int_frame_buffers;
@@ -356,7 +356,7 @@
   // scratch memory for intraonly/keyframe forward updates from default tables
   // - this is intentionally not placed in FRAME_CONTEXT since it's reset upon
   // each keyframe and not used afterwards
-  vpx_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+  aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
 #if CONFIG_GLOBAL_MOTION
   Global_Motion_Params global_motion[TOTAL_REFS_PER_FRAME];
 #endif
@@ -367,7 +367,7 @@
 #if CONFIG_DERING
   int dering_level;
 #endif
-} VP10_COMMON;
+} AV1_COMMON;
 
 // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
 // frame reference count.
@@ -387,7 +387,7 @@
 #endif
 }
 
-static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
   if (index < 0 || index >= REF_FRAMES) return NULL;
   if (cm->ref_frame_map[index] < 0) return NULL;
   assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
@@ -395,11 +395,11 @@
 }
 
 static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
-    const VP10_COMMON *const cm) {
+    const AV1_COMMON *const cm) {
   return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
 }
 
-static INLINE int get_free_fb(VP10_COMMON *cm) {
+static INLINE int get_free_fb(AV1_COMMON *cm) {
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
   int i;
 
@@ -429,20 +429,20 @@
   bufs[new_idx].ref_count++;
 }
 
-static INLINE int mi_cols_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_cols_aligned_to_sb(const AV1_COMMON *cm) {
   return ALIGN_POWER_OF_TWO(cm->mi_cols, cm->mib_size_log2);
 }
 
-static INLINE int mi_rows_aligned_to_sb(const VP10_COMMON *cm) {
+static INLINE int mi_rows_aligned_to_sb(const AV1_COMMON *cm) {
   return ALIGN_POWER_OF_TWO(cm->mi_rows, cm->mib_size_log2);
 }
 
-static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
+static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
   return cm->frame_type == KEY_FRAME || cm->intra_only;
 }
 
-static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                         tran_low_t *dqcoeff) {
+static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        tran_low_t *dqcoeff) {
   int i;
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     xd->plane[i].dqcoeff = dqcoeff;
@@ -536,13 +536,13 @@
 #endif
 }
 
-static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm,
+static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
                                                const MODE_INFO *mi,
                                                const MODE_INFO *above_mi,
                                                const MODE_INFO *left_mi,
                                                int block) {
-  const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
-  const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
+  const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+  const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
   return cm->kf_y_prob[above][left];
 }
 
@@ -622,8 +622,8 @@
   return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
 }
 
-static INLINE void vp10_zero_above_context(VP10_COMMON *const cm,
-                                           int mi_col_start, int mi_col_end) {
+static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
+                                          int mi_col_start, int mi_col_end) {
   const int width = mi_col_end - mi_col_start;
 
   const int offset_y = 2 * mi_col_start;
@@ -631,22 +631,22 @@
   const int offset_uv = offset_y >> cm->subsampling_x;
   const int width_uv = width_y >> cm->subsampling_x;
 
-  vp10_zero_array(cm->above_context[0] + offset_y, width_y);
-  vp10_zero_array(cm->above_context[1] + offset_uv, width_uv);
-  vp10_zero_array(cm->above_context[2] + offset_uv, width_uv);
+  av1_zero_array(cm->above_context[0] + offset_y, width_y);
+  av1_zero_array(cm->above_context[1] + offset_uv, width_uv);
+  av1_zero_array(cm->above_context[2] + offset_uv, width_uv);
 
-  vp10_zero_array(cm->above_seg_context + mi_col_start, width);
+  av1_zero_array(cm->above_seg_context + mi_col_start, width);
 
 #if CONFIG_VAR_TX
-  vp10_zero_array(cm->above_txfm_context + mi_col_start, width);
+  av1_zero_array(cm->above_txfm_context + mi_col_start, width);
 #endif  // CONFIG_VAR_TX
 }
 
-static INLINE void vp10_zero_left_context(MACROBLOCKD *const xd) {
-  vp10_zero(xd->left_context);
-  vp10_zero(xd->left_seg_context);
+static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
+  av1_zero(xd->left_context);
+  av1_zero(xd->left_seg_context);
 #if CONFIG_VAR_TX
-  vp10_zero(xd->left_txfm_context_buffer);
+  av1_zero(xd->left_txfm_context_buffer);
 #endif
 }
 
@@ -684,7 +684,7 @@
 }
 #endif
 
-static INLINE PARTITION_TYPE get_partition(const VP10_COMMON *const cm,
+static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
                                            const int mi_row, const int mi_col,
                                            const BLOCK_SIZE bsize) {
   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
@@ -726,8 +726,7 @@
   }
 }
 
-static INLINE void set_sb_size(VP10_COMMON *const cm,
-                               const BLOCK_SIZE sb_size) {
+static INLINE void set_sb_size(AV1_COMMON *const cm, const BLOCK_SIZE sb_size) {
   cm->sb_size = sb_size;
   cm->mib_size = num_8x8_blocks_wide_lookup[cm->sb_size];
   cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
@@ -737,4 +736,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ONYXC_INT_H_
+#endif  // AV1_COMMON_ONYXC_INT_H_
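
mi_cols_aligned_to_sb() and mi_rows_aligned_to_sb() above round the mi grid up to a whole number of superblocks with ALIGN_POWER_OF_TWO. A small sketch of that rounding, assuming the usual add-then-mask form of the macro (its definition is not part of this diff):

    #include <stdio.h>

    /* Assumed definition: round value up to the next multiple of 2^n. */
    #define ALIGN_POWER_OF_TWO(value, n) \
      (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

    int main(void) {
      const int mi_cols = 53;       /* hypothetical frame width in mi units */
      const int mib_size_log2 = 3;  /* 8 mi units per superblock */
      /* 53 -> 56: the grid is padded out to 7 full superblock columns. */
      printf("%d\n", ALIGN_POWER_OF_TWO(mi_cols, mib_size_log2));
      return 0;
    }
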
diff --git a/av1/common/pred_common.c b/av1/common/pred_common.c
index 0e1045e..6fe1188 100644
--- a/av1/common/pred_common.c
+++ b/av1/common/pred_common.c
@@ -34,7 +34,7 @@
   return ref_type;
 }
 
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int ctx_offset =
       (mbmi->ref_frame[1] > INTRA_FRAME) * INTER_FILTER_COMP_OFFSET;
@@ -67,7 +67,7 @@
   return filter_type_ctx;
 }
 #else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
   // Note:
   // The mode info data structure has a one element border above and to the
   // left of the entries corresponding to real macroblocks.
@@ -115,7 +115,7 @@
       if (mode != DC_PRED && mode != TM_PRED) {
         int p_angle =
             mode_to_angle_map[mode] + ref_mbmi->angle_delta[0] * ANGLE_STEP;
-        if (vp10_is_intra_filter_switchable(p_angle)) {
+        if (av1_is_intra_filter_switchable(p_angle)) {
           ref_type = ref_mbmi->intra_filter;
         }
       }
@@ -124,7 +124,7 @@
   return ref_type;
 }
 
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd) {
   int left_type = INTRA_FILTERS, above_type = INTRA_FILTERS;
 
   if (xd->left_available) left_type = get_ref_intra_filter(xd->left_mbmi);
@@ -149,7 +149,7 @@
 // 1 - intra/inter, inter/intra
 // 2 - intra/--, --/intra
 // 3 - intra/intra
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
+int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
   const int has_above = xd->up_available;
@@ -171,8 +171,8 @@
 #define CHECK_BWDREF_OR_ALTREF(ref_frame) \
   (((ref_frame) == BWDREF_FRAME) || ((ref_frame) == ALTREF_FRAME))
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+                                   const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -218,8 +218,8 @@
 
 #else  // CONFIG_EXT_REFS
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd) {
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
+                                   const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -279,8 +279,8 @@
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is either
 //               GOLDEN_FRAME or LAST3_FRAME.
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+                                    const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -379,8 +379,8 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is LAST_FRAME,
 // conditioned on it being either LAST_FRAME or LAST2_FRAME.
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -480,8 +480,8 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is GOLDEN_FRAME,
 // conditioned on it being either GOLDEN or LAST3.
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -575,8 +575,8 @@
 }
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
-                                        const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+                                       const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -687,8 +687,8 @@
 #else  // CONFIG_EXT_REFS
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd) {
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
+                                    const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -777,7 +777,7 @@
 // or a BWDREF_FRAME.
 //
 // NOTE(zoeliu): The probability of ref_frame[0] is ALTREF/BWDREF.
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -856,7 +856,7 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is ALTREF_FRAME, conditioned
 // on it being either ALTREF_FRAME or BWDREF_FRAME.
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -950,7 +950,7 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is LAST3/GOLDEN, conditioned
 // on it being either LAST3/GOLDEN/LAST2/LAST.
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1047,7 +1047,7 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is LAST2_FRAME, conditioned
 // on it being either LAST2_FRAME or LAST_FRAME.
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1139,7 +1139,7 @@
 //
 // NOTE(zoeliu): The probability that ref_frame[0] is GOLDEN_FRAME, conditioned
 // on it being either GOLDEN_FRAME or LAST3_FRAME.
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1229,7 +1229,7 @@
 
 #else  // CONFIG_EXT_REFS
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -1295,7 +1295,7 @@
   return pred_context;
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
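
The 0..3 intra/inter context enumerated in the comment above av1_get_intra_inter_context() depends only on whether each neighbour exists and whether it is intra coded. A sketch of that mapping alone, not of the library's exact implementation:

    #include <assert.h>

    /* 0 - inter/inter, inter/--, --/inter, --/--
     * 1 - intra/inter, inter/intra
     * 2 - intra/--, --/intra
     * 3 - intra/intra */
    static int intra_inter_ctx(int has_above, int above_intra, int has_left,
                               int left_intra) {
      if (has_above && has_left)
        return (above_intra && left_intra) ? 3 : (above_intra || left_intra);
      if (has_above || has_left) return 2 * (has_above ? above_intra : left_intra);
      return 0;
    }

    int main(void) {
      assert(intra_inter_ctx(1, 0, 1, 0) == 0); /* inter/inter */
      assert(intra_inter_ctx(1, 1, 1, 0) == 1); /* intra/inter */
      assert(intra_inter_ctx(1, 1, 0, 0) == 2); /* intra/--    */
      assert(intra_inter_ctx(1, 1, 1, 1) == 3); /* intra/intra */
      assert(intra_inter_ctx(0, 0, 0, 0) == 0); /* --/--       */
      return 0;
    }
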
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 9a3e3f1..5873bf0 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -8,37 +8,37 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_PRED_COMMON_H_
-#define VP10_COMMON_PRED_COMMON_H_
+#ifndef AV1_COMMON_PRED_COMMON_H_
+#define AV1_COMMON_PRED_COMMON_H_
 
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-static INLINE int get_segment_id(const VP10_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *cm,
                                  const uint8_t *segment_ids, BLOCK_SIZE bsize,
                                  int mi_row, int mi_col) {
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
-  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
-  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int xmis = AOMMIN(cm->mi_cols - mi_col, bw);
+  const int ymis = AOMMIN(cm->mi_rows - mi_row, bh);
   int x, y, segment_id = MAX_SEGMENTS;
 
   for (y = 0; y < ymis; ++y)
     for (x = 0; x < xmis; ++x)
       segment_id =
-          VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+          AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
 
   assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
   return segment_id;
 }
 
-static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+static INLINE int av1_get_pred_context_seg_id(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_sip =
@@ -48,12 +48,12 @@
   return above_sip + left_sip;
 }
 
-static INLINE vpx_prob vp10_get_pred_prob_seg_id(
+static INLINE aom_prob av1_get_pred_prob_seg_id(
     const struct segmentation_probs *segp, const MACROBLOCKD *xd) {
-  return segp->pred_probs[vp10_get_pred_context_seg_id(xd)];
+  return segp->pred_probs[av1_get_pred_context_seg_id(xd)];
 }
 
-static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
+static INLINE int av1_get_skip_context(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
@@ -61,109 +61,108 @@
   return above_skip + left_skip;
 }
 
-static INLINE vpx_prob vp10_get_skip_prob(const VP10_COMMON *cm,
-                                          const MACROBLOCKD *xd) {
-  return cm->fc->skip_probs[vp10_get_skip_context(xd)];
+static INLINE aom_prob av1_get_skip_prob(const AV1_COMMON *cm,
+                                         const MACROBLOCKD *xd) {
+  return cm->fc->skip_probs[av1_get_skip_context(xd)];
 }
 
 #if CONFIG_DUAL_FILTER
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd, int dir);
 #else
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
 #endif
 
 #if CONFIG_EXT_INTRA
-int vp10_get_pred_context_intra_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_intra_interp(const MACROBLOCKD *xd);
 #endif  // CONFIG_EXT_INTRA
 
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
+int av1_get_intra_inter_context(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
-                                                 const MACROBLOCKD *xd) {
-  return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
+static INLINE aom_prob av1_get_intra_inter_prob(const AV1_COMMON *cm,
+                                                const MACROBLOCKD *xd) {
+  return cm->fc->intra_inter_prob[av1_get_intra_inter_context(xd)];
 }
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm, const MACROBLOCKD *xd);
+
+static INLINE aom_prob av1_get_reference_mode_prob(const AV1_COMMON *cm,
+                                                   const MACROBLOCKD *xd) {
+  return cm->fc->comp_inter_prob[av1_get_reference_mode_context(cm, xd)];
+}
+
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p(const AV1_COMMON *cm,
                                                     const MACROBLOCKD *xd) {
-  return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
-}
-
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd);
-
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
-                                                     const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
+  const int pred_context = av1_get_pred_context_comp_ref_p(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][0];
 }
 
 #if CONFIG_EXT_REFS
-int vp10_get_pred_context_comp_ref_p1(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p1(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p1(const VP10_COMMON *cm,
-                                                      const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p1(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p1(const AV1_COMMON *cm,
+                                                     const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_ref_p1(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][1];
 }
 
-int vp10_get_pred_context_comp_ref_p2(const VP10_COMMON *cm,
-                                      const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_ref_p2(const AV1_COMMON *cm,
+                                     const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p2(const VP10_COMMON *cm,
-                                                      const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p2(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p2(const AV1_COMMON *cm,
+                                                     const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_ref_p2(cm, xd);
   return cm->fc->comp_ref_prob[pred_context][2];
 }
 
-int vp10_get_pred_context_comp_bwdref_p(const VP10_COMMON *cm,
-                                        const MACROBLOCKD *xd);
+int av1_get_pred_context_comp_bwdref_p(const AV1_COMMON *cm,
+                                       const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_comp_bwdref_p(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_bwdref_p(cm, xd);
+static INLINE aom_prob av1_get_pred_prob_comp_bwdref_p(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  const int pred_context = av1_get_pred_context_comp_bwdref_p(cm, xd);
   return cm->fc->comp_bwdref_prob[pred_context][0];
 }
 
 #endif  // CONFIG_EXT_REFS
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p1(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p1(xd)][0];
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p2(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p2(xd)][1];
 }
 
 #if CONFIG_EXT_REFS
-int vp10_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p3(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p3(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p3(xd)][2];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p3(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p3(xd)][2];
 }
 
-int vp10_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p4(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p4(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p4(xd)][3];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p4(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p4(xd)][3];
 }
 
-int vp10_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p5(const MACROBLOCKD *xd);
 
-static INLINE vpx_prob vp10_get_pred_prob_single_ref_p5(const VP10_COMMON *cm,
-                                                        const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p5(xd)][4];
+static INLINE aom_prob av1_get_pred_prob_single_ref_p5(const AV1_COMMON *cm,
+                                                       const MACROBLOCKD *xd) {
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p5(xd)][4];
 }
 #endif  // CONFIG_EXT_REFS
 
@@ -192,7 +191,7 @@
 }
 
 #if CONFIG_VAR_TX
-static void update_tx_counts(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void update_tx_counts(AV1_COMMON *cm, MACROBLOCKD *xd,
                              MB_MODE_INFO *mbmi, BLOCK_SIZE plane_bsize,
                              TX_SIZE tx_size, int blk_row, int blk_col,
                              TX_SIZE max_tx_size, int ctx) {
@@ -232,7 +231,7 @@
   }
 }
 
-static INLINE void inter_block_tx_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void inter_block_tx_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
                                                MB_MODE_INFO *mbmi,
                                                BLOCK_SIZE plane_bsize,
                                                int ctx) {
@@ -254,4 +253,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_PRED_COMMON_H_
+#endif  // AV1_COMMON_PRED_COMMON_H_
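
get_segment_id() above takes the minimum segment id over the mi units a block covers, with the two AOMMIN bounds clipping the scan at the frame edge. A reduced sketch of the same scan over a toy 4x4 segment map (all values here are made up):

    #include <stdio.h>

    #define SKETCH_MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void) {
      const int mi_cols = 4, mi_rows = 4; /* toy mi grid */
      const unsigned char seg[16] = { 0, 0, 3, 3,
                                      0, 1, 3, 2,
                                      1, 1, 2, 2,
                                      1, 1, 2, 2 };
      const int mi_row = 1, mi_col = 2, bw = 2, bh = 2; /* 2x2 block at (1,2) */
      const int xmis = SKETCH_MIN(mi_cols - mi_col, bw); /* clip at right edge  */
      const int ymis = SKETCH_MIN(mi_rows - mi_row, bh); /* clip at bottom edge */
      int x, y, segment_id = 255; /* sentinel, overwritten by the first mi unit */
      for (y = 0; y < ymis; ++y)
        for (x = 0; x < xmis; ++x)
          segment_id = SKETCH_MIN(segment_id,
                                  seg[(mi_row + y) * mi_cols + (mi_col + x)]);
      printf("%d\n", segment_id); /* prints 2: the minimum of {3, 2, 2, 2} */
      return 0;
    }
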
diff --git a/av1/common/quant_common.c b/av1/common/quant_common.c
index 79d8fb8..3adfa7b 100644
--- a/av1/common/quant_common.c
+++ b/av1/common/quant_common.c
@@ -130,8 +130,8 @@
     cuml_bins[i] = ROUND_POWER_OF_TWO(cuml_knots[i] * q, 7);
 }
 
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
-                              tran_low_t *cuml_bins, int q_profile) {
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+                             tran_low_t *cuml_bins, int q_profile) {
   const uint8_t *knots = get_nuq_knots(qindex, band, q_profile);
   tran_low_t cuml_bins_[NUQ_KNOTS], *cuml_bins_ptr;
   tran_low_t doff;
@@ -150,15 +150,15 @@
       cuml_bins_ptr[NUQ_KNOTS - 1] + ROUND_POWER_OF_TWO((64 - doff) * q, 7);
 }
 
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq) {
   if (v <= NUQ_KNOTS)
     return dq[v];
   else
     return dq[NUQ_KNOTS] + (v - NUQ_KNOTS) * q;
 }
 
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
-  tran_low_t dqmag = vp10_dequant_abscoeff_nuq(abs(v), q, dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq) {
+  tran_low_t dqmag = av1_dequant_abscoeff_nuq(abs(v), q, dq);
   return (v < 0 ? -dqmag : dqmag);
 }
 #endif  // CONFIG_NEW_QUANT
@@ -185,7 +185,7 @@
   1184, 1232, 1282, 1336,
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
   4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,   37,
   40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,   78,   82,
@@ -260,7 +260,7 @@
   1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
 };
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
   4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,   40,
   44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,   88,   92,
@@ -312,14 +312,14 @@
 };
 #endif
 
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
@@ -328,14 +328,14 @@
 #endif
 }
 
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
-#if CONFIG_VP9_HIGHBITDEPTH
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case AOM_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
-      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      assert(0 && "bit_depth should be AOM_BITS_8, AOM_BITS_10 or AOM_BITS_12");
       return -1;
   }
 #else
@@ -344,8 +344,8 @@
 #endif
 }
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                    int base_qindex) {
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+                   int base_qindex) {
   if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
     const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
     const int seg_qindex =
@@ -357,11 +357,11 @@
 }
 
 #if CONFIG_AOM_QM
-qm_val_t *aom_iqmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_iqmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
                        int log2sizem2, int is_intra) {
   return &cm->giqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
-qm_val_t *aom_qmatrix(VP10_COMMON *cm, int qmlevel, int is_chroma,
+qm_val_t *aom_qmatrix(AV1_COMMON *cm, int qmlevel, int is_chroma,
                       int log2sizem2, int is_intra) {
   return &cm->gqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
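
Both av1_dc_quant() and av1_ac_quant() above clamp qindex + delta into [0, MAXQ] before indexing the table for the requested bit depth, so out-of-range requests saturate at the ends of the table (the 8-bit DC table above tops out at 1336). A small sketch of just that index clamp, assuming MAXQ is 255:

    #include <stdio.h>

    static int clamp_q(int v, int lo, int hi) {
      return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void) {
      const int maxq = 255; /* assumed MAXQ, i.e. QINDEX_RANGE - 1 */
      /* A high qindex plus a positive segment delta saturates at MAXQ and so
       * selects the final table entry. */
      printf("%d\n", clamp_q(250 + 10, 0, maxq)); /* prints 255 */
      /* A negative delta at qindex 0 saturates at the first entry instead. */
      printf("%d\n", clamp_q(0 - 4, 0, maxq));    /* prints 0 */
      return 0;
    }
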
@@ -371,7 +371,7 @@
 static uint16_t
     wt_matrix_ref[NUM_QM_LEVELS][2][2][4 * 4 + 8 * 8 + 16 * 16 + 32 * 32];
 
-void aom_qm_init(VP10_COMMON *cm) {
+void aom_qm_init(AV1_COMMON *cm) {
   int q, c, f, t, size;
   int current;
   for (q = 0; q < NUM_QM_LEVELS; ++q) {
diff --git a/av1/common/quant_common.h b/av1/common/quant_common.h
index 6ceed49..d04103e 100644
--- a/av1/common/quant_common.h
+++ b/av1/common/quant_common.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_QUANT_COMMON_H_
-#define VP10_COMMON_QUANT_COMMON_H_
+#ifndef AV1_COMMON_QUANT_COMMON_H_
+#define AV1_COMMON_QUANT_COMMON_H_
 
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
 #include "av1/common/seg_common.h"
 #include "av1/common/enums.h"
 
@@ -34,25 +34,25 @@
 #define DEFAULT_QM_LAST (NUM_QM_LEVELS - 1)
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
-int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                    int base_qindex);
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
+                   int base_qindex);
 #if CONFIG_AOM_QM
 // Reduce the large number of quantizers to a smaller number of levels for which
 // different matrices may be defined
 static inline int aom_get_qmlevel(int qindex, int first, int last) {
   int qmlevel = (qindex * (last + 1 - first) + QINDEX_RANGE / 2) / QINDEX_RANGE;
-  qmlevel = VPXMIN(qmlevel + first, NUM_QM_LEVELS - 1);
+  qmlevel = AOMMIN(qmlevel + first, NUM_QM_LEVELS - 1);
   return qmlevel;
 }
-void aom_qm_init(struct VP10Common *cm);
-qm_val_t *aom_iqmatrix(struct VP10Common *cm, int qindex, int comp,
+void aom_qm_init(struct AV1Common *cm);
+qm_val_t *aom_iqmatrix(struct AV1Common *cm, int qindex, int comp,
                        int log2sizem2, int is_intra);
-qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
+qm_val_t *aom_qmatrix(struct AV1Common *cm, int qindex, int comp,
                       int log2sizem2, int is_intra);
 #endif
 
@@ -64,13 +64,13 @@
 
 typedef tran_low_t dequant_val_type_nuq[NUQ_KNOTS + 1];
 typedef tran_low_t cuml_bins_type_nuq[NUQ_KNOTS];
-void vp10_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
-                              tran_low_t *cuml_bins, int dq_off_index);
-tran_low_t vp10_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
-tran_low_t vp10_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
+void av1_get_dequant_val_nuq(int q, int qindex, int band, tran_low_t *dq,
+                             tran_low_t *cuml_bins, int dq_off_index);
+tran_low_t av1_dequant_abscoeff_nuq(int v, int q, const tran_low_t *dq);
+tran_low_t av1_dequant_coeff_nuq(int v, int q, const tran_low_t *dq);
 
 static INLINE int get_dq_profile_from_ctx(int q_ctx) {
-  return VPXMIN(q_ctx, QUANT_PROFILES - 1);
+  return AOMMIN(q_ctx, QUANT_PROFILES - 1);
 }
 #endif  // CONFIG_NEW_QUANT
 
@@ -78,4 +78,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_QUANT_COMMON_H_
+#endif  // AV1_COMMON_QUANT_COMMON_H_
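
aom_get_qmlevel() above maps a quantizer index linearly (with rounding) onto the [first, last] range of matrix levels and then caps the result at NUM_QM_LEVELS - 1. A worked sketch of that arithmetic, assuming QINDEX_RANGE is 256 and using a hypothetical 16-level setup (first = 0, last = 15):

    #include <stdio.h>

    #define SKETCH_MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Same arithmetic as aom_get_qmlevel(), with the constants passed in so the
     * sketch stays self-contained. */
    static int get_qmlevel(int qindex, int first, int last, int qindex_range,
                           int num_qm_levels) {
      int qmlevel =
          (qindex * (last + 1 - first) + qindex_range / 2) / qindex_range;
      return SKETCH_MIN(qmlevel + first, num_qm_levels - 1);
    }

    int main(void) {
      /* qindex 0 lands on the first level, 128 mid-range, 255 is capped at 15. */
      printf("%d %d %d\n", get_qmlevel(0, 0, 15, 256, 16),
             get_qmlevel(128, 0, 15, 256, 16), get_qmlevel(255, 0, 15, 256, 16));
      /* prints: 0 8 15 */
      return 0;
    }
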
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 0c3b93a..3db35e7 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -10,11 +10,11 @@
 
 #include <assert.h>
 
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_scale_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_dsp/blend.h"
 
 #include "av1/common/blockd.h"
@@ -242,9 +242,9 @@
   return master;
 }
 
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
-                                  BLOCK_SIZE sb_type, int offset_x,
-                                  int offset_y) {
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+                                 BLOCK_SIZE sb_type, int offset_x,
+                                 int offset_y) {
   const uint8_t *mask =
       get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
   if (mask) mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
@@ -326,13 +326,13 @@
     if (wbits == 0) continue;
     for (w = 0; w < wtypes; ++w) {
       mask = get_wedge_mask_inplace(w, 0, bsize);
-      vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+      aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
                         bh);
       wedge_params->masks[0][w] = dst;
       dst += bw * bh;
 
       mask = get_wedge_mask_inplace(w, 1, bsize);
-      vpx_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
+      aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw, NULL, 0, NULL, 0, bw,
                         bh);
       wedge_params->masks[1][w] = dst;
       dst += bw * bh;
@@ -342,7 +342,7 @@
 }
 
 // Equation of line: f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8) = 0
-void vp10_init_wedge_masks() {
+void av1_init_wedge_masks() {
   init_wedge_master_masks();
   init_wedge_signs();
   init_wedge_masks();
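
The wedge boundary in the comment above is the zero set of f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8), and the side of the boundary a pixel falls on is just the sign of f. A sketch of that side test, scaled by 8 to stay in integers, with made-up line parameters (it does not reproduce the library's soft-mask generation):

    #include <stdio.h>

    /* Sign of 8*f(x, y) for f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8). */
    static int wedge_side(const int a[4], int x, int y, int w, int h) {
      const int f8 = a[0] * (8 * x - a[2] * w) + a[1] * (8 * y - a[3] * h);
      return (f8 > 0) - (f8 < 0);
    }

    int main(void) {
      const int a[4] = { 1, 0, 4, 4 }; /* hypothetical vertical split at x = w/2 */
      const int w = 16, h = 16;
      printf("%d %d\n", wedge_side(a, 2, 5, w, h),  /* -1: left of the boundary  */
             wedge_side(a, 10, 5, w, h));           /* +1: right of the boundary */
      return 0;
    }
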
@@ -355,13 +355,13 @@
     BLOCK_SIZE sb_type, int wedge_offset_x, int wedge_offset_y, int h, int w) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                           wedge_offset_x, wedge_offset_y);
-  vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+                                          wedge_offset_x, wedge_offset_y);
+  aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                      mask, MASK_MASTER_STRIDE, h, w, subh, subw);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_masked_compound_wedge_extend_highbd(
     uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
     const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -369,13 +369,13 @@
     int bd) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                           wedge_offset_x, wedge_offset_y);
-  vpx_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
+                                          wedge_offset_x, wedge_offset_y);
+  aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
                             src1_stride, mask, MASK_MASTER_STRIDE, h, w, subh,
                             subw, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_SUPERTX
 
 static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
@@ -388,13 +388,13 @@
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask =
-      vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
-  vpx_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+      av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+  aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                      mask, 4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh,
                      subw);
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_masked_compound_wedge_highbd(
     uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
     const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
@@ -404,28 +404,28 @@
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask =
-      vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
-  vpx_highbd_blend_a64_mask(
+      av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+  aom_highbd_blend_a64_mask(
       dst_8, dst_stride, src0_8, src0_stride, src1_8, src1_stride, mask,
       4 * num_4x4_blocks_wide_lookup[sb_type], h, w, subh, subw, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const int subpel_x, const int subpel_y,
-                                      const struct scale_factors *sf, int w,
-                                      int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+                                     uint8_t *dst, int dst_stride,
+                                     const int subpel_x, const int subpel_y,
+                                     const struct scale_factors *sf, int w,
+                                     int h,
 #if CONFIG_DUAL_FILTER
-                                      const INTERP_FILTER *interp_filter,
+                                     const INTERP_FILTER *interp_filter,
 #else
-                                      const INTERP_FILTER interp_filter,
+                                     const INTERP_FILTER interp_filter,
 #endif
-                                      int xs, int ys,
+                                     int xs, int ys,
 #if CONFIG_SUPERTX
-                                      int wedge_offset_x, int wedge_offset_y,
+                                     int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                      const MACROBLOCKD *xd) {
+                                     const MACROBLOCKD *xd) {
   const MODE_INFO *mi = xd->mi[0];
 // The prediction filter types used here should be those for
 // the second reference block.
@@ -436,13 +436,13 @@
 #else
   INTERP_FILTER tmp_ipf = interp_filter;
 #endif  // CONFIG_DUAL_FILTER
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst_[2 * MAX_SB_SQUARE]);
   uint8_t *tmp_dst = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
                          ? CONVERT_TO_BYTEPTR(tmp_dst_)
                          : tmp_dst_;
-  vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
-                            subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+  av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+                           subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
 #if CONFIG_SUPERTX
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     build_masked_compound_wedge_extend_highbd(
@@ -466,10 +466,10 @@
                                 mi->mbmi.interinter_wedge_sign,
                                 mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
-#else   // CONFIG_VP9_HIGHBITDEPTH
+#else   // CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst[MAX_SB_SQUARE]);
-  vp10_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
-                            subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
+  av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
+                           subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
 #if CONFIG_SUPERTX
   build_masked_compound_wedge_extend(
       dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
@@ -481,12 +481,12 @@
                               mi->mbmi.interinter_wedge_sign, mi->mbmi.sb_type,
                               h, w);
 #endif  // CONFIG_SUPERTX
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 #endif  // CONFIG_EXT_INTER
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
 #if CONFIG_DUAL_FILTER
@@ -498,7 +498,7 @@
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -508,22 +508,22 @@
                          sf, w, h, ref, interp_filter, sf->x_step_q4,
                          sf->y_step_q4, bd);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
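
Both builders above normalise the motion vector to 1/16-pel before filtering: an MV passed with MV_PRECISION_Q3 is doubled, av1_scale_mv maps it through the reference's scale factors, and the low bits become the sub-pel phase while the high bits become a whole-pixel offset. A small sketch of that split, assuming the 4 fractional bits / 16 filter phases implied by SUBPEL_MASK here; names are illustrative:

/* Split a q4 (1/16-pel) MV component into a whole-pixel displacement and a
 * sub-pel phase that selects one of 16 interpolation kernels (assumes an
 * arithmetic right shift for negative values, as the surrounding code does). */
static void split_mv_component_q4(int mv_q4, int *fullpel, int *subpel) {
  *fullpel = mv_q4 >> 4; /* integer pixels */
  *subpel = mv_q4 & 15;  /* phase 0..15 */
}

/* Example: a 1/8-pel MV of +21 doubles to q4 value 42, i.e. 2 full pixels
 * plus phase 10, so the predictor reads 2 pixels over and applies the
 * 10/16-phase kernel. */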
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                                uint8_t *dst, int dst_stride, const MV *src_mv,
-                                const struct scale_factors *sf, int w, int h,
-                                int ref,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+                               int dst_stride, const MV *src_mv,
+                               const struct scale_factors *sf, int w, int h,
+                               int ref,
 #if CONFIG_DUAL_FILTER
-                                const INTERP_FILTER *interp_filter,
+                               const INTERP_FILTER *interp_filter,
 #else
-                                const INTERP_FILTER interp_filter,
+                               const INTERP_FILTER interp_filter,
 #endif
-                                enum mv_precision precision, int x, int y) {
+                               enum mv_precision precision, int x, int y) {
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -600,7 +600,7 @@
           uint8_t *pre;
           MV32 scaled_mv;
           int xs, ys, subpel_x, subpel_y;
-          const int is_scaled = vp10_is_scaled(sf);
+          const int is_scaled = av1_is_scaled(sf);
 
           x = x_base + idx * x_step;
           y = y_base + idy * y_step;
@@ -610,7 +610,7 @@
           if (is_scaled) {
             pre =
                 pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-            scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+            scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
             xs = sf->x_step_q4;
             ys = sf->y_step_q4;
           } else {
@@ -628,7 +628,7 @@
 #if CONFIG_EXT_INTER
           if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
               mi->mbmi.use_wedge_interinter)
-            vp10_make_masked_inter_predictor(
+            av1_make_masked_inter_predictor(
                 pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
                 sf, w, h, mi->mbmi.interp_filter, xs, ys,
 #if CONFIG_SUPERTX
@@ -637,9 +637,9 @@
                 xd);
           else
 #endif  // CONFIG_EXT_INTER
-            vp10_make_inter_predictor(
-                pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
-                sf, x_step, y_step, ref, mi->mbmi.interp_filter, xs, ys, xd);
+            av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                                     subpel_x, subpel_y, sf, x_step, y_step,
+                                     ref, mi->mbmi.interp_filter, xs, ys, xd);
         }
       }
     }
@@ -667,11 +667,11 @@
     uint8_t *pre;
     MV32 scaled_mv;
     int xs, ys, subpel_x, subpel_y;
-    const int is_scaled = vp10_is_scaled(sf);
+    const int is_scaled = av1_is_scaled(sf);
 
     if (is_scaled) {
       pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-      scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
       xs = sf->x_step_q4;
       ys = sf->y_step_q4;
     } else {
@@ -689,36 +689,36 @@
 #if CONFIG_EXT_INTER
     if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
         mi->mbmi.use_wedge_interinter)
-      vp10_make_masked_inter_predictor(pre, pre_buf->stride, dst,
-                                       dst_buf->stride, subpel_x, subpel_y, sf,
-                                       w, h, mi->mbmi.interp_filter, xs, ys,
+      av1_make_masked_inter_predictor(pre, pre_buf->stride, dst,
+                                      dst_buf->stride, subpel_x, subpel_y, sf,
+                                      w, h, mi->mbmi.interp_filter, xs, ys,
 #if CONFIG_SUPERTX
-                                       wedge_offset_x, wedge_offset_y,
+                                      wedge_offset_x, wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                       xd);
+                                      xd);
     else
 #else  // CONFIG_EXT_INTER
 #if CONFIG_GLOBAL_MOTION
     if (is_global[ref])
-      vp10_warp_plane(&(gm[ref]->motion_params),
-#if CONFIG_VP9_HIGHBITDEPTH
-                      xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                      pre_buf->buf0, pre_buf->width, pre_buf->height,
-                      pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
-                      (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
-                      pd->subsampling_x, pd->subsampling_y, xs, ys);
+      av1_warp_plane(&(gm[ref]->motion_params),
+#if CONFIG_AOM_HIGHBITDEPTH
+                     xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                     pre_buf->buf0, pre_buf->width, pre_buf->height,
+                     pre_buf->stride, dst, (mi_x >> pd->subsampling_x) + x,
+                     (mi_y >> pd->subsampling_y) + y, w, h, dst_buf->stride,
+                     pd->subsampling_x, pd->subsampling_y, xs, ys);
     else
 #endif  // CONFIG_GLOBAL_MOTION
 #endif  // CONFIG_EXT_INTER
-      vp10_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                                subpel_x, subpel_y, sf, w, h, ref,
-                                mi->mbmi.interp_filter, xs, ys, xd);
+      av1_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                               subpel_x, subpel_y, sf, w, h, ref,
+                               mi->mbmi.interp_filter, xs, ys, xd);
   }
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
-                                       int ir, int ic, int mi_row, int mi_col) {
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+                                      int ic, int mi_row, int mi_col) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   MODE_INFO *const mi = xd->mi[0];
   const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
@@ -732,27 +732,27 @@
   for (ref = 0; ref < 1 + is_compound; ++ref) {
     const uint8_t *pre =
         &pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      vp10_highbd_build_inter_predictor(
+      av1_highbd_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
           mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir, xd->bd);
     } else {
-      vp10_build_inter_predictor(
+      av1_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, mi->mbmi.interp_filter, MV_PRECISION_Q3,
           mi_col * MI_SIZE + 4 * ic, mi_row * MI_SIZE + 4 * ir);
     }
 #else
-    vp10_build_inter_predictor(
+    av1_build_inter_predictor(
         pre, pd->pre[ref].stride, dst, pd->dst.stride,
         &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
         ref, mi->mbmi.interp_filter, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
         mi_row * MI_SIZE + 4 * ir);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 }
 
@@ -804,61 +804,61 @@
   }
 }
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
-                                         xd->plane[0].dst.stride, bsize);
+    av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+                                        xd->plane[0].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize, int plane) {
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize, int plane) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi)) {
     if (plane == 0) {
-      vp10_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
-                                           xd->plane[0].dst.stride, bsize);
+      av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
+                                          xd->plane[0].dst.stride, bsize);
     } else {
-      vp10_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
-                                           xd->plane[plane].dst.stride, plane,
-                                           bsize);
+      av1_build_interintra_predictors_sbc(xd, xd->plane[plane].dst.buf,
+                                          xd->plane[plane].dst.stride, plane,
+                                          bsize);
     }
   }
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                      BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                     BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                     MAX_MB_PLANE - 1);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors_sbuv(
+    av1_build_interintra_predictors_sbuv(
         xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize) {
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                     MAX_MB_PLANE - 1);
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors(
+    av1_build_interintra_predictors(
         xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
         xd->plane[0].dst.stride, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col) {
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row,
+                          int mi_col) {
   uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
                                            src->v_buffer };
   const int widths[MAX_MB_PLANE] = { src->y_crop_width, src->uv_crop_width,
@@ -877,9 +877,9 @@
   }
 }
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *sf) {
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *sf) {
   if (src != NULL) {
     int i;
     uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
@@ -929,7 +929,7 @@
   return NULL;
 }
 
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
     int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
     BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
@@ -946,9 +946,9 @@
 
   int w_remain, h_remain;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int is_hdb = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   assert(bsize <= BLOCK_32X32);
   assert(IMPLIES(plane == 0, ssx == 0));
@@ -963,13 +963,13 @@
       dst += h_offset * dst_stride;
       pre += h_offset * pre_stride;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (is_hdb)
-        vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
+        aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre,
                                    pre_stride, mask, h, top_w, xd->bd);
       else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-        vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+        aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
                             mask, h, top_w);
 
       dst += h * dst_stride;
@@ -984,13 +984,13 @@
       dst += w_offset;
       pre += w_offset;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (is_hdb)
-        vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
+        aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre,
                                    pre_stride, mask, top_h, w, xd->bd);
       else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-        vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+        aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride,
                             mask, top_h, w);
 
       dst += w;
@@ -1007,7 +1007,7 @@
     return;
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (is_hdb) {
     dst = (uint8_t *)CONVERT_TO_SHORTPTR(dst);
     pre = (const uint8_t *)CONVERT_TO_SHORTPTR(pre);
@@ -1015,7 +1015,7 @@
     pre_stride *= 2;
     w_remain *= 2;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   do {
     memcpy(dst, pre, w_remain * sizeof(uint8_t));
@@ -1024,13 +1024,12 @@
   } while (--h_remain);
 }
 
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                                  int mi_row_ori,
-                                                  int mi_col_ori,
+                                                 int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                                  int mi_row, int mi_col,
-                                                  BLOCK_SIZE bsize, int block) {
+                                                 int mi_row, int mi_col,
+                                                 BLOCK_SIZE bsize, int block) {
   // Prediction function used in supertx:
   // Use the mv at current block (which is less than 8x8)
   // to get prediction of a block located at (mi_row, mi_col) at size of bsize
@@ -1068,19 +1067,19 @@
   }
 #if CONFIG_EXT_INTER
   if (is_interintra_pred(&xd->mi[0]->mbmi))
-    vp10_build_interintra_predictors(
+    av1_build_interintra_predictors(
         xd, xd->plane[0].dst.buf, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
         xd->plane[0].dst.stride, xd->plane[1].dst.stride,
         xd->plane[2].dst.stride, bsize);
 #endif  // CONFIG_EXT_INTER
 }
 
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                           int mi_row_ori, int mi_col_ori,
+                                          int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                           int mi_row, int mi_col,
-                                           BLOCK_SIZE bsize) {
+                                          int mi_row, int mi_col,
+                                          BLOCK_SIZE bsize) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
@@ -1152,7 +1151,7 @@
 };
 #endif  // CONFIG_EXT_PARTITION
 
-const uint8_t *vp10_get_obmc_mask(int length) {
+const uint8_t *av1_get_obmc_mask(int length) {
   switch (length) {
     case 1: return obmc_mask_1;
     case 2: return obmc_mask_2;
@@ -1171,22 +1170,22 @@
 // top/left neighboring blocks' inter predictors with the regular inter
 // prediction. We assume the original prediction (bmc) is stored in
 // xd->plane[].dst.buf
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                      int mi_row, int mi_col,
-                                      uint8_t *above[MAX_MB_PLANE],
-                                      int above_stride[MAX_MB_PLANE],
-                                      uint8_t *left[MAX_MB_PLANE],
-                                      int left_stride[MAX_MB_PLANE]) {
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int mi_row, int mi_col,
+                                     uint8_t *above[MAX_MB_PLANE],
+                                     int above_stride[MAX_MB_PLANE],
+                                     uint8_t *left[MAX_MB_PLANE],
+                                     int left_stride[MAX_MB_PLANE]) {
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int plane, i;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // handle above row
   if (xd->up_available) {
     const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
-    const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+    const int miw = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
     const int mi_row_offset = -1;
 
     assert(miw > 0);
@@ -1197,7 +1196,7 @@
       const MB_MODE_INFO *const above_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+          AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
 
       if (is_neighbor_overlappable(above_mbmi)) {
         for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1209,15 +1208,15 @@
           const int tmp_stride = above_stride[plane];
           const uint8_t *const tmp =
               &above[plane][(i * MI_SIZE) >> pd->subsampling_x];
-          const uint8_t *const mask = vp10_get_obmc_mask(bh);
+          const uint8_t *const mask = av1_get_obmc_mask(bh);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (is_hbd)
-            vpx_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+            aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                        tmp_stride, mask, bh, bw, xd->bd);
           else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-            vpx_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+            aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bh, bw);
         }
       }
@@ -1228,7 +1227,7 @@
   // handle left column
   if (xd->left_available) {
     const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
-    const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+    const int mih = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
     const int mi_col_offset = -1;
 
     assert(mih > 0);
@@ -1239,7 +1238,7 @@
       const MB_MODE_INFO *const left_mbmi =
           &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
       const int mi_step =
-          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+          AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
 
       if (is_neighbor_overlappable(left_mbmi)) {
         for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -1252,15 +1251,15 @@
           const int tmp_stride = left_stride[plane];
           const uint8_t *const tmp =
               &left[plane][(i * MI_SIZE * tmp_stride) >> pd->subsampling_y];
-          const uint8_t *const mask = vp10_get_obmc_mask(bw);
+          const uint8_t *const mask = av1_get_obmc_mask(bw);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (is_hbd)
-            vpx_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+            aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                        tmp_stride, mask, bh, bw, xd->bd);
           else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-            vpx_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+            aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bh, bw);
         }
       }
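
av1_build_obmc_inter_prediction overlaps the block's own prediction (already sitting in xd->plane[].dst.buf) with the predictions its above and left neighbours would produce, fading the neighbour out with a 1-D mask from av1_get_obmc_mask: aom_blend_a64_vmask applies one weight per row for the top overlap, aom_blend_a64_hmask one weight per column for the left overlap. A standalone sketch of the vertical case, mirroring the argument order of the calls above (dst keeps weight mask[r], the neighbour gets 64 - mask[r]); names are illustrative:

#include <stdint.h>

/* Fold an overlapping strip of the above-neighbour's prediction (tmp) into
 * the block's own prediction (dst); row r keeps mask[r]/64 of dst and
 * (64 - mask[r])/64 of tmp. */
static void obmc_blend_above_sketch(uint8_t *dst, int dst_stride,
                                    const uint8_t *tmp, int tmp_stride,
                                    const uint8_t *mask, int bh, int bw) {
  for (int r = 0; r < bh; ++r) {
    const int m = mask[r]; /* one weight per row of the overlap */
    for (int c = 0; c < bw; ++c) {
      const int own = dst[r * dst_stride + c];
      const int nbr = tmp[r * tmp_stride + c];
      dst[r * dst_stride + c] = (uint8_t)((m * own + (64 - m) * nbr + 32) >> 6);
    }
  }
}

The horizontal variant is identical with the mask indexed by column instead of row.
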
@@ -1282,19 +1281,19 @@
 }
 #endif  // CONFIG_EXT_INTER
 
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                          int mi_row, int mi_col,
-                                          uint8_t *tmp_buf[MAX_MB_PLANE],
-                                          int tmp_width[MAX_MB_PLANE],
-                                          int tmp_height[MAX_MB_PLANE],
-                                          int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                         int mi_row, int mi_col,
+                                         uint8_t *tmp_buf[MAX_MB_PLANE],
+                                         int tmp_width[MAX_MB_PLANE],
+                                         int tmp_height[MAX_MB_PLANE],
+                                         int tmp_stride[MAX_MB_PLANE]) {
   const TileInfo *const tile = &xd->tile;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int i, j, mi_step, ref;
 
   if (mi_row <= tile->mi_row_start) return;
 
-  for (i = 0; i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
+  for (i = 0; i < AOMMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
     int mi_row_offset = -1;
     int mi_col_offset = i;
     int mi_x, mi_y, bw, bh;
@@ -1304,7 +1303,7 @@
     MB_MODE_INFO backup_mbmi;
 #endif  // CONFIG_EXT_INTER
 
-    mi_step = VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+    mi_step = AOMMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
 
     if (!is_neighbor_overlappable(above_mbmi)) continue;
 
@@ -1324,11 +1323,11 @@
       RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
       xd->block_refs[ref] = ref_buf;
-      if ((!vp10_is_valid_scale(&ref_buf->sf)))
-        vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+      if ((!av1_is_valid_scale(&ref_buf->sf)))
+        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reference frame has invalid dimensions");
-      vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
-                            &ref_buf->sf);
+      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
+                           &ref_buf->sf);
     }
 
     xd->mb_to_left_edge = -(((mi_col + i) * MI_SIZE) * 8);
@@ -1338,7 +1337,7 @@
     for (j = 0; j < MAX_MB_PLANE; ++j) {
       const struct macroblockd_plane *pd = &xd->plane[j];
       bw = (mi_step * 8) >> pd->subsampling_x;
-      bh = VPXMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
+      bh = AOMMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
                   4);
 
       if (above_mbmi->sb_type < BLOCK_8X8) {
@@ -1379,19 +1378,19 @@
   xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
 }
 
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                         int mi_row, int mi_col,
-                                         uint8_t *tmp_buf[MAX_MB_PLANE],
-                                         int tmp_width[MAX_MB_PLANE],
-                                         int tmp_height[MAX_MB_PLANE],
-                                         int tmp_stride[MAX_MB_PLANE]) {
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        int mi_row, int mi_col,
+                                        uint8_t *tmp_buf[MAX_MB_PLANE],
+                                        int tmp_width[MAX_MB_PLANE],
+                                        int tmp_height[MAX_MB_PLANE],
+                                        int tmp_stride[MAX_MB_PLANE]) {
   const TileInfo *const tile = &xd->tile;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   int i, j, mi_step, ref;
 
   if (mi_col == 0 || (mi_col - 1 < tile->mi_col_start)) return;
 
-  for (i = 0; i < VPXMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
+  for (i = 0; i < AOMMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
     int mi_row_offset = i;
     int mi_col_offset = -1;
     int mi_x, mi_y, bw, bh;
@@ -1401,7 +1400,7 @@
     MB_MODE_INFO backup_mbmi;
 #endif  // CONFIG_EXT_INTER
 
-    mi_step = VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+    mi_step = AOMMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
 
     if (!is_neighbor_overlappable(left_mbmi)) continue;
 
@@ -1421,11 +1420,11 @@
       RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
       xd->block_refs[ref] = ref_buf;
-      if ((!vp10_is_valid_scale(&ref_buf->sf)))
-        vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+      if ((!av1_is_valid_scale(&ref_buf->sf)))
+        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                            "Reference frame has invalid dimensions");
-      vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
-                            &ref_buf->sf);
+      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i, mi_col,
+                           &ref_buf->sf);
     }
 
     xd->mb_to_top_edge = -(((mi_row + i) * MI_SIZE) * 8);
@@ -1434,7 +1433,7 @@
 
     for (j = 0; j < MAX_MB_PLANE; ++j) {
       const struct macroblockd_plane *pd = &xd->plane[j];
-      bw = VPXMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
+      bw = AOMMAX((num_4x4_blocks_wide_lookup[bsize] * 2) >> pd->subsampling_x,
                   4);
       bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y;
 
@@ -1515,10 +1514,10 @@
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask =
-          vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
-      vpx_blend_a64_mask(
+      aom_blend_a64_mask(
           comppred, compstride, intrapred, intrastride, interpred, interstride,
           mask, 4 * num_4x4_blocks_wide_lookup[bsize], bh, bw, subh, subw);
     }
@@ -1531,7 +1530,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[i * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1542,7 +1541,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[j * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1556,7 +1555,7 @@
                        ii_weights1d[j * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1570,7 +1569,7 @@
                        ii_weights1d[i * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1581,7 +1580,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[(i < j ? i : j) * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1594,7 +1593,7 @@
               (ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
               1;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1605,7 +1604,7 @@
     default:
       for (i = 0; i < bh; ++i) {
         for (j = 0; j < bw; ++j) {
-          comppred[i * compstride + j] = VPX_BLEND_AVG(
+          comppred[i * compstride + j] = AOM_BLEND_AVG(
               intrapred[i * intrastride + j], interpred[i * interstride + j]);
         }
       }
@@ -1613,7 +1612,7 @@
   }
 }
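
combine_interintra blends the intra predictor into the inter predictor on a 0..256 weight scale (AOM_BLEND_A256), taking the weight from the 1-D table ii_weights1d indexed by row, column, or a mix of the two depending on the interintra mode, so the intra term counts most near the edge that mode predicts from. A sketch of the vertical-mode case only; the weight table passed in is a stand-in, not the codec's ii_weights1d:

#include <stdint.h>

/* Row-wise interintra blend on a 0..256 scale: row i takes weights1d[i]/256
 * of the intra predictor and the remainder from the inter predictor. */
static void combine_interintra_v_sketch(uint8_t *comp, int comp_stride,
                                        const uint8_t *intra, int intra_stride,
                                        const uint8_t *inter, int inter_stride,
                                        const int *weights1d, int bh, int bw) {
  for (int i = 0; i < bh; ++i) {
    const int w = weights1d[i]; /* assumed larger near the top edge */
    for (int j = 0; j < bw; ++j) {
      const int a = intra[i * intra_stride + j];
      const int b = inter[i * inter_stride + j];
      comp[i * comp_stride + j] = (uint8_t)((w * a + (256 - w) * b + 128) >> 8);
    }
  }
}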
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void combine_interintra_highbd(
     INTERINTRA_MODE mode, int use_wedge_interintra, int wedge_index,
     int wedge_sign, BLOCK_SIZE bsize, BLOCK_SIZE plane_bsize,
@@ -1631,10 +1630,10 @@
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask =
-          vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
-      vpx_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
+      aom_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
                                 interpred8, interstride, mask, bw, bh, bw, subh,
                                 subw, bd);
     }
@@ -1647,7 +1646,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[i * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1658,7 +1657,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[j * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1672,7 +1671,7 @@
                        ii_weights1d[j * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1686,7 +1685,7 @@
                        ii_weights1d[i * size_scale]) >>
                       2;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1697,7 +1696,7 @@
         for (j = 0; j < bw; ++j) {
           int scale = ii_weights1d[(i < j ? i : j) * size_scale];
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1710,7 +1709,7 @@
               (ii_weights1d[i * size_scale] + ii_weights1d[j * size_scale]) >>
               1;
           comppred[i * compstride + j] =
-              VPX_BLEND_A256(scale, intrapred[i * intrastride + j],
+              AOM_BLEND_A256(scale, intrapred[i * intrastride + j],
                              interpred[i * interstride + j]);
         }
       }
@@ -1721,14 +1720,14 @@
     default:
       for (i = 0; i < bh; ++i) {
         for (j = 0; j < bw; ++j) {
-          comppred[i * compstride + j] = VPX_BLEND_AVG(
+          comppred[i * compstride + j] = AOM_BLEND_AVG(
               interpred[i * interstride + j], intrapred[i * intrastride + j]);
         }
       }
       break;
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Break down rectangular intra prediction for joint spatio-temporal prediction
 // into two square intra predictions.
@@ -1745,47 +1744,47 @@
   TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
 
   if (bwl == bhl) {
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
 
   } else if (bwl < bhl) {
     uint8_t *src_2 = ref + pxbw * ref_stride;
     uint8_t *dst_2 = dst + pxbw * dst_stride;
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
       uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
       memcpy(src_216 - ref_stride, dst_216 - dst_stride,
              sizeof(*src_216) * pxbw);
     } else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     {
       memcpy(src_2 - ref_stride, dst_2 - dst_stride, sizeof(*src_2) * pxbw);
     }
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
-                             dst_2, dst_stride, 0, 1 << bwl, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+                            dst_2, dst_stride, 0, 1 << bwl, plane);
   } else {  // bwl > bhl
     int i;
     uint8_t *src_2 = ref + pxbh;
     uint8_t *dst_2 = dst + pxbh;
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
-                             dst, dst_stride, 0, 0, plane);
-#if CONFIG_VP9_HIGHBITDEPTH
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, ref, ref_stride,
+                            dst, dst_stride, 0, 0, plane);
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
       uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
       for (i = 0; i < pxbh; ++i)
         src_216[i * ref_stride - 1] = dst_216[i * dst_stride - 1];
     } else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     {
       for (i = 0; i < pxbh; ++i)
         src_2[i * ref_stride - 1] = dst_2[i * dst_stride - 1];
     }
-    vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
-                             dst_2, dst_stride, 1 << bhl, 0, plane);
+    av1_predict_intra_block(xd, bwl, bhl, max_tx_size, mode, src_2, ref_stride,
+                            dst_2, dst_stride, 1 << bhl, 0, plane);
   }
 }
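
The function above handles non-square interintra blocks by running two square intra predictions: predict the first square, copy its boundary row (tall blocks) or column (wide blocks) into the reference area of the second square, then predict the second square from that freshly copied edge, which is exactly what the memcpy of dst_2 - dst_stride and the per-row copy of dst_2[i * dst_stride - 1] do. A schematic sketch of the tall-block case, with a caller-supplied square predictor standing in for av1_predict_intra_block:

#include <stdint.h>
#include <string.h>

typedef void (*square_intra_fn)(const uint8_t *ref, int ref_stride,
                                uint8_t *dst, int dst_stride, int size);

/* Predict a size x (2*size) tall block as two stacked squares; the bottom
 * square reuses the last predicted row of the top square as its above-row. */
static void predict_tall_block_sketch(square_intra_fn predict, uint8_t *ref,
                                      int ref_stride, uint8_t *dst,
                                      int dst_stride, int size) {
  predict(ref, ref_stride, dst, dst_stride, size); /* top square */

  uint8_t *ref2 = ref + size * ref_stride; /* bottom square's reference area */
  uint8_t *dst2 = dst + size * dst_stride; /* bottom square's output area */

  /* Seed the bottom square's above-row with the top square's last output row. */
  memcpy(ref2 - ref_stride, dst2 - dst_stride, size * sizeof(*ref2));

  predict(ref2, ref_stride, dst2, dst_stride, size); /* bottom square */
}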
 
@@ -1795,20 +1794,20 @@
   D117_PRED, D153_PRED, D207_PRED, D63_PRED, TM_PRED
 };
 
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
-                                                BLOCK_SIZE bsize, int plane,
-                                                uint8_t *dst, int dst_stride) {
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+                                               BLOCK_SIZE bsize, int plane,
+                                               uint8_t *dst, int dst_stride) {
   build_intra_predictors_for_interintra(
       xd, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride, dst,
       dst_stride, interintra_to_intra_mode[xd->mi[0]->mbmi.interintra_mode],
       bsize, plane);
 }
 
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
-                             const uint8_t *inter_pred, int inter_stride,
-                             const uint8_t *intra_pred, int intra_stride) {
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+                            const uint8_t *inter_pred, int inter_stride,
+                            const uint8_t *intra_pred, int intra_stride) {
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     combine_interintra_highbd(
         xd->mi[0]->mbmi.interintra_mode, xd->mi[0]->mbmi.use_wedge_interintra,
@@ -1818,7 +1817,7 @@
         inter_stride, intra_pred, intra_stride, xd->bd);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   combine_interintra(xd->mi[0]->mbmi.interintra_mode,
                      xd->mi[0]->mbmi.use_wedge_interintra,
                      xd->mi[0]->mbmi.interintra_wedge_index,
@@ -1827,63 +1826,63 @@
                      inter_pred, inter_stride, intra_pred, intra_stride);
 }
 
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, intrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(
+    av1_build_intra_predictors_for_interintra(
         xd, bsize, 0, CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, 0, ypred, ystride,
-                            CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, 0, ypred, ystride,
+                           CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   {
     DECLARE_ALIGNED(16, uint8_t, intrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
-                                               MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
-                            MAX_SB_SIZE);
+    av1_build_intra_predictors_for_interintra(xd, bsize, 0, intrapredictor,
+                                              MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
+                           MAX_SB_SIZE);
   }
 }
 
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
-                                          int ustride, int plane,
-                                          BLOCK_SIZE bsize) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+                                         int ustride, int plane,
+                                         BLOCK_SIZE bsize) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     DECLARE_ALIGNED(16, uint16_t, uintrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(
+    av1_build_intra_predictors_for_interintra(
         xd, bsize, plane, CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, plane, upred, ustride,
-                            CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, plane, upred, ustride,
+                           CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
     return;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   {
     DECLARE_ALIGNED(16, uint8_t, uintrapredictor[MAX_SB_SQUARE]);
-    vp10_build_intra_predictors_for_interintra(xd, bsize, plane,
-                                               uintrapredictor, MAX_SB_SIZE);
-    vp10_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
-                            MAX_SB_SIZE);
+    av1_build_intra_predictors_for_interintra(xd, bsize, plane, uintrapredictor,
+                                              MAX_SB_SIZE);
+    av1_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
+                           MAX_SB_SIZE);
   }
 }
 
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize) {
-  vp10_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
-  vp10_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize) {
+  av1_build_interintra_predictors_sbc(xd, upred, ustride, 1, bsize);
+  av1_build_interintra_predictors_sbc(xd, vpred, vstride, 2, bsize);
 }
 
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
-                                      uint8_t *upred, uint8_t *vpred,
-                                      int ystride, int ustride, int vstride,
-                                      BLOCK_SIZE bsize) {
-  vp10_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
-  vp10_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
-                                        bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+                                     uint8_t *upred, uint8_t *vpred,
+                                     int ystride, int ustride, int vstride,
+                                     BLOCK_SIZE bsize) {
+  av1_build_interintra_predictors_sby(xd, ypred, ystride, bsize);
+  av1_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride,
+                                       bsize);
 }
 
 // Builds the inter-predictor for the single ref case
@@ -1899,7 +1898,7 @@
 
   const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
   struct buf_2d *const pre_buf = &pd->pre[ref];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t *const dst =
       (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? CONVERT_TO_BYTEPTR(ext_dst)
                                                    : ext_dst) +
@@ -1922,11 +1921,11 @@
   uint8_t *pre;
   MV32 scaled_mv;
   int xs, ys, subpel_x, subpel_y;
-  const int is_scaled = vp10_is_scaled(sf);
+  const int is_scaled = av1_is_scaled(sf);
 
   if (is_scaled) {
     pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-    scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+    scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
     xs = sf->x_step_q4;
     ys = sf->y_step_q4;
   } else {
@@ -1941,12 +1940,12 @@
   pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
          (scaled_mv.col >> SUBPEL_BITS);
 
-  vp10_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
-                            subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
-                            ys, xd);
+  av1_make_inter_predictor(pre, pre_buf->stride, dst, ext_dst_stride, subpel_x,
+                           subpel_y, sf, w, h, 0, mi->mbmi.interp_filter, xs,
+                           ys, xd);
 }
 
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
     MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
     int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]) {
   int plane;
@@ -1987,7 +1986,7 @@
 
   if (is_compound && is_interinter_wedge_used(mbmi->sb_type) &&
       mbmi->use_wedge_interinter) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       build_masked_compound_wedge_highbd(
           dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
@@ -1995,28 +1994,30 @@
           mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
           mbmi->sb_type, h, w, xd->bd);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       build_masked_compound_wedge(
           dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
           ext_dst_stride1, mbmi->interinter_wedge_index,
           mbmi->interinter_wedge_sign, mbmi->sb_type, h, w);
   } else {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-      vpx_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
+      aom_highbd_convolve_copy(CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
                                dst, dst_buf->stride, NULL, 0, NULL, 0, w, h,
                                xd->bd);
     else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-      vpx_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      aom_convolve_copy(ext_dst0, ext_dst_stride0, dst, dst_buf->stride, NULL,
                         0, NULL, 0, w, h);
   }
 }
 
-void vp10_build_wedge_inter_predictor_from_buf(
-    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
-    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
-    int ext_dst_stride1[3]) {
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+                                              int plane_from, int plane_to,
+                                              uint8_t *ext_dst0[3],
+                                              int ext_dst_stride0[3],
+                                              uint8_t *ext_dst1[3],
+                                              int ext_dst_stride1[3]) {
   int plane;
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize =
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 092926d..4182d9f 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RECONINTER_H_
-#define VP10_COMMON_RECONINTER_H_
+#ifndef AV1_COMMON_RECONINTER_H_
+#define AV1_COMMON_RECONINTER_H_
 
 #include "av1/common/filter.h"
 #include "av1/common/onyxc_int.h"
-#include "av1/common/vp10_convolve.h"
-#include "aom/vpx_integer.h"
+#include "av1/common/av1_convolve.h"
+#include "aom/aom_integer.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -33,27 +33,27 @@
                                    int xs, int ys) {
 #if CONFIG_DUAL_FILTER
   InterpFilterParams interp_filter_params_x =
-      vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+      av1_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
   InterpFilterParams interp_filter_params_y =
-      vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+      av1_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
 #else
   InterpFilterParams interp_filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
 
 #if CONFIG_DUAL_FILTER
   if (interp_filter_params_x.taps == SUBPEL_TAPS &&
       interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
 #else
   if (interp_filter_params.taps == SUBPEL_TAPS) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
 #endif
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (IsInterpolatingFilter(interp_filter)) {
@@ -72,12 +72,12 @@
     // ref_idx > 0 means this is the second reference frame
     // first reference frame's prediction result is already in dst
     // therefore we need to average the first and second results
-    vp10_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
-                  subpel_x, xs, subpel_y, ys, ref_idx);
+    av1_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                 subpel_x, xs, subpel_y, ys, ref_idx);
   }
 }
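
In inter_predictor above, ref_idx distinguishes the two halves of a compound prediction: the first reference (ref_idx == 0) writes its filtered block straight into dst, and the second one tells av1_convolve to average into whatever is already there, as the comment notes. A sketch of that write-then-average convention, with a plain copy standing in for the actual interpolation:

#include <stdint.h>

/* ref_idx == 0 overwrites dst with the (already filtered) prediction;
 * ref_idx > 0 rounds-and-averages it into the first reference's result. */
static void accumulate_prediction_sketch(uint8_t *dst, int dst_stride,
                                         const uint8_t *pred, int pred_stride,
                                         int w, int h, int ref_idx) {
  for (int r = 0; r < h; ++r) {
    for (int c = 0; c < w; ++c) {
      const int p = pred[r * pred_stride + c];
      dst[r * dst_stride + c] =
          ref_idx ? (uint8_t)((dst[r * dst_stride + c] + p + 1) >> 1)
                  : (uint8_t)p;
    }
  }
}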
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_inter_predictor(const uint8_t *src, int src_stride,
                                           uint8_t *dst, int dst_stride,
                                           const int subpel_x,
@@ -92,27 +92,27 @@
                                           int xs, int ys, int bd) {
 #if CONFIG_DUAL_FILTER
   InterpFilterParams interp_filter_params_x =
-      vp10_get_interp_filter_params(interp_filter[1 + 2 * ref]);
+      av1_get_interp_filter_params(interp_filter[1 + 2 * ref]);
   InterpFilterParams interp_filter_params_y =
-      vp10_get_interp_filter_params(interp_filter[0 + 2 * ref]);
+      av1_get_interp_filter_params(interp_filter[0 + 2 * ref]);
 #else
   InterpFilterParams interp_filter_params =
-      vp10_get_interp_filter_params(interp_filter);
+      av1_get_interp_filter_params(interp_filter);
 #endif
 
 #if CONFIG_DUAL_FILTER
   if (interp_filter_params_x.taps == SUBPEL_TAPS &&
       interp_filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params_y, subpel_y);
 #else
   if (interp_filter_params.taps == SUBPEL_TAPS) {
     const int16_t *kernel_x =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_x);
     const int16_t *kernel_y =
-        vp10_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
+        av1_get_interp_filter_subpel_kernel(interp_filter_params, subpel_y);
 #endif  // CONFIG_DUAL_FILTER
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (IsInterpolatingFilter(interp_filter)) {
@@ -134,11 +134,11 @@
     // first reference frame's prediction result is already in dst
     // therefore we need to average the first and second results
     int avg = ref > 0;
-    vp10_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
-                         subpel_x, xs, subpel_y, ys, avg, bd);
+    av1_highbd_convolve(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                        subpel_x, xs, subpel_y, ys, avg, bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EXT_INTER
 // Set to one to use larger codebooks
@@ -223,7 +223,7 @@
 #endif  // CONFIG_SUPERTX && CONFIG_EXT_INTER
                             int mi_x, int mi_y);
 
-static INLINE void vp10_make_inter_predictor(
+static INLINE void av1_make_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const int subpel_x, const int subpel_y, const struct scale_factors *sf,
     int w, int h, int ref,
@@ -234,32 +234,32 @@
 #endif
     int xs, int ys, const MACROBLOCKD *xd) {
   (void)xd;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                            sf, w, h, ref, interp_filter, xs, ys, xd->bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf, w,
                     h, ref, interp_filter, xs, ys);
 }
 
 #if CONFIG_EXT_INTER
-void vp10_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const int subpel_x, const int subpel_y,
-                                      const struct scale_factors *sf, int w,
-                                      int h,
+void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
+                                     uint8_t *dst, int dst_stride,
+                                     const int subpel_x, const int subpel_y,
+                                     const struct scale_factors *sf, int w,
+                                     int h,
 #if CONFIG_DUAL_FILTER
-                                      const INTERP_FILTER *interp_filter,
+                                     const INTERP_FILTER *interp_filter,
 #else
-                                      const INTERP_FILTER interp_filter,
+                                     const INTERP_FILTER interp_filter,
 #endif
-                                      int xs, int ys,
+                                     int xs, int ys,
 #if CONFIG_SUPERTX
-                                      int wedge_offset_x, int wedge_offset_y,
+                                     int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
-                                      const MACROBLOCKD *xd);
+                                     const MACROBLOCKD *xd);
 #endif  // CONFIG_EXT_INTER
 
 static INLINE int round_mv_comp_q4(int value) {
@@ -297,9 +297,9 @@
   // If the MV points so far into the UMV border that no visible pixels
   // are used for reconstruction, the subpel part of the MV can be
   // discarded and the MV limited to 16 pixels with equivalent results.
-  const int spel_left = (VPX_INTERP_EXTEND + bw) << SUBPEL_BITS;
+  const int spel_left = (AOM_INTERP_EXTEND + bw) << SUBPEL_BITS;
   const int spel_right = spel_left - SUBPEL_SHIFTS;
-  const int spel_top = (VPX_INTERP_EXTEND + bh) << SUBPEL_BITS;
+  const int spel_top = (AOM_INTERP_EXTEND + bh) << SUBPEL_BITS;
   const int spel_bottom = spel_top - SUBPEL_SHIFTS;
   MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)),
                     src_mv->col * (1 << (1 - ss_x)) };
@@ -328,57 +328,56 @@
   return res;
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
-                                       int ir, int ic, int mi_row, int mi_col);
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i, int ir,
+                                      int ic, int mi_row, int mi_col);
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize, int plane);
-
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                      BLOCK_SIZE bsize);
-
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize);
 
-#if CONFIG_SUPERTX
-void vp10_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
-#if CONFIG_EXT_INTER
-                                                  int mi_row_ori,
-                                                  int mi_col_ori,
-#endif  // CONFIG_EXT_INTER
-                                                  int mi_row, int mi_col,
-                                                  BLOCK_SIZE bsize, int block);
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize, int plane);
 
-void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                     BLOCK_SIZE bsize);
+
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize);
+
+#if CONFIG_SUPERTX
+void av1_build_inter_predictors_sb_sub8x8_extend(MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
-                                           int mi_row_ori, int mi_col_ori,
+                                                 int mi_row_ori, int mi_col_ori,
 #endif  // CONFIG_EXT_INTER
-                                           int mi_row, int mi_col,
-                                           BLOCK_SIZE bsize);
+                                                 int mi_row, int mi_col,
+                                                 BLOCK_SIZE bsize, int block);
+
+void av1_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
+#if CONFIG_EXT_INTER
+                                          int mi_row_ori, int mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                                          int mi_row, int mi_col,
+                                          BLOCK_SIZE bsize);
 struct macroblockd_plane;
-void vp10_build_masked_inter_predictor_complex(
+void av1_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre,
     int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
     BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition,
     int plane);
 #endif  // CONFIG_SUPERTX
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                                uint8_t *dst, int dst_stride, const MV *mv_q3,
-                                const struct scale_factors *sf, int w, int h,
-                                int do_avg,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
+                               int dst_stride, const MV *mv_q3,
+                               const struct scale_factors *sf, int w, int h,
+                               int do_avg,
 #if CONFIG_DUAL_FILTER
-                                const INTERP_FILTER *interp_filter,
+                               const INTERP_FILTER *interp_filter,
 #else
-                                const INTERP_FILTER interp_filter,
+                               const INTERP_FILTER interp_filter,
 #endif
-                                enum mv_precision precision, int x, int y);
+                               enum mv_precision precision, int x, int y);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
 #if CONFIG_DUAL_FILTER
@@ -410,13 +409,13 @@
   dst->stride = stride;
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col);
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+                          const YV12_BUFFER_CONFIG *src, int mi_row,
+                          int mi_col);
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                           const YV12_BUFFER_CONFIG *src, int mi_row,
-                           int mi_col, const struct scale_factors *sf);
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
+                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
+                          const struct scale_factors *sf);
 
 #if CONFIG_DUAL_FILTER
 // Detect if the block has sub-pixel level motion vectors
@@ -463,7 +462,7 @@
 #endif
 
 #if CONFIG_EXT_INTERP
-static INLINE int vp10_is_interp_needed(const MACROBLOCKD *const xd) {
+static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -483,8 +482,8 @@
 #endif
 
   // For scaled references, the interpolation filter is indicated all the time.
-  if (vp10_is_scaled(&xd->block_refs[0]->sf)) return 1;
-  if (is_compound && vp10_is_scaled(&xd->block_refs[1]->sf)) return 1;
+  if (av1_is_scaled(&xd->block_refs[0]->sf)) return 1;
+  if (is_compound && av1_is_scaled(&xd->block_refs[1]->sf)) return 1;
 
   if (bsize < BLOCK_8X8) {
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
@@ -516,81 +515,83 @@
 #endif  // CONFIG_EXT_INTERP
 
 #if CONFIG_OBMC
-const uint8_t *vp10_get_obmc_mask(int length);
-void vp10_build_obmc_inter_prediction(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                      int mi_row, int mi_col,
-                                      uint8_t *above[MAX_MB_PLANE],
-                                      int above_stride[MAX_MB_PLANE],
-                                      uint8_t *left[MAX_MB_PLANE],
-                                      int left_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_above_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                          int mi_row, int mi_col,
-                                          uint8_t *tmp_buf[MAX_MB_PLANE],
-                                          int tmp_width[MAX_MB_PLANE],
-                                          int tmp_height[MAX_MB_PLANE],
-                                          int tmp_stride[MAX_MB_PLANE]);
-void vp10_build_prediction_by_left_preds(VP10_COMMON *cm, MACROBLOCKD *xd,
+const uint8_t *av1_get_obmc_mask(int length);
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                     int mi_row, int mi_col,
+                                     uint8_t *above[MAX_MB_PLANE],
+                                     int above_stride[MAX_MB_PLANE],
+                                     uint8_t *left[MAX_MB_PLANE],
+                                     int left_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          uint8_t *tmp_buf[MAX_MB_PLANE],
                                          int tmp_width[MAX_MB_PLANE],
                                          int tmp_height[MAX_MB_PLANE],
                                          int tmp_stride[MAX_MB_PLANE]);
+void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+                                        int mi_row, int mi_col,
+                                        uint8_t *tmp_buf[MAX_MB_PLANE],
+                                        int tmp_width[MAX_MB_PLANE],
+                                        int tmp_height[MAX_MB_PLANE],
+                                        int tmp_stride[MAX_MB_PLANE]);
 #endif  // CONFIG_OBMC
 
 #if CONFIG_EXT_INTER
 #define MASK_MASTER_SIZE (2 * MAX_SB_SIZE)
 #define MASK_MASTER_STRIDE (2 * MAX_SB_SIZE)
 
-void vp10_init_wedge_masks();
+void av1_init_wedge_masks();
 
-static INLINE const uint8_t *vp10_get_contiguous_soft_mask(int wedge_index,
-                                                           int wedge_sign,
-                                                           BLOCK_SIZE sb_type) {
+static INLINE const uint8_t *av1_get_contiguous_soft_mask(int wedge_index,
+                                                          int wedge_sign,
+                                                          BLOCK_SIZE sb_type) {
   return wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
 }
 
-const uint8_t *vp10_get_soft_mask(int wedge_index, int wedge_sign,
-                                  BLOCK_SIZE sb_type, int wedge_offset_x,
-                                  int wedge_offset_y);
+const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
+                                 BLOCK_SIZE sb_type, int wedge_offset_x,
+                                 int wedge_offset_y);
 
-void vp10_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
-                                      uint8_t *upred, uint8_t *vpred,
-                                      int ystride, int ustride, int vstride,
-                                      BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
-                                          int ustride, int plane,
-                                          BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
+                                     uint8_t *upred, uint8_t *vpred,
+                                     int ystride, int ustride, int vstride,
+                                     BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
+                                         int ustride, int plane,
+                                         BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize);
 
-void vp10_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
-                                                BLOCK_SIZE bsize, int plane,
-                                                uint8_t *intra_pred,
-                                                int intra_stride);
-void vp10_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
-                             const uint8_t *inter_pred, int inter_stride,
-                             const uint8_t *intra_pred, int intra_stride);
-void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
-                                           uint8_t *vpred, int ustride,
-                                           int vstride, BLOCK_SIZE bsize);
-void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
-                                          int ystride, BLOCK_SIZE bsize);
+void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
+                                               BLOCK_SIZE bsize, int plane,
+                                               uint8_t *intra_pred,
+                                               int intra_stride);
+void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
+                            const uint8_t *inter_pred, int inter_stride,
+                            const uint8_t *intra_pred, int intra_stride);
+void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
+                                          uint8_t *vpred, int ustride,
+                                          int vstride, BLOCK_SIZE bsize);
+void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
+                                         int ystride, BLOCK_SIZE bsize);
 
 // Encoder only
-void vp10_build_inter_predictors_for_planes_single_buf(
+void av1_build_inter_predictors_for_planes_single_buf(
     MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
     int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]);
-void vp10_build_wedge_inter_predictor_from_buf(
-    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
-    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
-    int ext_dst_stride1[3]);
+void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+                                              int plane_from, int plane_to,
+                                              uint8_t *ext_dst0[3],
+                                              int ext_dst_stride0[3],
+                                              uint8_t *ext_dst1[3],
+                                              int ext_dst_stride1[3]);
 #endif  // CONFIG_EXT_INTER
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTER_H_
+#endif  // AV1_COMMON_RECONINTER_H_
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 801f61e..3c08ac4 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -10,17 +10,17 @@
 
 #include <math.h>
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/system_state.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
-#include "aom_dsp/vpx_dsp_common.h"
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-#include "aom_mem/vpx_mem.h"
+#if CONFIG_AOM_HIGHBITDEPTH
+#include "aom_dsp/aom_dsp_common.h"
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
-#include "aom_ports/vpx_once.h"
+#include "aom_ports/aom_once.h"
 #if CONFIG_EXT_INTRA
 #include "av1/common/intra_filters.h"
 #endif
@@ -222,14 +222,14 @@
 #endif  // CONFIG_EXT_PARTITION
 #endif  // CONFIG_EXT_PARTITION_TYPES
 
-static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                          int right_available,
+static int av1_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
+                         int right_available,
 #if CONFIG_EXT_PARTITION_TYPES
-                          PARTITION_TYPE partition,
+                         PARTITION_TYPE partition,
 #endif
-                          TX_SIZE txsz, int y, int x, int ss_x) {
+                         TX_SIZE txsz, int y, int x, int ss_x) {
   const int wl = mi_width_log2_lookup[bsize];
-  const int w = VPXMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
+  const int w = AOMMAX(num_4x4_blocks_wide_lookup[bsize] >> ss_x, 1);
   const int step = 1 << txsz;
 
   if (!right_available) {
@@ -270,9 +270,9 @@
   }
 }
 
-static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                           int bottom_available, TX_SIZE txsz, int y, int x,
-                           int ss_y) {
+static int av1_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
+                          int bottom_available, TX_SIZE txsz, int y, int x,
+                          int ss_y) {
   if (!bottom_available || x != 0) {
     return 0;
   } else {
@@ -309,22 +309,22 @@
 static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
 static intra_pred_fn dc_pred[2][2][TX_SIZES];
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
                                    const uint16_t *above, const uint16_t *left,
                                    int bd);
 static intra_high_pred_fn pred_high[INTRA_MODES][4];
 static intra_high_pred_fn dc_pred_high[2][2][4];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void vp10_init_intra_predictors_internal(void) {
+static void av1_init_intra_predictors_internal(void) {
 #define INIT_NO_4X4(p, type)                  \
-  p[TX_8X8] = vpx_##type##_predictor_8x8;     \
-  p[TX_16X16] = vpx_##type##_predictor_16x16; \
-  p[TX_32X32] = vpx_##type##_predictor_32x32
+  p[TX_8X8] = aom_##type##_predictor_8x8;     \
+  p[TX_16X16] = aom_##type##_predictor_16x16; \
+  p[TX_32X32] = aom_##type##_predictor_32x32
 
 #define INIT_ALL_SIZES(p, type)           \
-  p[TX_4X4] = vpx_##type##_predictor_4x4; \
+  p[TX_4X4] = aom_##type##_predictor_4x4; \
   INIT_NO_4X4(p, type)
 
   INIT_ALL_SIZES(pred[V_PRED], v);
@@ -342,7 +342,7 @@
   INIT_ALL_SIZES(dc_pred[1][0], dc_left);
   INIT_ALL_SIZES(dc_pred[1][1], dc);
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
   INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
   INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207e);
@@ -357,7 +357,7 @@
   INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
   INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
   INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #undef intra_pred_allsizes
 }
@@ -388,13 +388,13 @@
     val = ROUND_POWER_OF_TWO(val, 8);
   } else {
     filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
-    filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+    filter = av1_intra_filter_kernels[filter_type][filter_idx];
 
     if (filter_idx < (1 << SUBPEL_BITS)) {
       val = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k) {
         idx = base + 1 - (SUBPEL_TAPS / 2) + k;
-        idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+        idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
         val += ref[idx] * filter[k];
       }
       val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -439,7 +439,7 @@
         base += 1;
         shift = 0;
       }
-      len = VPXMIN(bs, 2 * bs - 1 - base);
+      len = AOMMIN(bs, 2 * bs - 1 - base);
       if (len <= 0) {
         int i;
         for (i = r; i < bs; ++i) {
@@ -460,8 +460,8 @@
         }
       } else {
         if (!flags[shift]) {
-          const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
-          vpx_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
+          const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+          aom_convolve8_horiz(src + pad_size, 2 * bs, buf[shift], 2 * bs,
                               filter, 16, NULL, 16, 2 * bs,
                               2 * bs < 16 ? 2 : 1);
           flags[shift] = 1;
@@ -570,7 +570,7 @@
         base += 1;
         shift = 0;
       }
-      len = VPXMIN(bs, 2 * bs - 1 - base);
+      len = AOMMIN(bs, 2 * bs - 1 - base);
 
       if (len <= 0) {
         for (r = 0; r < bs; ++r) {
@@ -590,8 +590,8 @@
         }
       } else {
         if (!flags[shift]) {
-          const int16_t *filter = vp10_intra_filter_kernels[filter_type][shift];
-          vpx_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
+          const int16_t *filter = av1_intra_filter_kernels[filter_type][shift];
+          aom_convolve8_vert(src + 4 * pad_size, 4, buf[0] + 4 * shift,
                              4 * SUBPEL_SHIFTS, NULL, 16, filter, 16,
                              2 * bs < 16 ? 4 : 4, 2 * bs);
           flags[shift] = 1;
@@ -730,53 +730,53 @@
   }
 }
 
-void vp10_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                const uint8_t *above, const uint8_t *left) {
+void av1_dc_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                               const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED);
 }
 
-void vp10_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                               const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                              const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED);
 }
 
-void vp10_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                               const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                              const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED);
 }
 
-void vp10_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                 const uint8_t *above, const uint8_t *left) {
+void av1_d45_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED);
 }
 
-void vp10_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d135_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED);
 }
 
-void vp10_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d117_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED);
 }
 
-void vp10_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d153_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED);
 }
 
-void vp10_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                  const uint8_t *above, const uint8_t *left) {
+void av1_d207_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                 const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED);
 }
 
-void vp10_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                 const uint8_t *above, const uint8_t *left) {
+void av1_d63_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                                const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED);
 }
 
-void vp10_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
-                                const uint8_t *above, const uint8_t *left) {
+void av1_tm_filter_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
+                               const uint8_t *above, const uint8_t *left) {
   filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED);
 }
 
@@ -784,33 +784,33 @@
                                     int bs, const uint8_t *above,
                                     const uint8_t *left) {
   switch (mode) {
-    case DC_PRED: vp10_dc_filter_predictor(dst, stride, bs, above, left); break;
-    case V_PRED: vp10_v_filter_predictor(dst, stride, bs, above, left); break;
-    case H_PRED: vp10_h_filter_predictor(dst, stride, bs, above, left); break;
+    case DC_PRED: av1_dc_filter_predictor(dst, stride, bs, above, left); break;
+    case V_PRED: av1_v_filter_predictor(dst, stride, bs, above, left); break;
+    case H_PRED: av1_h_filter_predictor(dst, stride, bs, above, left); break;
     case D45_PRED:
-      vp10_d45_filter_predictor(dst, stride, bs, above, left);
+      av1_d45_filter_predictor(dst, stride, bs, above, left);
       break;
     case D135_PRED:
-      vp10_d135_filter_predictor(dst, stride, bs, above, left);
+      av1_d135_filter_predictor(dst, stride, bs, above, left);
       break;
     case D117_PRED:
-      vp10_d117_filter_predictor(dst, stride, bs, above, left);
+      av1_d117_filter_predictor(dst, stride, bs, above, left);
       break;
     case D153_PRED:
-      vp10_d153_filter_predictor(dst, stride, bs, above, left);
+      av1_d153_filter_predictor(dst, stride, bs, above, left);
       break;
     case D207_PRED:
-      vp10_d207_filter_predictor(dst, stride, bs, above, left);
+      av1_d207_filter_predictor(dst, stride, bs, above, left);
       break;
     case D63_PRED:
-      vp10_d63_filter_predictor(dst, stride, bs, above, left);
+      av1_d63_filter_predictor(dst, stride, bs, above, left);
       break;
-    case TM_PRED: vp10_tm_filter_predictor(dst, stride, bs, above, left); break;
+    case TM_PRED: av1_tm_filter_predictor(dst, stride, bs, above, left); break;
     default: assert(0);
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int highbd_intra_subpel_interp(int base, int shift, const uint16_t *ref,
                                       int ref_start_idx, int ref_end_idx,
                                       INTRA_FILTER filter_type) {
@@ -822,13 +822,13 @@
     val = ROUND_POWER_OF_TWO(val, 8);
   } else {
     filter_idx = ROUND_POWER_OF_TWO(shift, 8 - SUBPEL_BITS);
-    filter = vp10_intra_filter_kernels[filter_type][filter_idx];
+    filter = av1_intra_filter_kernels[filter_type][filter_idx];
 
     if (filter_idx < (1 << SUBPEL_BITS)) {
       val = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k) {
         idx = base + 1 - (SUBPEL_TAPS / 2) + k;
-        idx = VPXMAX(VPXMIN(idx, ref_end_idx), ref_start_idx);
+        idx = AOMMAX(AOMMIN(idx, ref_end_idx), ref_start_idx);
         val += ref[idx] * filter[k];
       }
       val = ROUND_POWER_OF_TWO(val, FILTER_BITS);
@@ -956,7 +956,7 @@
   (void)above;
   (void)bd;
   for (r = 0; r < bs; r++) {
-    vpx_memset16(dst, left[r], bs);
+    aom_memset16(dst, left[r], bs);
     dst += stride;
   }
 }
@@ -1025,70 +1025,70 @@
   }
 }
 
-void vp10_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                       const uint16_t *above,
-                                       const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
+                                      const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, DC_PRED,
                                       bd);
 }
 
-void vp10_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                      const uint16_t *above,
-                                      const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                     const uint16_t *above,
+                                     const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, V_PRED, bd);
 }
 
-void vp10_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                      const uint16_t *above,
-                                      const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                     const uint16_t *above,
+                                     const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, H_PRED, bd);
 }
 
-void vp10_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                        const uint16_t *above,
-                                        const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
+                                       const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D45_PRED,
                                       bd);
 }
 
-void vp10_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D135_PRED,
                                       bd);
 }
 
-void vp10_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D117_PRED,
                                       bd);
 }
 
-void vp10_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D153_PRED,
                                       bd);
 }
 
-void vp10_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride,
-                                         int bs, const uint16_t *above,
-                                         const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
+                                        const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D207_PRED,
                                       bd);
 }
 
-void vp10_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                        const uint16_t *above,
-                                        const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
+                                       const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, D63_PRED,
                                       bd);
 }
 
-void vp10_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
-                                       const uint16_t *above,
-                                       const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_c(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
+                                      const uint16_t *left, int bd) {
   highbd_filter_intra_predictors_4tap(dst, stride, bs, above, left, TM_PRED,
                                       bd);
 }
@@ -1099,42 +1099,42 @@
                                            const uint16_t *left, int bd) {
   switch (mode) {
     case DC_PRED:
-      vp10_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_dc_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case V_PRED:
-      vp10_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_v_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case H_PRED:
-      vp10_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_h_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D45_PRED:
-      vp10_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d45_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D135_PRED:
-      vp10_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d135_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D117_PRED:
-      vp10_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d117_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D153_PRED:
-      vp10_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d153_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D207_PRED:
-      vp10_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d207_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case D63_PRED:
-      vp10_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_d63_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     case TM_PRED:
-      vp10_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
+      av1_highbd_tm_filter_predictor(dst, stride, bs, above, left, bd);
       break;
     default: assert(0);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_INTRA
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_intra_predictors_high(
     const MACROBLOCKD *xd, const uint8_t *ref8, int ref_stride, uint8_t *dst8,
     int dst_stride, PREDICTION_MODE mode, TX_SIZE tx_size, int n_top_px,
@@ -1194,7 +1194,7 @@
     int i;
     const int val = (n_left_px == 0) ? base + 1 : base - 1;
     for (i = 0; i < bs; ++i) {
-      vpx_memset16(dst, val, bs);
+      aom_memset16(dst, val, bs);
       dst += dst_stride;
     }
     return;
@@ -1224,9 +1224,9 @@
           left_col[i] = ref[i * ref_stride - 1];
       }
       if (i < (bs << need_bottom))
-        vpx_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
+        aom_memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i);
     } else {
-      vpx_memset16(left_col, base + 1, bs << need_bottom);
+      aom_memset16(left_col, base + 1, bs << need_bottom);
     }
   }
 
@@ -1254,9 +1254,9 @@
         i += n_topright_px;
       }
       if (i < (bs << need_right))
-        vpx_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
+        aom_memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i);
     } else {
-      vpx_memset16(above_row, base - 1, bs << need_right);
+      aom_memset16(above_row, base - 1, bs << need_right);
     }
   }
 
@@ -1285,7 +1285,7 @@
   if (mode != DC_PRED && mode != TM_PRED &&
       xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
     INTRA_FILTER filter = INTRA_FILTER_LINEAR;
-    if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+    if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
       filter = xd->mi[0]->mbmi.intra_filter;
     highbd_dr_predictor(dst, dst_stride, bs, const_above_row, left_col, p_angle,
                         xd->bd, filter);
@@ -1302,7 +1302,7 @@
                              xd->bd);
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
                                    int ref_stride, uint8_t *dst, int dst_stride,
@@ -1453,7 +1453,7 @@
   if (mode != DC_PRED && mode != TM_PRED &&
       xd->mi[0]->mbmi.sb_type >= BLOCK_8X8) {
     INTRA_FILTER filter = INTRA_FILTER_LINEAR;
-    if (plane == 0 && vp10_is_intra_filter_switchable(p_angle))
+    if (plane == 0 && av1_is_intra_filter_switchable(p_angle))
       filter = xd->mi[0]->mbmi.intra_filter;
     dr_predictor(dst, dst_stride, tx_size, const_above_row, left_col, p_angle,
                  filter);
@@ -1470,11 +1470,11 @@
   }
 }
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                              TX_SIZE tx_size, PREDICTION_MODE mode,
-                              const uint8_t *ref, int ref_stride, uint8_t *dst,
-                              int dst_stride, int col_off, int row_off,
-                              int plane) {
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+                             TX_SIZE tx_size, PREDICTION_MODE mode,
+                             const uint8_t *ref, int ref_stride, uint8_t *dst,
+                             int dst_stride, int col_off, int row_off,
+                             int plane) {
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   const int txw = num_4x4_blocks_wide_txsize_lookup[tx_size];
@@ -1483,8 +1483,8 @@
   const int have_left = col_off || xd->left_available;
   const int x = col_off * 4;
   const int y = row_off * 4;
-  const int bw = pd->subsampling_x ? 1 << bwl_in : VPXMAX(2, 1 << bwl_in);
-  const int bh = pd->subsampling_y ? 1 << bhl_in : VPXMAX(2, 1 << bhl_in);
+  const int bw = pd->subsampling_x ? 1 << bwl_in : AOMMAX(2, 1 << bwl_in);
+  const int bh = pd->subsampling_y ? 1 << bhl_in : AOMMAX(2, 1 << bhl_in);
   const int mi_row = -xd->mb_to_top_edge >> (3 + MI_SIZE_LOG2);
   const int mi_col = -xd->mb_to_left_edge >> (3 + MI_SIZE_LOG2);
   const int wpx = 4 * bw;
@@ -1506,31 +1506,30 @@
   const PARTITION_TYPE partition = xd->mi[0]->mbmi.partition;
 #endif
   const int have_right =
-      vp10_has_right(bsize, mi_row, mi_col, right_available,
+      av1_has_right(bsize, mi_row, mi_col, right_available,
 #if CONFIG_EXT_PARTITION_TYPES
-                     partition,
+                    partition,
 #endif
-                     tx_size, row_off, col_off, pd->subsampling_x);
-  const int have_bottom =
-      vp10_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size, row_off, col_off,
-                      pd->subsampling_y);
+                    tx_size, row_off, col_off, pd->subsampling_x);
+  const int have_bottom = av1_has_bottom(bsize, mi_row, mi_col, yd > 0, tx_size,
+                                         row_off, col_off, pd->subsampling_y);
 
   if (xd->mi[0]->mbmi.palette_mode_info.palette_size[plane != 0] > 0) {
     const int bs = 4 * num_4x4_blocks_wide_txsize_lookup[tx_size];
     const int stride = 4 * (1 << bwl_in);
     int r, c;
     uint8_t *map = NULL;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
                         plane * PALETTE_MAX_SIZE;
 #else
     uint8_t *palette = xd->mi[0]->mbmi.palette_mode_info.palette_colors +
                        plane * PALETTE_MAX_SIZE;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     map = xd->plane[plane != 0].color_index_map;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
       for (r = 0; r < bs; ++r)
@@ -1546,29 +1545,29 @@
     for (r = 0; r < bs; ++r)
       for (c = 0; c < bs; ++c)
         dst[r * dst_stride + c] = palette[map[(r + y) * stride + c + x]];
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return;
   }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(
         xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
-        have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
-        have_top && have_right ? VPXMIN(txwpx, xr) : 0,
-        have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
-        have_bottom && have_left ? VPXMIN(txhpx, yd) : 0, plane);
+        have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+        have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+        have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+        have_bottom && have_left ? AOMMIN(txhpx, yd) : 0, plane);
     return;
   }
 #endif
   build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
-                         have_top ? VPXMIN(txwpx, xr + txwpx) : 0,
-                         have_top && have_right ? VPXMIN(txwpx, xr) : 0,
-                         have_left ? VPXMIN(txhpx, yd + txhpx) : 0,
-                         have_bottom && have_left ? VPXMIN(txhpx, yd) : 0,
+                         have_top ? AOMMIN(txwpx, xr + txwpx) : 0,
+                         have_top && have_right ? AOMMIN(txwpx, xr) : 0,
+                         have_left ? AOMMIN(txhpx, yd + txhpx) : 0,
+                         have_bottom && have_left ? AOMMIN(txhpx, yd) : 0,
                          plane);
 }
 
-void vp10_init_intra_predictors(void) {
-  once(vp10_init_intra_predictors_internal);
+void av1_init_intra_predictors(void) {
+  once(av1_init_intra_predictors_internal);
 }
diff --git a/av1/common/reconintra.h b/av1/common/reconintra.h
index d20b5a4..3adde50 100644
--- a/av1/common/reconintra.h
+++ b/av1/common/reconintra.h
@@ -8,27 +8,27 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RECONINTRA_H_
-#define VP10_COMMON_RECONINTRA_H_
+#ifndef AV1_COMMON_RECONINTRA_H_
+#define AV1_COMMON_RECONINTRA_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "av1/common/blockd.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_init_intra_predictors(void);
+void av1_init_intra_predictors(void);
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                              TX_SIZE tx_size, PREDICTION_MODE mode,
-                              const uint8_t *ref, int ref_stride, uint8_t *dst,
-                              int dst_stride, int aoff, int loff, int plane);
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+                             TX_SIZE tx_size, PREDICTION_MODE mode,
+                             const uint8_t *ref, int ref_stride, uint8_t *dst,
+                             int dst_stride, int aoff, int loff, int plane);
 #if CONFIG_EXT_INTRA
-int vp10_is_intra_filter_switchable(int angle);
+int av1_is_intra_filter_switchable(int angle);
 #endif  // CONFIG_EXT_INTRA
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTRA_H_
+#endif  // AV1_COMMON_RECONINTRA_H_
diff --git a/av1/common/restoration.c b/av1/common/restoration.c
index 4d4c9fc..fad5dd6 100644
--- a/av1/common/restoration.c
+++ b/av1/common/restoration.c
@@ -10,12 +10,12 @@
 
 #include <math.h>
 
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/onyxc_int.h"
 #include "av1/common/restoration.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
 #define BILATERAL_PARAM_PRECISION 16
@@ -55,15 +55,15 @@
 typedef void (*restore_func_type)(uint8_t *data8, int width, int height,
                                   int stride, RestorationInternal *rst,
                                   uint8_t *tmpdata8, int tmpstride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*restore_func_highbd_type)(uint8_t *data8, int width, int height,
                                          int stride, RestorationInternal *rst,
                                          uint8_t *tmpdata8, int tmpstride,
                                          int bit_depth);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE BilateralParamsType vp10_bilateral_level_to_params(int index,
-                                                                 int kf) {
+static INLINE BilateralParamsType av1_bilateral_level_to_params(int index,
+                                                                int kf) {
   return kf ? bilateral_level_to_params_arr_kf[index]
             : bilateral_level_to_params_arr[index];
 }
@@ -77,31 +77,31 @@
   { 64, 64 }, { 128, 128 }, { 256, 256 }
 };
 
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
-                                    int *tile_width, int *tile_height,
-                                    int *nhtiles, int *nvtiles) {
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+                                   int *tile_width, int *tile_height,
+                                   int *nhtiles, int *nvtiles) {
   *tile_width = (tilesize < 0)
                     ? width
-                    : VPXMIN(restoration_tile_sizes[tilesize].width, width);
+                    : AOMMIN(restoration_tile_sizes[tilesize].width, width);
   *tile_height = (tilesize < 0)
                      ? height
-                     : VPXMIN(restoration_tile_sizes[tilesize].height, height);
+                     : AOMMIN(restoration_tile_sizes[tilesize].height, height);
   *nhtiles = (width + (*tile_width >> 1)) / *tile_width;
   *nvtiles = (height + (*tile_height >> 1)) / *tile_height;
 }
 
-int vp10_get_restoration_ntiles(int tilesize, int width, int height) {
+int av1_get_restoration_ntiles(int tilesize, int width, int height) {
   int nhtiles, nvtiles;
   int tile_width, tile_height;
-  vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
-                                 &tile_height, &nhtiles, &nvtiles);
+  av1_get_restoration_tile_size(tilesize, width, height, &tile_width,
+                                &tile_height, &nhtiles, &nvtiles);
   return (nhtiles * nvtiles);
 }
 
-void vp10_loop_restoration_precal() {
+void av1_loop_restoration_precal() {
   int i;
   for (i = 0; i < BILATERAL_LEVELS_KF; i++) {
-    const BilateralParamsType param = vp10_bilateral_level_to_params(i, 1);
+    const BilateralParamsType param = av1_bilateral_level_to_params(i, 1);
     const int sigma_x = param.sigma_x;
     const int sigma_y = param.sigma_y;
     const int sigma_r = param.sigma_r;
@@ -129,7 +129,7 @@
     }
   }
   for (i = 0; i < BILATERAL_LEVELS; i++) {
-    const BilateralParamsType param = vp10_bilateral_level_to_params(i, 0);
+    const BilateralParamsType param = av1_bilateral_level_to_params(i, 0);
     const int sigma_x = param.sigma_x;
     const int sigma_y = param.sigma_y;
     const int sigma_r = param.sigma_r;
@@ -159,13 +159,13 @@
   }
 }
 
-int vp10_bilateral_level_bits(const VP10_COMMON *const cm) {
+int av1_bilateral_level_bits(const AV1_COMMON *const cm) {
   return cm->frame_type == KEY_FRAME ? BILATERAL_LEVEL_BITS_KF
                                      : BILATERAL_LEVEL_BITS;
 }
 
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
-                                int kf, int width, int height) {
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+                               int kf, int width, int height) {
   int i, tile_idx;
   rst->restoration_type = rsi->restoration_type;
   rst->subsampling_x = 0;
@@ -173,10 +173,10 @@
   if (rsi->restoration_type == RESTORE_BILATERAL) {
     rst->tilesize_index = BILATERAL_TILESIZE;
     rst->ntiles =
-        vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
-    vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
-                                   &rst->tile_width, &rst->tile_height,
-                                   &rst->nhtiles, &rst->nvtiles);
+        av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+    av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+                                  &rst->tile_width, &rst->tile_height,
+                                  &rst->nhtiles, &rst->nvtiles);
     rst->bilateral_level = rsi->bilateral_level;
     rst->wr_lut = (uint8_t **)malloc(sizeof(*rst->wr_lut) * rst->ntiles);
     assert(rst->wr_lut != NULL);
@@ -195,10 +195,10 @@
   } else if (rsi->restoration_type == RESTORE_WIENER) {
     rst->tilesize_index = WIENER_TILESIZE;
     rst->ntiles =
-        vp10_get_restoration_ntiles(rst->tilesize_index, width, height);
-    vp10_get_restoration_tile_size(rst->tilesize_index, width, height,
-                                   &rst->tile_width, &rst->tile_height,
-                                   &rst->nhtiles, &rst->nvtiles);
+        av1_get_restoration_ntiles(rst->tilesize_index, width, height);
+    av1_get_restoration_tile_size(rst->tilesize_index, width, height,
+                                  &rst->tile_width, &rst->tile_height,
+                                  &rst->nhtiles, &rst->nvtiles);
     rst->wiener_level = rsi->wiener_level;
     rst->vfilter =
         (int(*)[RESTORATION_WIN])malloc(sizeof(*rst->vfilter) * rst->ntiles);
@@ -373,7 +373,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void loop_bilateral_filter_highbd(uint8_t *data8, int width, int height,
                                          int stride, RestorationInternal *rst,
                                          uint8_t *tmpdata8, int tmpstride,
@@ -530,10 +530,10 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                                int start_mi_row, int end_mi_row, int y_only) {
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                               int start_mi_row, int end_mi_row, int y_only) {
   const int ywidth = frame->y_crop_width;
   const int ystride = frame->y_stride;
   const int uvwidth = frame->uv_crop_width;
@@ -546,35 +546,35 @@
       cm->rst_internal.restoration_type == RESTORE_BILATERAL
           ? loop_bilateral_filter
           : loop_wiener_filter;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   restore_func_highbd_type restore_func_highbd =
       cm->rst_internal.restoration_type == RESTORE_BILATERAL
           ? loop_bilateral_filter_highbd
           : loop_wiener_filter_highbd;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   YV12_BUFFER_CONFIG tmp_buf;
   memset(&tmp_buf, 0, sizeof(YV12_BUFFER_CONFIG));
 
-  yend = VPXMIN(yend, cm->height);
-  uvend = VPXMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
+  yend = AOMMIN(yend, cm->height);
+  uvend = AOMMIN(uvend, cm->subsampling_y ? (cm->height + 1) >> 1 : cm->height);
 
-  if (vpx_realloc_frame_buffer(
+  if (aom_realloc_frame_buffer(
           &tmp_buf, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+          AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL) < 0)
+    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                        "Failed to allocate tmp restoration buffer");
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth)
     restore_func_highbd(frame->y_buffer + ystart * ystride, ywidth,
                         yend - ystart, ystride, &cm->rst_internal,
                         tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
                         tmp_buf.y_stride, cm->bit_depth);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     restore_func(frame->y_buffer + ystart * ystride, ywidth, yend - ystart,
                  ystride, &cm->rst_internal,
                  tmp_buf.y_buffer + ystart * tmp_buf.y_stride,
@@ -582,7 +582,7 @@
   if (!y_only) {
     cm->rst_internal.subsampling_x = cm->subsampling_x;
     cm->rst_internal.subsampling_y = cm->subsampling_y;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       restore_func_highbd(frame->u_buffer + uvstart * uvstride, uvwidth,
                           uvend - uvstart, uvstride, &cm->rst_internal,
@@ -593,7 +593,7 @@
                           tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
                           tmp_buf.uv_stride, cm->bit_depth);
     } else {
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       restore_func(frame->u_buffer + uvstart * uvstride, uvwidth,
                    uvend - uvstart, uvstride, &cm->rst_internal,
                    tmp_buf.u_buffer + uvstart * tmp_buf.uv_stride,
@@ -602,11 +602,11 @@
                    uvend - uvstart, uvstride, &cm->rst_internal,
                    tmp_buf.v_buffer + uvstart * tmp_buf.uv_stride,
                    tmp_buf.uv_stride);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
-  vpx_free_frame_buffer(&tmp_buf);
+  aom_free_frame_buffer(&tmp_buf);
   if (cm->rst_internal.restoration_type == RESTORE_BILATERAL) {
     free(cm->rst_internal.wr_lut);
     cm->rst_internal.wr_lut = NULL;
@@ -621,9 +621,9 @@
   }
 }
 
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                                 RestorationInfo *rsi, int y_only,
-                                 int partial_frame) {
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                                RestorationInfo *rsi, int y_only,
+                                int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
   if (rsi->restoration_type != RESTORE_NONE) {
     start_mi_row = 0;
@@ -631,12 +631,12 @@
     if (partial_frame && cm->mi_rows > 8) {
       start_mi_row = cm->mi_rows >> 1;
       start_mi_row &= 0xfffffff8;
-      mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+      mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
     }
     end_mi_row = start_mi_row + mi_rows_to_filter;
-    vp10_loop_restoration_init(&cm->rst_internal, rsi,
-                               cm->frame_type == KEY_FRAME, cm->width,
-                               cm->height);
-    vp10_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
+    av1_loop_restoration_init(&cm->rst_internal, rsi,
+                              cm->frame_type == KEY_FRAME, cm->width,
+                              cm->height);
+    av1_loop_restoration_rows(frame, cm, start_mi_row, end_mi_row, y_only);
   }
 }
diff --git a/av1/common/restoration.h b/av1/common/restoration.h
index c1e937a..6c53a77 100644
--- a/av1/common/restoration.h
+++ b/av1/common/restoration.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_RESTORATION_H_
-#define VP10_COMMON_RESTORATION_H_
+#ifndef AV1_COMMON_RESTORATION_H_
+#define AV1_COMMON_RESTORATION_H_
 
 #include "aom_ports/mem.h"
-#include "./vpx_config.h"
+#include "./aom_config.h"
 
 #include "av1/common/blockd.h"
 
@@ -88,22 +88,21 @@
   int (*vfilter)[RESTORATION_WIN], (*hfilter)[RESTORATION_WIN];
 } RestorationInternal;
 
-int vp10_bilateral_level_bits(const struct VP10Common *const cm);
-int vp10_get_restoration_ntiles(int tilesize, int width, int height);
-void vp10_get_restoration_tile_size(int tilesize, int width, int height,
-                                    int *tile_width, int *tile_height,
-                                    int *nhtiles, int *nvtiles);
-void vp10_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
-                                int kf, int width, int height);
-void vp10_loop_restoration_frame(YV12_BUFFER_CONFIG *frame,
-                                 struct VP10Common *cm, RestorationInfo *rsi,
-                                 int y_only, int partial_frame);
-void vp10_loop_restoration_rows(YV12_BUFFER_CONFIG *frame,
-                                struct VP10Common *cm, int start_mi_row,
-                                int end_mi_row, int y_only);
-void vp10_loop_restoration_precal();
+int av1_bilateral_level_bits(const struct AV1Common *const cm);
+int av1_get_restoration_ntiles(int tilesize, int width, int height);
+void av1_get_restoration_tile_size(int tilesize, int width, int height,
+                                   int *tile_width, int *tile_height,
+                                   int *nhtiles, int *nvtiles);
+void av1_loop_restoration_init(RestorationInternal *rst, RestorationInfo *rsi,
+                               int kf, int width, int height);
+void av1_loop_restoration_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                                RestorationInfo *rsi, int y_only,
+                                int partial_frame);
+void av1_loop_restoration_rows(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                               int start_mi_row, int end_mi_row, int y_only);
+void av1_loop_restoration_precal();
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RESTORATION_H_
+#endif  // AV1_COMMON_RESTORATION_H_
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 6bd3b74..908a2db 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "av1/common/filter.h"
 #include "av1/common/scale.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
 
 static INLINE int scaled_x(int val, const struct scale_factors *sf) {
   return (int)((int64_t)val * sf->x_scale_fp >> REF_SCALE_SHIFT);
@@ -34,7 +34,7 @@
   return (other_size << REF_SCALE_SHIFT) / this_size;
 }
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
   const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
@@ -42,13 +42,13 @@
   return res;
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h,
-                                        int use_highbd) {
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h,
+                                       int use_highbd) {
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h) {
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h) {
 #endif
   if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
     sf->x_scale_fp = REF_INVALID_SCALE;
@@ -61,7 +61,7 @@
   sf->x_step_q4 = scaled_x(16, sf);
   sf->y_step_q4 = scaled_y(16, sf);
 
-  if (vp10_is_scaled(sf)) {
+  if (av1_is_scaled(sf)) {
     sf->scale_value_x = scaled_x;
     sf->scale_value_y = scaled_y;
   } else {
@@ -76,108 +76,108 @@
 // best quality, but it may be worth trying an additional mode that does
 // do the filtering on full-pel.
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-  sf->predict_ni[0][0][0] = vpx_convolve8_c;
-  sf->predict_ni[0][0][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[0][1][0] = vpx_convolve8_c;
-  sf->predict_ni[0][1][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[1][0][0] = vpx_convolve8_c;
-  sf->predict_ni[1][0][1] = vpx_convolve8_avg_c;
-  sf->predict_ni[1][1][0] = vpx_convolve8;
-  sf->predict_ni[1][1][1] = vpx_convolve8_avg;
+  sf->predict_ni[0][0][0] = aom_convolve8_c;
+  sf->predict_ni[0][0][1] = aom_convolve8_avg_c;
+  sf->predict_ni[0][1][0] = aom_convolve8_c;
+  sf->predict_ni[0][1][1] = aom_convolve8_avg_c;
+  sf->predict_ni[1][0][0] = aom_convolve8_c;
+  sf->predict_ni[1][0][1] = aom_convolve8_avg_c;
+  sf->predict_ni[1][1][0] = aom_convolve8;
+  sf->predict_ni[1][1][1] = aom_convolve8_avg;
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
   if (sf->x_step_q4 == 16) {
     if (sf->y_step_q4 == 16) {
       // No scaling in either direction.
-      sf->predict[0][0][0] = vpx_convolve_copy;
-      sf->predict[0][0][1] = vpx_convolve_avg;
-      sf->predict[0][1][0] = vpx_convolve8_vert;
-      sf->predict[0][1][1] = vpx_convolve8_avg_vert;
-      sf->predict[1][0][0] = vpx_convolve8_horiz;
-      sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+      sf->predict[0][0][0] = aom_convolve_copy;
+      sf->predict[0][0][1] = aom_convolve_avg;
+      sf->predict[0][1][0] = aom_convolve8_vert;
+      sf->predict[0][1][1] = aom_convolve8_avg_vert;
+      sf->predict[1][0][0] = aom_convolve8_horiz;
+      sf->predict[1][0][1] = aom_convolve8_avg_horiz;
     } else {
       // No scaling in x direction. Must always scale in the y direction.
-      sf->predict[0][0][0] = vpx_convolve8_vert;
-      sf->predict[0][0][1] = vpx_convolve8_avg_vert;
-      sf->predict[0][1][0] = vpx_convolve8_vert;
-      sf->predict[0][1][1] = vpx_convolve8_avg_vert;
-      sf->predict[1][0][0] = vpx_convolve8;
-      sf->predict[1][0][1] = vpx_convolve8_avg;
+      sf->predict[0][0][0] = aom_convolve8_vert;
+      sf->predict[0][0][1] = aom_convolve8_avg_vert;
+      sf->predict[0][1][0] = aom_convolve8_vert;
+      sf->predict[0][1][1] = aom_convolve8_avg_vert;
+      sf->predict[1][0][0] = aom_convolve8;
+      sf->predict[1][0][1] = aom_convolve8_avg;
     }
   } else {
     if (sf->y_step_q4 == 16) {
       // No scaling in the y direction. Must always scale in the x direction.
-      sf->predict[0][0][0] = vpx_convolve8_horiz;
-      sf->predict[0][0][1] = vpx_convolve8_avg_horiz;
-      sf->predict[0][1][0] = vpx_convolve8;
-      sf->predict[0][1][1] = vpx_convolve8_avg;
-      sf->predict[1][0][0] = vpx_convolve8_horiz;
-      sf->predict[1][0][1] = vpx_convolve8_avg_horiz;
+      sf->predict[0][0][0] = aom_convolve8_horiz;
+      sf->predict[0][0][1] = aom_convolve8_avg_horiz;
+      sf->predict[0][1][0] = aom_convolve8;
+      sf->predict[0][1][1] = aom_convolve8_avg;
+      sf->predict[1][0][0] = aom_convolve8_horiz;
+      sf->predict[1][0][1] = aom_convolve8_avg_horiz;
     } else {
       // Must always scale in both directions.
-      sf->predict[0][0][0] = vpx_convolve8;
-      sf->predict[0][0][1] = vpx_convolve8_avg;
-      sf->predict[0][1][0] = vpx_convolve8;
-      sf->predict[0][1][1] = vpx_convolve8_avg;
-      sf->predict[1][0][0] = vpx_convolve8;
-      sf->predict[1][0][1] = vpx_convolve8_avg;
+      sf->predict[0][0][0] = aom_convolve8;
+      sf->predict[0][0][1] = aom_convolve8_avg;
+      sf->predict[0][1][0] = aom_convolve8;
+      sf->predict[0][1][1] = aom_convolve8_avg;
+      sf->predict[1][0][0] = aom_convolve8;
+      sf->predict[1][0][1] = aom_convolve8_avg;
     }
   }
   // 2D subpel motion always gets filtered in both directions
-  sf->predict[1][1][0] = vpx_convolve8;
-  sf->predict[1][1][1] = vpx_convolve8_avg;
+  sf->predict[1][1][0] = aom_convolve8;
+  sf->predict[1][1][1] = aom_convolve8_avg;
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_highbd) {
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-    sf->highbd_predict_ni[0][0][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[0][0][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[0][1][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[0][1][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[1][0][0] = vpx_highbd_convolve8_c;
-    sf->highbd_predict_ni[1][0][1] = vpx_highbd_convolve8_avg_c;
-    sf->highbd_predict_ni[1][1][0] = vpx_highbd_convolve8;
-    sf->highbd_predict_ni[1][1][1] = vpx_highbd_convolve8_avg;
+    sf->highbd_predict_ni[0][0][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[0][0][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[0][1][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[0][1][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[1][0][0] = aom_highbd_convolve8_c;
+    sf->highbd_predict_ni[1][0][1] = aom_highbd_convolve8_avg_c;
+    sf->highbd_predict_ni[1][1][0] = aom_highbd_convolve8;
+    sf->highbd_predict_ni[1][1][1] = aom_highbd_convolve8_avg;
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
     if (sf->x_step_q4 == 16) {
       if (sf->y_step_q4 == 16) {
         // No scaling in either direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve_copy;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve_avg;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve_copy;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve_avg;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
       } else {
         // No scaling in x direction. Must always scale in the y direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8_vert;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg_vert;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
       }
     } else {
       if (sf->y_step_q4 == 16) {
         // No scaling in the y direction. Must always scale in the x direction.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_horiz;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_horiz;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8_horiz;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg_horiz;
       } else {
         // Must always scale in both directions.
-        sf->highbd_predict[0][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[0][1][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg;
-        sf->highbd_predict[1][0][0] = vpx_highbd_convolve8;
-        sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg;
+        sf->highbd_predict[0][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[0][1][0] = aom_highbd_convolve8;
+        sf->highbd_predict[0][1][1] = aom_highbd_convolve8_avg;
+        sf->highbd_predict[1][0][0] = aom_highbd_convolve8;
+        sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
       }
     }
     // 2D subpel motion always gets filtered in both directions.
-    sf->highbd_predict[1][1][0] = vpx_highbd_convolve8;
-    sf->highbd_predict[1][1][1] = vpx_highbd_convolve8_avg;
+    sf->highbd_predict[1][1][0] = aom_highbd_convolve8;
+    sf->highbd_predict[1][1][1] = aom_highbd_convolve8_avg;
   }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
diff --git a/av1/common/scale.h b/av1/common/scale.h
index bb02601..0b49b68 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -8,11 +8,11 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SCALE_H_
-#define VP10_COMMON_SCALE_H_
+#ifndef AV1_COMMON_SCALE_H_
+#define AV1_COMMON_SCALE_H_
 
 #include "av1/common/mv.h"
-#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/aom_convolve.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -32,37 +32,37 @@
   int (*scale_value_y)(int val, const struct scale_factors *sf);
 
   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_convolve_fn_t highbd_predict[2][2][2];  // horiz, vert, avg
-#endif                                           // CONFIG_VP9_HIGHBITDEPTH
+#endif                                           // CONFIG_AOM_HIGHBITDEPTH
 
 // Functions for non-interpolating filters (those that filter zero offsets)
 #if CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
   convolve_fn_t predict_ni[2][2][2];  // horiz, vert, avg
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_convolve_fn_t highbd_predict_ni[2][2][2];  // horiz, vert, avg
-#endif                                              // CONFIG_VP9_HIGHBITDEPTH
+#endif                                              // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
 };
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
 
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h,
-                                        int use_high);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h,
+                                       int use_high);
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
-                                        int other_h, int this_w, int this_h);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                       int other_h, int this_w, int this_h);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
+static INLINE int av1_is_valid_scale(const struct scale_factors *sf) {
   return sf->x_scale_fp != REF_INVALID_SCALE &&
          sf->y_scale_fp != REF_INVALID_SCALE;
 }
 
-static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
-  return vp10_is_valid_scale(sf) &&
+static INLINE int av1_is_scaled(const struct scale_factors *sf) {
+  return av1_is_valid_scale(sf) &&
          (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
 }
 
@@ -76,4 +76,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCALE_H_
+#endif  // AV1_COMMON_SCALE_H_
diff --git a/av1/common/scan.c b/av1/common/scan.c
index dbc36eb..8fc4ca2 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -2817,69 +2817,69 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
   0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x4[16]) = {
   0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x4[16]) = {
   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_4x4[16]) = {
   0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_4x4[16]) = {
   0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15,
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x8[32]) = {
   0,  1,  4,  9,  2,  3,  6,  11, 5,  7,  8,  13, 10, 12, 14, 17,
   15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_4x8[32]) = {
   0, 8,  16, 24, 1, 9,  17, 25, 2, 10, 18, 26, 3, 11, 19, 27,
   4, 12, 20, 28, 5, 13, 21, 29, 6, 14, 22, 30, 7, 15, 23, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_4x8[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_4x8[32]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x4[32]) = {
   0, 1, 4, 9,  15, 19, 24, 28, 2,  3,  6,  11, 16, 21, 25, 29,
   5, 7, 8, 13, 18, 22, 26, 30, 10, 12, 14, 17, 20, 23, 27, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x4[32]) = {
   0, 4, 8,  12, 16, 20, 24, 28, 1, 5, 9,  13, 17, 21, 25, 29,
   2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x4[32]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x4[32]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 };
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x8[64]) = {
   0, 8,  16, 24, 32, 40, 48, 56, 1, 9,  17, 25, 33, 41, 49, 57,
   2, 10, 18, 26, 34, 42, 50, 58, 3, 11, 19, 27, 35, 43, 51, 59,
   4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61,
   6, 14, 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 55, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x8[64]) = {
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
   32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
@@ -2887,21 +2887,21 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_8x8[64]) = {
   0,  3,  8,  15, 22, 32, 40, 47, 1,  5,  11, 18, 26, 34, 44, 51,
   2,  7,  13, 20, 28, 38, 46, 54, 4,  10, 16, 24, 31, 41, 50, 56,
   6,  12, 21, 27, 35, 43, 52, 58, 9,  17, 25, 33, 39, 48, 55, 60,
   14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_8x8[64]) = {
   0,  1,  2,  5,  8,  12, 19, 24, 3,  4,  7,  10, 15, 20, 30, 39,
   6,  9,  13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
   18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
   32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x8[64]) = {
   0,  2,  5,  9,  14, 22, 31, 37, 1,  4,  8,  13, 19, 26, 38, 44,
   3,  6,  10, 17, 24, 30, 42, 49, 7,  11, 15, 21, 29, 36, 47, 53,
   12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
@@ -2909,7 +2909,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x16[128]) = {
   0,  1,  3,   6,   10,  15,  21,  28,  2,  4,   7,   11,  16,  22,  29,  36,
   5,  8,  12,  17,  23,  30,  37,  44,  9,  13,  18,  24,  31,  38,  45,  52,
   14, 19, 25,  32,  39,  46,  53,  60,  20, 26,  33,  40,  47,  54,  61,  68,
@@ -2920,7 +2920,7 @@
   91, 98, 105, 111, 116, 120, 123, 125, 99, 106, 112, 117, 121, 124, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x8[128]) = {
   0,  1,  3,  6,  10, 15, 21, 28, 36, 44,  52,  60,  68,  76,  84,  92,
   2,  4,  7,  11, 16, 22, 29, 37, 45, 53,  61,  69,  77,  85,  93,  100,
   5,  8,  12, 17, 23, 30, 38, 46, 54, 62,  70,  78,  86,  94,  101, 107,
@@ -2931,7 +2931,7 @@
   35, 43, 51, 59, 67, 75, 83, 91, 99, 106, 112, 117, 121, 124, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_8x16[128]) = {
   0,  16, 32, 48, 64, 80, 96,  112, 1,  17, 33, 49, 65, 81, 97,  113,
   2,  18, 34, 50, 66, 82, 98,  114, 3,  19, 35, 51, 67, 83, 99,  115,
   4,  20, 36, 52, 68, 84, 100, 116, 5,  21, 37, 53, 69, 85, 101, 117,
@@ -2942,7 +2942,7 @@
   14, 30, 46, 62, 78, 94, 110, 126, 15, 31, 47, 63, 79, 95, 111, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x8[128]) = {
   0, 8,  16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,  104, 112, 120,
   1, 9,  17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97,  105, 113, 121,
   2, 10, 18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98,  106, 114, 122,
@@ -2953,7 +2953,7 @@
   7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 87, 95, 103, 111, 119, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_8x16[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_8x16[128]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -2965,7 +2965,7 @@
   120, 121, 122, 123, 124, 125, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x8[128]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x8[128]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -2977,7 +2977,7 @@
   120, 121, 122, 123, 124, 125, 126, 127,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x32[512]) = {
   0,   1,   3,   6,   10,  15,  21,  28,  36,  45,  55,  66,  78,  91,  105,
   120, 2,   4,   7,   11,  16,  22,  29,  37,  46,  56,  67,  79,  92,  106,
   121, 136, 5,   8,   12,  17,  23,  30,  38,  47,  57,  68,  80,  93,  107,
@@ -3015,7 +3015,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x16[512]) = {
   0,   1,   3,   6,   10,  15,  21,  28,  36,  45,  55,  66,  78,  91,  105,
   120, 136, 152, 168, 184, 200, 216, 232, 248, 264, 280, 296, 312, 328, 344,
   360, 376, 2,   4,   7,   11,  16,  22,  29,  37,  46,  56,  67,  79,  92,
@@ -3053,7 +3053,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x32[512]) = {
   0,  32, 64, 96,  128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480,
   1,  33, 65, 97,  129, 161, 193, 225, 257, 289, 321, 353, 385, 417, 449, 481,
   2,  34, 66, 98,  130, 162, 194, 226, 258, 290, 322, 354, 386, 418, 450, 482,
@@ -3088,7 +3088,7 @@
   31, 63, 95, 127, 159, 191, 223, 255, 287, 319, 351, 383, 415, 447, 479, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x16[512]) = {
   0,   16,  32,  48,  64,  80,  96,  112, 128, 144, 160, 176, 192, 208, 224,
   240, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464,
   480, 496, 1,   17,  33,  49,  65,  81,  97,  113, 129, 145, 161, 177, 193,
@@ -3126,7 +3126,7 @@
   495, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x32[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x32[512]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3164,7 +3164,7 @@
   510, 511,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x16[512]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x16[512]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3205,7 +3205,7 @@
 #endif  // CONFIG_EXT_TX
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_16x16[256]) = {
   0,  16, 32, 48, 64, 80, 96,  112, 128, 144, 160, 176, 192, 208, 224, 240,
   1,  17, 33, 49, 65, 81, 97,  113, 129, 145, 161, 177, 193, 209, 225, 241,
   2,  18, 34, 50, 66, 82, 98,  114, 130, 146, 162, 178, 194, 210, 226, 242,
@@ -3224,7 +3224,7 @@
   15, 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_16x16[256]) = {
   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,
   15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
   30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
@@ -3246,7 +3246,7 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_16x16[256]) = {
   0,  4,  11,  20,  31,  43,  59,  75,  85,  109, 130, 150, 165, 181, 195, 198,
   1,  6,  14,  23,  34,  47,  64,  81,  95,  114, 135, 153, 171, 188, 201, 212,
   2,  8,  16,  25,  38,  52,  67,  83,  101, 116, 136, 157, 172, 190, 205, 216,
@@ -3265,7 +3265,7 @@
   65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_16x16[256]) = {
   0,   1,   2,   4,   6,   9,   12,  17,  22,  29,  36,  43,  54,  64,  76,
   86,  3,   5,   7,   11,  15,  19,  25,  32,  38,  48,  59,  68,  84,  99,
   115, 130, 8,   10,  13,  18,  23,  27,  33,  42,  51,  60,  72,  88,  103,
@@ -3286,7 +3286,7 @@
   255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x16[256]) = {
   0,   2,   5,   9,   17,  24,  36,  44,  55,  72,  88,  104, 128, 143, 166,
   179, 1,   4,   8,   13,  20,  30,  40,  54,  66,  79,  96,  113, 141, 154,
   178, 196, 3,   7,   11,  18,  25,  33,  46,  57,  71,  86,  101, 119, 148,
@@ -3308,7 +3308,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_mcol_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mcol_iscan_32x32[1024]) = {
   0,   32,   64,  96,   128, 160,  192, 224,  256, 288,  320, 352,  384, 416,
   448, 480,  512, 544,  576, 608,  640, 672,  704, 736,  768, 800,  832, 864,
   896, 928,  960, 992,  1,   33,   65,  97,   129, 161,  193, 225,  257, 289,
@@ -3385,7 +3385,7 @@
   991, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_mrow_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_mrow_iscan_32x32[1024]) = {
   0,    1,    2,    3,    4,    5,    6,    7,    8,    9,    10,   11,   12,
   13,   14,   15,   16,   17,   18,   19,   20,   21,   22,   23,   24,   25,
   26,   27,   28,   29,   30,   31,   32,   33,   34,   35,   36,   37,   38,
@@ -3468,7 +3468,7 @@
 };
 #endif  // CONFIG_EXT_TX
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x32[1024]) = {
   0,    2,    5,    10,   17,   25,   38,   47,   62,   83,   101,  121,  145,
   170,  193,  204,  210,  219,  229,  233,  245,  257,  275,  299,  342,  356,
   377,  405,  455,  471,  495,  527,  1,    4,    8,    15,   22,   30,   45,
@@ -3551,7 +3551,7 @@
 };
 
 #if CONFIG_EXT_TX
-DECLARE_ALIGNED(16, static const int16_t, vp10_v2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_v2_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  512,  518,  527,  539,  551,  566,  584,  602,  621,  644,
   668,  695,  721,  748,  780,  811,  2,    3,    6,    11,   17,   26,   35,
@@ -3633,7 +3633,7 @@
   978,  987,  995,  1002, 1008, 1013, 1017, 1020, 1022, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_h2_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_h2_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  214,  233,  254,  273,  292,  309,  328,  345,  362,  378,
   397,  415,  431,  447,  464,  481,  2,    3,    6,    11,   17,   26,   35,
@@ -3715,7 +3715,7 @@
   978,  987,  995,  1002, 1008, 1013, 1017, 1020, 1022, 1023,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_qtr_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_qtr_iscan_32x32[1024]) = {
   0,    1,    4,    9,    15,   22,   33,   43,   56,   71,   86,   104,  121,
   142,  166,  189,  256,  268,  286,  310,  334,  364,  400,  435,  471,  510,
   553,  598,  640,  683,  732,  780,  2,    3,    6,    11,   17,   26,   35,
@@ -3798,371 +3798,369 @@
 };
 #endif  // CONFIG_EXT_TX
 
-const scan_order vp10_default_scan_orders[TX_SIZES] = {
-  { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-  { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-  { default_scan_16x16, vp10_default_iscan_16x16,
-    default_scan_16x16_neighbors },
-  { default_scan_32x32, vp10_default_iscan_32x32,
-    default_scan_32x32_neighbors },
+const scan_order av1_default_scan_orders[TX_SIZES] = {
+  { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+  { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+  { default_scan_16x16, av1_default_iscan_16x16, default_scan_16x16_neighbors },
+  { default_scan_32x32, av1_default_iscan_32x32, default_scan_32x32_neighbors },
 };
 
 #if CONFIG_EXT_TX
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
   {
       // TX_4X4
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-      { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-      { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+      { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+      { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
   },
   {
       // TX_8X8
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-      { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-      { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+      { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+      { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
   },
   {
       // TX_16X16
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-      { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-      { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+      { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+      { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
   },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
   }
 };
 
-const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
+const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
   {
       // TX_4X4
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
-      { mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
-      { mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
+      { mrow_scan_4x4, av1_mrow_iscan_4x4, mrow_scan_4x4_neighbors },
+      { mcol_scan_4x4, av1_mcol_iscan_4x4, mcol_scan_4x4_neighbors },
   },
   {
       // TX_8X8
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
-      { mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
-      { mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
+      { mrow_scan_8x8, av1_mrow_iscan_8x8, mrow_scan_8x8_neighbors },
+      { mcol_scan_8x8, av1_mcol_iscan_8x8, mcol_scan_8x8_neighbors },
   },
   {
       // TX_16X16
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { default_scan_16x16, vp10_default_iscan_16x16,
+      { default_scan_16x16, av1_default_iscan_16x16,
         default_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
-      { mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
-      { mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
+      { mrow_scan_16x16, av1_mrow_iscan_16x16, mrow_scan_16x16_neighbors },
+      { mcol_scan_16x16, av1_mcol_iscan_16x16, mcol_scan_16x16_neighbors },
   },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { h2_scan_32x32, vp10_h2_iscan_32x32, h2_scan_32x32_neighbors },
-      { v2_scan_32x32, vp10_v2_iscan_32x32, v2_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { qtr_scan_32x32, vp10_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
-      { mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
-      { mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { h2_scan_32x32, av1_h2_iscan_32x32, h2_scan_32x32_neighbors },
+      { v2_scan_32x32, av1_v2_iscan_32x32, v2_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { qtr_scan_32x32, av1_qtr_iscan_32x32, qtr_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
+      { mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
+      { mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
   },
   {
       // TX_4X8
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { default_scan_4x8, vp10_default_iscan_4x8, default_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
-      { mrow_scan_4x8, vp10_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
-      { mcol_scan_4x8, vp10_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
+      { mrow_scan_4x8, av1_mrow_iscan_4x8, mrow_scan_4x8_neighbors },
+      { mcol_scan_4x8, av1_mcol_iscan_4x8, mcol_scan_4x8_neighbors },
   },
   {
       // TX_8X4
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { default_scan_8x4, vp10_default_iscan_8x4, default_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
-      { mrow_scan_8x4, vp10_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
-      { mcol_scan_8x4, vp10_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { default_scan_8x4, av1_default_iscan_8x4, default_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
+      { mrow_scan_8x4, av1_mrow_iscan_8x4, mrow_scan_8x4_neighbors },
+      { mcol_scan_8x4, av1_mcol_iscan_8x4, mcol_scan_8x4_neighbors },
   },
   {
       // TX_8X16
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { default_scan_8x16, vp10_default_iscan_8x16,
+      { default_scan_8x16, av1_default_iscan_8x16,
         default_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
-      { mrow_scan_8x16, vp10_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
-      { mcol_scan_8x16, vp10_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
+      { mrow_scan_8x16, av1_mrow_iscan_8x16, mrow_scan_8x16_neighbors },
+      { mcol_scan_8x16, av1_mcol_iscan_8x16, mcol_scan_8x16_neighbors },
   },
   {
       // TX_16X8
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { default_scan_16x8, vp10_default_iscan_16x8,
+      { default_scan_16x8, av1_default_iscan_16x8,
         default_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
-      { mrow_scan_16x8, vp10_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
-      { mcol_scan_16x8, vp10_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
+      { mrow_scan_16x8, av1_mrow_iscan_16x8, mrow_scan_16x8_neighbors },
+      { mcol_scan_16x8, av1_mcol_iscan_16x8, mcol_scan_16x8_neighbors },
   },
   {
       // TX_16X32
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { default_scan_16x32, vp10_default_iscan_16x32,
+      { default_scan_16x32, av1_default_iscan_16x32,
         default_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
-      { mrow_scan_16x32, vp10_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
-      { mcol_scan_16x32, vp10_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
+      { mrow_scan_16x32, av1_mrow_iscan_16x32, mrow_scan_16x32_neighbors },
+      { mcol_scan_16x32, av1_mcol_iscan_16x32, mcol_scan_16x32_neighbors },
   },
   {
       // TX_32X16
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { default_scan_32x16, vp10_default_iscan_32x16,
+      { default_scan_32x16, av1_default_iscan_32x16,
         default_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
-      { mrow_scan_32x16, vp10_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
-      { mcol_scan_32x16, vp10_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
+      { mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
+      { mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
   }
 };
 
 #else   // CONFIG_EXT_TX
 
-const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES] = {
   { // TX_4X4
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-    { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-    { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+    { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+    { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors } },
   { // TX_8X8
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-    { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-    { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+    { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+    { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors } },
   { // TX_16X16
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors },
-    { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-    { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+    { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors } },
   {
       // TX_32X32
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
-      { default_scan_32x32, vp10_default_iscan_32x32,
+      { default_scan_32x32, av1_default_iscan_32x32,
         default_scan_32x32_neighbors },
   }
 };
diff --git a/av1/common/scan.h b/av1/common/scan.h
index d2d9f35..cba92e7 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SCAN_H_
-#define VP10_COMMON_SCAN_H_
+#ifndef AV1_COMMON_SCAN_H_
+#define AV1_COMMON_SCAN_H_
 
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
 
 #include "av1/common/enums.h"
@@ -29,8 +29,8 @@
   const int16_t *neighbors;
 } scan_order;
 
-extern const scan_order vp10_default_scan_orders[TX_SIZES];
-extern const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES];
+extern const scan_order av1_default_scan_orders[TX_SIZES];
+extern const scan_order av1_intra_scan_orders[TX_SIZES][TX_TYPES];
 
 static INLINE int get_coef_context(const int16_t *neighbors,
                                    const uint8_t *token_cache, int c) {
@@ -41,26 +41,26 @@
 
 static INLINE const scan_order *get_intra_scan(TX_SIZE tx_size,
                                                TX_TYPE tx_type) {
-  return &vp10_intra_scan_orders[tx_size][tx_type];
+  return &av1_intra_scan_orders[tx_size][tx_type];
 }
 
 #if CONFIG_EXT_TX
-extern const scan_order vp10_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
+extern const scan_order av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES];
 
 static INLINE const scan_order *get_inter_scan(TX_SIZE tx_size,
                                                TX_TYPE tx_type) {
-  return &vp10_inter_scan_orders[tx_size][tx_type];
+  return &av1_inter_scan_orders[tx_size][tx_type];
 }
 #endif  // CONFIG_EXT_TX
 
 static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type,
                                          int is_inter) {
 #if CONFIG_EXT_TX
-  return is_inter ? &vp10_inter_scan_orders[tx_size][tx_type]
-                  : &vp10_intra_scan_orders[tx_size][tx_type];
+  return is_inter ? &av1_inter_scan_orders[tx_size][tx_type]
+                  : &av1_intra_scan_orders[tx_size][tx_type];
 #else
   (void)is_inter;
-  return &vp10_intra_scan_orders[tx_size][tx_type];
+  return &av1_intra_scan_orders[tx_size][tx_type];
 #endif  // CONFIG_EXT_TX
 }
 
@@ -68,4 +68,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCAN_H_
+#endif  // AV1_COMMON_SCAN_H_
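
The renamed get_scan()/get_intra_scan() helpers above only index the av1_*_scan_orders tables; what each table entry encodes is a scan[] array (scan position to raster position within the transform block) together with its inverse iscan[] and a neighbors[] array for context derivation. A small standalone sketch of the scan/iscan relationship, using an illustrative 4x4 order rather than any of the library's actual tables:

    #include <stdio.h>
    #include <stdint.h>

    #define N 16 /* a 4x4 transform block */

    /* Illustrative order only -- not one of the library's tables. */
    static const int16_t demo_scan[N] = { 0, 1, 4, 8,  5,  2, 3,  6,
                                          9, 12, 13, 10, 7, 11, 14, 15 };

    int main(void) {
      int16_t demo_iscan[N]; /* inverse: raster position -> scan position */
      for (int i = 0; i < N; ++i) demo_iscan[demo_scan[i]] = (int16_t)i;
      for (int r = 0; r < N; ++r)
        printf("raster %2d is visited at scan position %2d\n", r, demo_iscan[r]);
      return 0;
    }
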
diff --git a/av1/common/seg_common.c b/av1/common/seg_common.c
index f131c7b..9a5b8c8 100644
--- a/av1/common/seg_common.c
+++ b/av1/common/seg_common.c
@@ -25,26 +25,26 @@
 // the coding mechanism is still subject to change so these provide a
 // convenient single point of change.
 
-void vp10_clearall_segfeatures(struct segmentation *seg) {
-  vp10_zero(seg->feature_data);
-  vp10_zero(seg->feature_mask);
+void av1_clearall_segfeatures(struct segmentation *seg) {
+  av1_zero(seg->feature_data);
+  av1_zero(seg->feature_mask);
 }
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id) {
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+                           SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] |= 1 << feature_id;
 }
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_max[feature_id];
 }
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_signed[feature_id];
 }
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
-                      SEG_LVL_FEATURES feature_id, int seg_data) {
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+                     SEG_LVL_FEATURES feature_id, int seg_data) {
   assert(seg_data <= seg_feature_data_max[feature_id]);
   if (seg_data < 0) {
     assert(seg_feature_data_signed[feature_id]);
@@ -54,7 +54,7 @@
   seg->feature_data[segment_id][feature_id] = seg_data;
 }
 
-const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
+const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
   2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
 };
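
The av1_segment_tree table above is an aom_tree_index array. Assuming the usual libvpx/libaom tree convention (a positive entry is the array index of the next node's first child, a non-positive entry is a leaf storing the negated token), these 14 entries describe a balanced 3-level tree over the 8 segment ids. A standalone sketch of walking such a tree, with demo_* names standing in for the real types:

    #include <stdint.h>
    #include <stdio.h>

    typedef int8_t demo_tree_index; /* plays the role of aom_tree_index */

    static const demo_tree_index demo_segment_tree[14] = {
      2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
    };

    /* Follow one bit per level until a leaf (non-positive entry) is reached. */
    static int tree_read(const demo_tree_index *tree, const int *bits) {
      demo_tree_index i = 0;
      int n = 0;
      do {
        i = tree[i + bits[n++]];
      } while (i > 0);
      return -i; /* the leaf stores the negated token, here the segment id */
    }

    int main(void) {
      const int bits[3] = { 1, 0, 1 };
      printf("decoded segment id: %d\n", tree_read(demo_segment_tree, bits)); /* 5 */
      return 0;
    }
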
 
diff --git a/av1/common/seg_common.h b/av1/common/seg_common.h
index 7a8fa8f..f863ad8 100644
--- a/av1/common/seg_common.h
+++ b/av1/common/seg_common.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_SEG_COMMON_H_
-#define VP10_COMMON_SEG_COMMON_H_
+#ifndef AV1_COMMON_SEG_COMMON_H_
+#define AV1_COMMON_SEG_COMMON_H_
 
 #include "aom_dsp/prob.h"
 
@@ -46,8 +46,8 @@
 };
 
 struct segmentation_probs {
-  vpx_prob tree_probs[SEG_TREE_PROBS];
-  vpx_prob pred_probs[PREDICTION_PROBS];
+  aom_prob tree_probs[SEG_TREE_PROBS];
+  aom_prob pred_probs[PREDICTION_PROBS];
 };
 
 static INLINE int segfeature_active(const struct segmentation *seg,
@@ -56,27 +56,27 @@
   return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
 }
 
-void vp10_clearall_segfeatures(struct segmentation *seg);
+void av1_clearall_segfeatures(struct segmentation *seg);
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id);
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+                           SEG_LVL_FEATURES feature_id);
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
-                      SEG_LVL_FEATURES feature_id, int seg_data);
+void av1_set_segdata(struct segmentation *seg, int segment_id,
+                     SEG_LVL_FEATURES feature_id, int seg_data);
 
 static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
                               SEG_LVL_FEATURES feature_id) {
   return seg->feature_data[segment_id][feature_id];
 }
 
-extern const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
+extern const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SEG_COMMON_H_
+#endif  // AV1_COMMON_SEG_COMMON_H_
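
For orientation, the segmentation API renamed above keeps one feature bit-mask per segment plus a signed data value per (segment, feature): av1_enable_segfeature() sets the bit, av1_set_segdata() stores the value, and segfeature_active()/get_segdata() read them back. A standalone sketch of that bookkeeping, with hypothetical stand-in names and sizes rather than the library's:

    #include <stdio.h>

    #define DEMO_SEGMENTS 8
    #define DEMO_FEATURES 4

    typedef struct {
      int feature_data[DEMO_SEGMENTS][DEMO_FEATURES];
      unsigned int feature_mask[DEMO_SEGMENTS];
    } demo_segmentation;

    static void enable_feature(demo_segmentation *seg, int id, int feature) {
      seg->feature_mask[id] |= 1u << feature;
    }

    static int feature_active(const demo_segmentation *seg, int id, int feature) {
      return (seg->feature_mask[id] >> feature) & 1u;
    }

    int main(void) {
      demo_segmentation seg = { { { 0 } }, { 0 } };
      enable_feature(&seg, 2, 1);     /* turn feature 1 on for segment 2 */
      seg.feature_data[2][1] = -12;   /* a signed per-segment delta */
      printf("segment 2 / feature 1: active=%d data=%d\n",
             feature_active(&seg, 2, 1), seg.feature_data[2][1]);
      return 0;
    }
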
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index ba91a46..13150e0 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -8,9 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
 #include "av1/common/entropymode.h"
 #include "av1/common/thread_common.h"
 #include "av1/common/reconinter.h"
@@ -33,7 +33,7 @@
 }
 #endif  // CONFIG_MULTITHREAD
 
-static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
+static INLINE void sync_read(AV1LfSync *const lf_sync, int r, int c) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
 
@@ -53,7 +53,7 @@
 #endif  // CONFIG_MULTITHREAD
 }
 
-static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
+static INLINE void sync_write(AV1LfSync *const lf_sync, int r, int c,
                               const int sb_cols) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
@@ -86,9 +86,9 @@
 
 // Implement row loopfiltering for each thread.
 static INLINE void thread_loop_filter_rows(
-    const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+    const YV12_BUFFER_CONFIG *const frame_buffer, AV1_COMMON *const cm,
     struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
-    int y_only, VP10LfSync *const lf_sync) {
+    int y_only, AV1LfSync *const lf_sync) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   const int sb_cols = mi_cols_aligned_to_sb(cm) >> cm->mib_size_log2;
   int mi_row, mi_col;
@@ -123,28 +123,28 @@
 
       sync_read(lf_sync, r, c);
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
 #if CONFIG_EXT_PARTITION_TYPES
       for (plane = 0; plane < num_planes; ++plane)
-        vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
-                                       mi_col);
+        av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, mi_row,
+                                      mi_col);
 #else
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                           mi_row, mi_col);
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+                                          mi_row, mi_col);
             break;
         }
       }
@@ -155,7 +155,7 @@
 }
 
 // Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP10LfSync *const lf_sync,
+static int loop_filter_row_worker(AV1LfSync *const lf_sync,
                                   LFWorkerData *const lf_data) {
   thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                           lf_data->start, lf_data->stop, lf_data->y_only,
@@ -163,18 +163,18 @@
   return 1;
 }
 
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                                 struct macroblockd_plane planes[MAX_MB_PLANE],
                                 int start, int stop, int y_only,
-                                VPxWorker *workers, int nworkers,
-                                VP10LfSync *lf_sync) {
-  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+                                AVxWorker *workers, int nworkers,
+                                AV1LfSync *lf_sync) {
+  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   // Number of superblock rows and cols
   const int sb_rows = mi_rows_aligned_to_sb(cm) >> cm->mib_size_log2;
   // Decoder may allocate more threads than number of tiles based on user's
   // input.
   const int tile_cols = cm->tile_cols;
-  const int num_workers = VPXMIN(nworkers, tile_cols);
+  const int num_workers = AOMMIN(nworkers, tile_cols);
   int i;
 
 #if CONFIG_EXT_PARTITION
@@ -186,8 +186,8 @@
 
   if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
       num_workers > lf_sync->num_workers) {
-    vp10_loop_filter_dealloc(lf_sync);
-    vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+    av1_loop_filter_dealloc(lf_sync);
+    av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
   }
 
   // Initialize cur_sb_col to -1 for all SB rows.
@@ -202,15 +202,15 @@
   // because of contention. If the multithreading code changes in the future
   // then the number of workers used by the loopfilter should be revisited.
   for (i = 0; i < num_workers; ++i) {
-    VPxWorker *const worker = &workers[i];
+    AVxWorker *const worker = &workers[i];
     LFWorkerData *const lf_data = &lf_sync->lfdata[i];
 
-    worker->hook = (VPxWorkerHook)loop_filter_row_worker;
+    worker->hook = (AVxWorkerHook)loop_filter_row_worker;
     worker->data1 = lf_sync;
     worker->data2 = lf_data;
 
     // Loopfilter data
-    vp10_loop_filter_data_reset(lf_data, frame, cm, planes);
+    av1_loop_filter_data_reset(lf_data, frame, cm, planes);
     lf_data->start = start + i * cm->mib_size;
     lf_data->stop = stop;
     lf_data->y_only = y_only;
@@ -229,11 +229,11 @@
   }
 }
 
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
-                               struct macroblockd_plane planes[MAX_MB_PLANE],
-                               int frame_filter_level, int y_only,
-                               int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync) {
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
+                              struct macroblockd_plane planes[MAX_MB_PLANE],
+                              int frame_filter_level, int y_only,
+                              int partial_frame, AVxWorker *workers,
+                              int num_workers, AV1LfSync *lf_sync) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
 
   if (!frame_filter_level) return;
@@ -243,10 +243,10 @@
   if (partial_frame && cm->mi_rows > 8) {
     start_mi_row = cm->mi_rows >> 1;
     start_mi_row &= 0xfffffff8;
-    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
+    mi_rows_to_filter = AOMMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
 
   loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
                       workers, num_workers, lf_sync);
@@ -267,15 +267,15 @@
 }
 
 // Allocate memory for lf row synchronization
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
-                            int width, int num_workers) {
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
+                           int width, int num_workers) {
   lf_sync->rows = rows;
 #if CONFIG_MULTITHREAD
   {
     int i;
 
     CHECK_MEM_ERROR(cm, lf_sync->mutex_,
-                    vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
+                    aom_malloc(sizeof(*lf_sync->mutex_) * rows));
     if (lf_sync->mutex_) {
       for (i = 0; i < rows; ++i) {
         pthread_mutex_init(&lf_sync->mutex_[i], NULL);
@@ -283,7 +283,7 @@
     }
 
     CHECK_MEM_ERROR(cm, lf_sync->cond_,
-                    vpx_malloc(sizeof(*lf_sync->cond_) * rows));
+                    aom_malloc(sizeof(*lf_sync->cond_) * rows));
     if (lf_sync->cond_) {
       for (i = 0; i < rows; ++i) {
         pthread_cond_init(&lf_sync->cond_[i], NULL);
@@ -293,18 +293,18 @@
 #endif  // CONFIG_MULTITHREAD
 
   CHECK_MEM_ERROR(cm, lf_sync->lfdata,
-                  vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
+                  aom_malloc(num_workers * sizeof(*lf_sync->lfdata)));
   lf_sync->num_workers = num_workers;
 
   CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
-                  vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
+                  aom_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
 
   // Set up nsync.
   lf_sync->sync_range = get_sync_range(width);
 }
 
 // Deallocate lf synchronization related mutex and data
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync) {
   if (lf_sync != NULL) {
 #if CONFIG_MULTITHREAD
     int i;
@@ -313,26 +313,26 @@
       for (i = 0; i < lf_sync->rows; ++i) {
         pthread_mutex_destroy(&lf_sync->mutex_[i]);
       }
-      vpx_free(lf_sync->mutex_);
+      aom_free(lf_sync->mutex_);
     }
     if (lf_sync->cond_ != NULL) {
       for (i = 0; i < lf_sync->rows; ++i) {
         pthread_cond_destroy(&lf_sync->cond_[i]);
       }
-      vpx_free(lf_sync->cond_);
+      aom_free(lf_sync->cond_);
     }
 #endif  // CONFIG_MULTITHREAD
-    vpx_free(lf_sync->lfdata);
-    vpx_free(lf_sync->cur_sb_col);
+    aom_free(lf_sync->lfdata);
+    aom_free(lf_sync->cur_sb_col);
     // clear the structure as the source of this call may be a resize in which
     // case this call will be followed by an _alloc() which may fail.
-    vp10_zero(*lf_sync);
+    av1_zero(*lf_sync);
   }
 }
 
 // Accumulate frame counts. FRAME_COUNTS consist solely of 'unsigned int'
 // members, so we treat it as an array, and sum over the whole length.
-void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+void av1_accumulate_frame_counts(AV1_COMMON *cm, FRAME_COUNTS *counts) {
   unsigned int *const acc = (unsigned int *)&cm->counts;
   const unsigned int *const cnt = (unsigned int *)counts;
 
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index 3df9557..29085cb 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -8,21 +8,21 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_
-#define VP10_COMMON_LOOPFILTER_THREAD_H_
-#include "./vpx_config.h"
+#ifndef AV1_COMMON_LOOPFILTER_THREAD_H_
+#define AV1_COMMON_LOOPFILTER_THREAD_H_
+#include "./aom_config.h"
 #include "av1/common/loopfilter.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct FRAME_COUNTS;
 
 // Loopfilter row synchronization
-typedef struct VP10LfSyncData {
+typedef struct AV1LfSyncData {
 #if CONFIG_MULTITHREAD
   pthread_mutex_t *mutex_;
   pthread_cond_t *cond_;
@@ -37,27 +37,27 @@
   // Row-based parallel loopfilter data
   LFWorkerData *lfdata;
   int num_workers;
-} VP10LfSync;
+} AV1LfSync;
 
 // Allocate memory for loopfilter row synchronization.
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
-                            int rows, int width, int num_workers);
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, struct AV1Common *cm, int rows,
+                           int width, int num_workers);
 
 // Deallocate loopfilter synchronization related mutex and data.
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync);
 
 // Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
-                               struct macroblockd_plane planes[MAX_MB_PLANE],
-                               int frame_filter_level, int y_only,
-                               int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync);
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
+                              struct macroblockd_plane planes[MAX_MB_PLANE],
+                              int frame_filter_level, int y_only,
+                              int partial_frame, AVxWorker *workers,
+                              int num_workers, AV1LfSync *lf_sync);
 
-void vp10_accumulate_frame_counts(struct VP10Common *cm,
-                                  struct FRAME_COUNTS *counts);
+void av1_accumulate_frame_counts(struct AV1Common *cm,
+                                 struct FRAME_COUNTS *counts);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_THREAD_H_
+#endif  // AV1_COMMON_LOOPFILTER_THREAD_H_
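
The AV1LfSync fields renamed above (mutex_, cond_, cur_sb_col, sync_range) back the per-superblock-row handshake used by sync_read()/sync_write() in thread_common.c: a worker filtering row r stalls until row r-1 has advanced at least sync_range columns past it. A simplified, standalone sketch of that pattern (two rows, one signal per column, hypothetical constants, not the library's exact wait condition):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ROWS 2
    #define COLS 8
    #define NSYNC 2 /* stand-in for lf_sync->sync_range */

    static pthread_mutex_t row_mutex[ROWS];
    static pthread_cond_t row_cond[ROWS];
    static int cur_sb_col[ROWS]; /* last column finished in each row */

    static void demo_sync_read(int r, int c) {
      if (r == 0) return; /* the top row never waits */
      pthread_mutex_lock(&row_mutex[r - 1]);
      while (cur_sb_col[r - 1] < c + NSYNC) /* wait for the row above to get ahead */
        pthread_cond_wait(&row_cond[r - 1], &row_mutex[r - 1]);
      pthread_mutex_unlock(&row_mutex[r - 1]);
    }

    static void demo_sync_write(int r, int c) {
      pthread_mutex_lock(&row_mutex[r]);
      cur_sb_col[r] = c;
      pthread_cond_signal(&row_cond[r]);
      pthread_mutex_unlock(&row_mutex[r]);
    }

    static void *row_worker(void *arg) {
      const int r = (int)(intptr_t)arg;
      for (int c = 0; c < COLS; ++c) {
        demo_sync_read(r, c);
        printf("filter row %d, col %d\n", r, c); /* stand-in for the filter call */
        demo_sync_write(r, c);
      }
      demo_sync_write(r, COLS + NSYNC); /* release anyone waiting past the end */
      return NULL;
    }

    int main(void) {
      pthread_t workers[ROWS];
      for (int r = 0; r < ROWS; ++r) {
        pthread_mutex_init(&row_mutex[r], NULL);
        pthread_cond_init(&row_cond[r], NULL);
        cur_sb_col[r] = -1;
        pthread_create(&workers[r], NULL, row_worker, (void *)(intptr_t)r);
      }
      for (int r = 0; r < ROWS; ++r) pthread_join(workers[r], NULL);
      return 0;
    }
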
diff --git a/av1/common/tile_common.c b/av1/common/tile_common.c
index e79734e..220cad9 100644
--- a/av1/common/tile_common.c
+++ b/av1/common/tile_common.c
@@ -10,21 +10,21 @@
 
 #include "av1/common/tile_common.h"
 #include "av1/common/onyxc_int.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
-void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) {
+void av1_tile_set_row(TileInfo *tile, const AV1_COMMON *cm, int row) {
   tile->mi_row_start = row * cm->tile_height;
-  tile->mi_row_end = VPXMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
+  tile->mi_row_end = AOMMIN(tile->mi_row_start + cm->tile_height, cm->mi_rows);
 }
 
-void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) {
+void av1_tile_set_col(TileInfo *tile, const AV1_COMMON *cm, int col) {
   tile->mi_col_start = col * cm->tile_width;
-  tile->mi_col_end = VPXMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
+  tile->mi_col_end = AOMMIN(tile->mi_col_start + cm->tile_width, cm->mi_cols);
 }
 
-void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
-  vp10_tile_set_row(tile, cm, row);
-  vp10_tile_set_col(tile, cm, col);
+void av1_tile_init(TileInfo *tile, const AV1_COMMON *cm, int row, int col) {
+  av1_tile_set_row(tile, cm, row);
+  av1_tile_set_col(tile, cm, col);
 }
 
 #if !CONFIG_EXT_TILE
@@ -49,8 +49,8 @@
   return max_log2 - 1;
 }
 
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
-                          int *max_log2_tile_cols) {
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+                         int *max_log2_tile_cols) {
   const int max_sb_cols =
       ALIGN_POWER_OF_TWO(mi_cols, MAX_MIB_SIZE_LOG2) >> MAX_MIB_SIZE_LOG2;
   *min_log2_tile_cols = get_min_log2_tile_cols(max_sb_cols);
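
The renamed av1_tile_set_row()/av1_tile_set_col() above are plain interval arithmetic: each tile spans cm->tile_width (or tile_height) mi units and the last tile is clamped with AOMMIN. A standalone sketch with hypothetical numbers:

    #include <stdio.h>

    #define AOMMIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void) {
      const int mi_cols = 150;    /* hypothetical frame width in mi units */
      const int tile_width = 64;  /* hypothetical cm->tile_width */
      for (int col = 0; col * tile_width < mi_cols; ++col) {
        const int mi_col_start = col * tile_width;
        const int mi_col_end = AOMMIN(mi_col_start + tile_width, mi_cols);
        printf("tile column %d covers mi columns [%d, %d)\n",
               col, mi_col_start, mi_col_end);
      }
      return 0;
    }
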
diff --git a/av1/common/tile_common.h b/av1/common/tile_common.h
index a502173..68d434a 100644
--- a/av1/common/tile_common.h
+++ b/av1/common/tile_common.h
@@ -8,14 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_TILE_COMMON_H_
-#define VP10_COMMON_TILE_COMMON_H_
+#ifndef AV1_COMMON_TILE_COMMON_H_
+#define AV1_COMMON_TILE_COMMON_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
 typedef struct TileInfo {
   int mi_row_start, mi_row_end;
@@ -24,17 +24,17 @@
 
 // initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
 // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
-                    int col);
+void av1_tile_init(TileInfo *tile, const struct AV1Common *cm, int row,
+                   int col);
 
-void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
-void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
+void av1_tile_set_row(TileInfo *tile, const struct AV1Common *cm, int row);
+void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
 
-void vp10_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
-                          int *max_log2_tile_cols);
+void av1_get_tile_n_bits(const int mi_cols, int *min_log2_tile_cols,
+                         int *max_log2_tile_cols);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_TILE_COMMON_H_
+#endif  // AV1_COMMON_TILE_COMMON_H_
diff --git a/av1/common/vp10_convolve.h b/av1/common/vp10_convolve.h
deleted file mode 100644
index 9343402..0000000
--- a/av1/common/vp10_convolve.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef VP10_COMMON_VP10_CONVOLVE_H_
-#define VP10_COMMON_VP10_CONVOLVE_H_
-#include "av1/common/filter.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                   int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
-                   const INTERP_FILTER *interp_filter,
-#else
-                   const INTERP_FILTER interp_filter,
-#endif
-                   const int subpel_x, int xstep, const int subpel_y, int ystep,
-                   int avg);
-
-#if CONFIG_VP9_HIGHBITDEPTH
-void vp10_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
-                          int dst_stride, int w, int h,
-#if CONFIG_DUAL_FILTER
-                          const INTERP_FILTER *interp_filter,
-#else
-                          const INTERP_FILTER interp_filter,
-#endif
-                          const int subpel_x, int xstep, const int subpel_y,
-                          int ystep, int avg, int bd);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // VP10_COMMON_VP10_CONVOLVE_H_
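
The deleted vp10_convolve() interface above takes a starting sub-pixel phase (subpel_x/subpel_y) and a per-output step (xstep/ystep). Assuming the 1/16-pel (4-bit) precision used elsewhere in the codebase, the integer part of the accumulated position selects the source tap and the fractional part selects the interpolation filter phase; a standalone sketch of that arithmetic:

    #include <stdio.h>

    #define SUBPEL_BITS 4 /* 1/16-pel precision */
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

    int main(void) {
      const int subpel_x = 8; /* start half a pixel in: 8/16 */
      const int xstep = 24;   /* advance 1.5 source pixels per output: 24/16 */
      int x_q4 = subpel_x;
      for (int x = 0; x < 4; ++x, x_q4 += xstep)
        printf("output %d: source offset %d, filter phase %2d/16\n",
               x, x_q4 >> SUBPEL_BITS, x_q4 & SUBPEL_MASK);
      return 0;
    }
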
diff --git a/av1/common/vp10_fwd_txfm1d.h b/av1/common/vp10_fwd_txfm1d.h
deleted file mode 100644
index ab9d2ee..0000000
--- a/av1/common/vp10_fwd_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_FWD_TXFM1D_H_
-#define VP10_FWD_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_fdct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_fadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // VP10_FWD_TXFM1D_H_
diff --git a/av1/common/vp10_inv_txfm1d.h b/av1/common/vp10_inv_txfm1d.h
deleted file mode 100644
index 21b80bf..0000000
--- a/av1/common/vp10_inv_txfm1d.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP10_INV_TXFM1D_H_
-#define VP10_INV_TXFM1D_H_
-
-#include "av1/common/vp10_txfm.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void vp10_idct4_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new(const int32_t *input, int32_t *output,
-                    const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_iadst4_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new(const int32_t *input, int32_t *output,
-                     const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new(const int32_t *input, int32_t *output,
-                      const int8_t *cos_bit, const int8_t *stage_range);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // VP10_INV_TXFM1D_H_
diff --git a/av1/common/vp10_rtcd_defs.pl b/av1/common/vp10_rtcd_defs.pl
deleted file mode 100644
index 4a16723..0000000
--- a/av1/common/vp10_rtcd_defs.pl
+++ /dev/null
@@ -1,912 +0,0 @@
-sub vp10_common_forward_decls() {
-print <<EOF
-/*
- * VP10
- */
-
-#include "aom/vpx_integer.h"
-#include "av1/common/common.h"
-#include "av1/common/enums.h"
-#include "av1/common/quant_common.h"
-#include "av1/common/filter.h"
-#include "av1/common/vp10_txfm.h"
-
-struct macroblockd;
-
-/* Encoder forward decls */
-struct macroblock;
-struct vpx_variance_vtable;
-struct search_site_config;
-struct mv;
-union int_mv;
-struct yv12_buffer_config;
-EOF
-}
-forward_decls qw/vp10_common_forward_decls/;
-
-# functions that are 64 bit only.
-$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
-if ($opts{arch} eq "x86_64") {
-  $mmx_x86_64 = 'mmx';
-  $sse2_x86_64 = 'sse2';
-  $ssse3_x86_64 = 'ssse3';
-  $avx_x86_64 = 'avx';
-  $avx2_x86_64 = 'avx2';
-}
-
-#
-# 10/12-tap convolution filters
-#
-add_proto qw/void vp10_convolve_horiz/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_horiz ssse3/;
-
-add_proto qw/void vp10_convolve_vert/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg";
-specialize qw/vp10_convolve_vert ssse3/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vp10_highbd_convolve_horiz/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
-  specialize qw/vp10_highbd_convolve_horiz sse4_1/;
-  add_proto qw/void vp10_highbd_convolve_vert/, "const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, int h, const InterpFilterParams fp, const int subpel_x_q4, int x_step_q4, int avg, int bd";
-  specialize qw/vp10_highbd_convolve_vert sse4_1/;
-}
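
For context, each add_proto line above declares a function's C prototype and each specialize line lists the SIMD variants that may replace it; the generated rtcd header then routes calls to the best variant the host CPU supports. A conceptual, standalone sketch of that dispatch (all demo_* names and the cpu_has_ssse3() stub are hypothetical stand-ins, not the generated code):

    #include <stdio.h>

    static void demo_convolve_horiz_c(const unsigned char *src, int w) {
      (void)src;
      printf("C path, %d pixels\n", w);
    }

    static void demo_convolve_horiz_ssse3(const unsigned char *src, int w) {
      (void)src;
      printf("SSSE3 path, %d pixels\n", w);
    }

    static int cpu_has_ssse3(void) { return 1; } /* stub for the real CPUID probe */

    /* The generated header exposes the entry point through a pointer like this. */
    static void (*demo_convolve_horiz)(const unsigned char *, int) =
        demo_convolve_horiz_c;

    static void demo_rtcd_setup(void) {
      if (cpu_has_ssse3()) demo_convolve_horiz = demo_convolve_horiz_ssse3;
    }

    int main(void) {
      unsigned char row[64] = { 0 };
      demo_rtcd_setup();
      demo_convolve_horiz(row, 64);
      return 0;
    }
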
-
-#
-# dct
-#
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
-
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4/;
-
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8/;
-
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
-
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16/;
-
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
-
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
-  } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add sse2/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
-
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
-
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
-  }
-} else {
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
-  } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2 neon dspr2/;
-
-    add_proto qw/void vp10_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x8_32_add/;
-
-    add_proto qw/void vp10_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x4_32_add/;
-
-    add_proto qw/void vp10_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x16_128_add/;
-
-    add_proto qw/void vp10_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x8_128_add/;
-
-    add_proto qw/void vp10_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht16x32_512_add/;
-
-    add_proto qw/void vp10_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht32x16_512_add/;
-
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2 neon dspr2/;
-
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add sse2 dspr2/;
-
-    if (vpx_config("CONFIG_EXT_TX") ne "yes") {
-      specialize qw/vp10_iht4x4_16_add msa/;
-      specialize qw/vp10_iht8x8_64_add msa/;
-      specialize qw/vp10_iht16x16_256_add msa/;
-    }
-
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
-
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
-
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
-
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
-
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
-
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
-
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
-
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
-
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
-  }
-}
-
-if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
-  add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_nuq/;
-
-  add_proto qw/void quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_fp_nuq/;
-
-  add_proto qw/void quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_32x32_nuq/;
-
-  add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-  specialize qw/quantize_32x32_fp_nuq/;
-}
-
-# EXT_INTRA predictor functions
-if (vpx_config("CONFIG_EXT_INTRA") eq "yes") {
-  add_proto qw/void vp10_dc_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_dc_filter_predictor sse4_1/;
-  add_proto qw/void vp10_v_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_v_filter_predictor sse4_1/;
-  add_proto qw/void vp10_h_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_h_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d45_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d45_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d135_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d135_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d117_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d117_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d153_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d153_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d207_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d207_filter_predictor sse4_1/;
-  add_proto qw/void vp10_d63_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_d63_filter_predictor sse4_1/;
-  add_proto qw/void vp10_tm_filter_predictor/, "uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left";
-  specialize qw/vp10_tm_filter_predictor sse4_1/;
-  # High bitdepth functions
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void vp10_highbd_dc_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_dc_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_v_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_v_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_h_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_h_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d45_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d45_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d135_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d135_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d117_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d117_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d153_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d153_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d207_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d207_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_d63_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_d63_filter_predictor sse4_1/;
-    add_proto qw/void vp10_highbd_tm_filter_predictor/, "uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd";
-    specialize qw/vp10_highbd_tm_filter_predictor sse4_1/;
-  }
-}
-
-# High bitdepth functions
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  #
-  # Sub Pixel Filters
-  #
-  add_proto qw/void vp10_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_copy/;
-
-  add_proto qw/void vp10_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_avg/;
-
-  add_proto qw/void vp10_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_vert/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
-  add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-
-  #
-  # dct
-  #
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht4x4_16_add/;
-
-  add_proto qw/void vp10_highbd_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht4x8_32_add/;
-
-  add_proto qw/void vp10_highbd_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x4_32_add/;
-
-  add_proto qw/void vp10_highbd_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x16_128_add/;
-
-  add_proto qw/void vp10_highbd_iht16x8_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x8_128_add/;
-
-  add_proto qw/void vp10_highbd_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x32_512_add/;
-
-  add_proto qw/void vp10_highbd_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht32x16_512_add/;
-
-  add_proto qw/void vp10_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x8_64_add/;
-
-  add_proto qw/void vp10_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x16_256_add/;
-}
-
-#
-# Encoder functions below this point.
-#
-if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-
-# ENCODEMB INVOKE
-
-if (vpx_config("CONFIG_AOM_QM") eq "yes") {
-  if (vpx_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
-    # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-    specialize qw/vp10_fdct8x8_quant/;
-  } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
-
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-  }
-} else {
-  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-    # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp/;
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/;
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant/;
-  } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error sse2 avx2 msa/;
-
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon sse2/;
-
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp neon sse2/, "$ssse3_x86_64";
-
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/, "$ssse3_x86_64";
-
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant sse2 ssse3 neon/;
-  }
-
-}
-
-# fdct functions
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2/;
-
-  add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x8/;
-
-  add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x4/;
-
-  add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x16/;
-
-  add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x8/;
-
-  add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x32/;
-
-  add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x16/;
-
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2/;
-
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2/;
-
-  add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x32/;
-
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4/;
-} else {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2/;
-
-  add_proto qw/void vp10_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x8/;
-
-  add_proto qw/void vp10_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x4/;
-
-  add_proto qw/void vp10_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x16/;
-
-  add_proto qw/void vp10_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x8/;
-
-  add_proto qw/void vp10_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x32/;
-
-  add_proto qw/void vp10_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x16/;
-
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2/;
-
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2/;
-
-  if (vpx_config("CONFIG_EXT_TX") ne "yes") {
-    specialize qw/vp10_fht4x4 msa/;
-    specialize qw/vp10_fht8x8 msa/;
-    specialize qw/vp10_fht16x16 msa/;
-  }
-
-  add_proto qw/void vp10_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht32x32/;
-
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4/;
-}
-
-add_proto qw/void vp10_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
-  specialize qw/vp10_fwd_idtx/;
-
-# Inverse transform
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  # Note as optimized versions of these functions are added we need to add a check to ensure
-  # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_1_add/;
-
-  add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_16_add/;
-
-  add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_1_add/;
-
-  add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_64_add/;
-
-  add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_12_add/;
-
-  add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_1_add/;
-
-  add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_256_add/;
-
-  add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_10_add/;
-
-  add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1024_add/;
-
-  add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_34_add/;
-
-  add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1_add/;
-
-  add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_1_add/;
-
-  add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_16_add/;
-
-  add_proto qw/void vp10_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct4x4_1_add/;
-
-  add_proto qw/void vp10_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct8x8_1_add/;
-
-  add_proto qw/void vp10_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct16x16_1_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1024_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_34_add/;
-
-  add_proto qw/void vp10_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1_add/;
-
-  add_proto qw/void vp10_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_1_add/;
-
-  add_proto qw/void vp10_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_16_add/;
-
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add/;
-
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add/;
-
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add/;
-
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add/;
-
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add/;
-  } else {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add sse2/;
-
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add sse2/;
-  }  # CONFIG_EMULATE_HARDWARE
-} else {
-  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
-  if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add/;
-
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add/;
-
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add/;
-
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add/;
-
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add/;
-
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add/;
-
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add/;
-
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add/;
-
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add/;
-
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add/;
-
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
-  } else {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add sse2/;
-
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add sse2/;
-
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add sse2/;
-
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add sse2/;
-
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add sse2/;
-
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
-
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
-  }  # CONFIG_EMULATE_HARDWARE
-}  # CONFIG_VP9_HIGHBITDEPTH
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-  #fwd txfm
-  add_proto qw/void vp10_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_4x4 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_8x8 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_16x16 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_32x32 sse4_1/;
-  add_proto qw/void vp10_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_fwd_txfm2d_64x64 sse4_1/;
-
-  #inv txfm
-  add_proto qw/void vp10_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_4x4 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_8x8 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_16x16 sse4_1/;
-  add_proto qw/void vp10_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_32x32/;
-  add_proto qw/void vp10_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  specialize qw/vp10_inv_txfm2d_add_64x64/;
-}
-
-#
-# Motion search
-#
-add_proto qw/int vp10_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
-specialize qw/vp10_full_search_sad sse3 sse4_1/;
-$vp10_full_search_sad_sse3=vp10_full_search_sadx3;
-$vp10_full_search_sad_sse4_1=vp10_full_search_sadx8;
-
-add_proto qw/int vp10_diamond_search_sad/, "struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_diamond_search_sad/;
-
-add_proto qw/int vp10_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vpx_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_full_range_search/;
-
-add_proto qw/void vp10_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-specialize qw/vp10_temporal_filter_apply sse2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
-
-  # ENCODEMB INVOKE
-  if (vpx_config("CONFIG_NEW_QUANT") eq "yes") {
-    add_proto qw/void highbd_quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_nuq/;
-
-    add_proto qw/void highbd_quantize_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_fp_nuq/;
-
-    add_proto qw/void highbd_quantize_32x32_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_32x32_nuq/;
-
-    add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const uint8_t *band";
-    specialize qw/highbd_quantize_32x32_fp_nuq/;
-  }
-
-  add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
-  specialize qw/vp10_highbd_block_error sse2/;
-
-  if (vpx_config("CONFIG_AOM_QM") eq "yes") {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
-    add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-  } else {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
-    specialize qw/vp10_highbd_quantize_fp sse4_1/;
-
-    add_proto qw/void vp10_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
-    specialize qw/vp10_highbd_quantize_b/;
-  }
-
-  # fdct functions
-  add_proto qw/void vp10_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht4x4 sse4_1/;
-
-  add_proto qw/void vp10_highbd_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht4x8/;
-
-  add_proto qw/void vp10_highbd_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x4/;
-
-  add_proto qw/void vp10_highbd_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x16/;
-
-  add_proto qw/void vp10_highbd_fht16x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x8/;
-
-  add_proto qw/void vp10_highbd_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x32/;
-
-  add_proto qw/void vp10_highbd_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht32x16/;
-
-  add_proto qw/void vp10_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x8/;
-
-  add_proto qw/void vp10_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x16/;
-
-  add_proto qw/void vp10_highbd_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht32x32/;
-
-  add_proto qw/void vp10_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_highbd_fwht4x4/;
-
-  add_proto qw/void vp10_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-  specialize qw/vp10_highbd_temporal_filter_apply/;
-
-}
-# End vp10_high encoder functions
-
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-  add_proto qw/uint64_t vp10_wedge_sse_from_residuals/, "const int16_t *r1, const int16_t *d, const uint8_t *m, int N";
-  specialize qw/vp10_wedge_sse_from_residuals sse2/;
-  add_proto qw/int vp10_wedge_sign_from_residuals/, "const int16_t *ds, const uint8_t *m, int N, int64_t limit";
-  specialize qw/vp10_wedge_sign_from_residuals sse2/;
-  add_proto qw/void vp10_wedge_compute_delta_squares/, "int16_t *d, const int16_t *a, const int16_t *b, int N";
-  specialize qw/vp10_wedge_compute_delta_squares sse2/;
-}
-
-}
-# end encoder functions
-1;
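
The Perl file removed above is an RTCD (run-time CPU detection) definition list: each add_proto line registers a function prototype, and the matching specialize line names the SIMD flavours that may replace the plain C implementation, which is why CONFIG_EMULATE_HARDWARE branches leave the specializations empty. As a rough, hypothetical sketch of the kind of dispatch code such a list is expanded into — symbol and flag names here are illustrative, not taken from this patch:

#include <stdint.h>

typedef int32_t tran_low_t;   /* assumed width; the real typedef lives in aom_dsp */
#define HAS_SSE2 0x01         /* illustrative CPU-capability flag */

/* One prototype from the list above, in its _c and _sse2 flavours. */
void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride, int tx_type);
void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride, int tx_type);

/* The generated header exposes a dispatch pointer per prototype... */
void (*av1_fht4x4)(const int16_t *input, tran_low_t *output, int stride, int tx_type);

/* ...and a setup routine wires it to the best available specialization. */
static void setup_rtcd_sketch(int cpu_flags) {
  av1_fht4x4 = av1_fht4x4_c;                 /* the C reference is always the fallback */
  if (cpu_flags & HAS_SSE2) av1_fht4x4 = av1_fht4x4_sse2;
}

With CONFIG_EMULATE_HARDWARE enabled, the specialize lines above carry no SIMD names, so only the C entry points are ever selected.
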
diff --git a/av1/common/warped_motion.c b/av1/common/warped_motion.c
index 5f76453..c742c36 100644
--- a/av1/common/warped_motion.c
+++ b/av1/common/warped_motion.c
@@ -353,7 +353,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_get_subcolumn(int taps, uint16_t *ref, int32_t *col,
                                         int stride, int x, int y_start) {
   int i;
@@ -522,7 +522,7 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static double warp_erroradv(WarpedMotionParams *wm, uint8_t *ref, int width,
                             int height, int stride, uint8_t *dst, int p_col,
@@ -574,48 +574,48 @@
   }
 }
 
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                          int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                          uint8_t *ref, int width, int height, int stride,
-                          uint8_t *dst, int p_col, int p_row, int p_width,
-                          int p_height, int p_stride, int subsampling_x,
-                          int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                         int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                         uint8_t *ref, int width, int height, int stride,
+                         uint8_t *dst, int p_col, int p_row, int p_width,
+                         int p_height, int p_stride, int subsampling_x,
+                         int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_hbd)
     return highbd_warp_erroradv(
         wm, ref, width, height, stride, dst, p_col, p_row, p_width, p_height,
         p_stride, subsampling_x, subsampling_y, x_scale, y_scale, bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return warp_erroradv(wm, ref, width, height, stride, dst, p_col, p_row,
                          p_width, p_height, p_stride, subsampling_x,
                          subsampling_y, x_scale, y_scale);
 }
 
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                     int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                     uint8_t *ref, int width, int height, int stride,
-                     uint8_t *pred, int p_col, int p_row, int p_width,
-                     int p_height, int p_stride, int subsampling_x,
-                     int subsampling_y, int x_scale, int y_scale) {
-#if CONFIG_VP9_HIGHBITDEPTH
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                    int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                    uint8_t *ref, int width, int height, int stride,
+                    uint8_t *pred, int p_col, int p_row, int p_width,
+                    int p_height, int p_stride, int subsampling_x,
+                    int subsampling_y, int x_scale, int y_scale) {
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_hbd)
     highbd_warp_plane(wm, ref, width, height, stride, pred, p_col, p_row,
                       p_width, p_height, p_stride, subsampling_x, subsampling_y,
                       x_scale, y_scale, bd);
   else
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     warp_plane(wm, ref, width, height, stride, pred, p_col, p_row, p_width,
                p_height, p_stride, subsampling_x, subsampling_y, x_scale,
                y_scale);
 }
 
-void vp10_integerize_model(const double *model, TransformationType wmtype,
-                           WarpedMotionParams *wm) {
+void av1_integerize_model(const double *model, TransformationType wmtype,
+                          WarpedMotionParams *wm) {
   wm->wmtype = wmtype;
   switch (wmtype) {
     case HOMOGRAPHY:
diff --git a/av1/common/warped_motion.h b/av1/common/warped_motion.h
index a9c57f9..965b296 100644
--- a/av1/common/warped_motion.h
+++ b/av1/common/warped_motion.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP10_COMMON_WARPED_MOTION_H
-#define VP10_COMMON_WARPED_MOTION_H
+#ifndef AV1_COMMON_WARPED_MOTION_H
+#define AV1_COMMON_WARPED_MOTION_H
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -17,9 +17,9 @@
 #include <math.h>
 #include <assert.h>
 
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
 
 // Bits of precision used for the model
 #define WARPEDMODEL_PREC_BITS 8
@@ -72,25 +72,25 @@
   int wmmat[8];  // For homography wmmat[9] is assumed to be 1
 } WarpedMotionParams;
 
-double vp10_warp_erroradv(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                          int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                          uint8_t *ref, int width, int height, int stride,
-                          uint8_t *dst, int p_col, int p_row, int p_width,
-                          int p_height, int p_stride, int subsampling_x,
-                          int subsampling_y, int x_scale, int y_scale);
+double av1_warp_erroradv(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                         int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                         uint8_t *ref, int width, int height, int stride,
+                         uint8_t *dst, int p_col, int p_row, int p_width,
+                         int p_height, int p_stride, int subsampling_x,
+                         int subsampling_y, int x_scale, int y_scale);
 
-void vp10_warp_plane(WarpedMotionParams *wm,
-#if CONFIG_VP9_HIGHBITDEPTH
-                     int use_hbd, int bd,
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-                     uint8_t *ref, int width, int height, int stride,
-                     uint8_t *pred, int p_col, int p_row, int p_width,
-                     int p_height, int p_stride, int subsampling_x,
-                     int subsampling_y, int x_scale, int y_scale);
+void av1_warp_plane(WarpedMotionParams *wm,
+#if CONFIG_AOM_HIGHBITDEPTH
+                    int use_hbd, int bd,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+                    uint8_t *ref, int width, int height, int stride,
+                    uint8_t *pred, int p_col, int p_row, int p_width,
+                    int p_height, int p_stride, int subsampling_x,
+                    int subsampling_y, int x_scale, int y_scale);
 
 // Integerize model into the WarpedMotionParams structure
-void vp10_integerize_model(const double *model, TransformationType wmtype,
-                           WarpedMotionParams *wm);
-#endif  // VP10_COMMON_WARPED_MOTION_H
+void av1_integerize_model(const double *model, TransformationType wmtype,
+                          WarpedMotionParams *wm);
+#endif  // AV1_COMMON_WARPED_MOTION_H
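
Beyond the renames, note that these warp entry points change arity with the highbitdepth flag: the use_hbd and bd arguments exist only when CONFIG_AOM_HIGHBITDEPTH is defined, so call sites must wrap them in the same conditional. A minimal, hypothetical call site — all parameter values are placeholders:

#include <stdint.h>
#include "av1/common/warped_motion.h"

static void warp_whole_plane_sketch(WarpedMotionParams *wm, uint8_t *ref,
                                    uint8_t *pred, int w, int h, int stride) {
  av1_warp_plane(wm,
#if CONFIG_AOM_HIGHBITDEPTH
                 /*use_hbd=*/0, /*bd=*/8,   /* present only in highbitdepth builds */
#endif
                 ref, w, h, stride, pred,
                 /*p_col=*/0, /*p_row=*/0, /*p_width=*/w, /*p_height=*/h,
                 /*p_stride=*/stride, /*subsampling_x=*/0, /*subsampling_y=*/0,
                 /*x_scale=*/16, /*y_scale=*/16);   /* scale values illustrative */
}
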
diff --git a/av1/common/x86/vp10_convolve_filters_ssse3.c b/av1/common/x86/av1_convolve_filters_ssse3.c
similarity index 99%
rename from av1/common/x86/vp10_convolve_filters_ssse3.c
rename to av1/common/x86/av1_convolve_filters_ssse3.c
index b842589..7a40b9c 100644
--- a/av1/common/x86/vp10_convolve_filters_ssse3.c
+++ b/av1/common/x86/av1_convolve_filters_ssse3.c
@@ -7,7 +7,7 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/filter.h"
 
 #if CONFIG_EXT_INTERP
diff --git a/av1/common/x86/vp10_convolve_ssse3.c b/av1/common/x86/av1_convolve_ssse3.c
similarity index 95%
rename from av1/common/x86/vp10_convolve_ssse3.c
rename to av1/common/x86/av1_convolve_ssse3.c
index e891d74..0c6bb99 100644
--- a/av1/common/x86/vp10_convolve_ssse3.c
+++ b/av1/common/x86/av1_convolve_ssse3.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 #include <tmmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/filter.h"
 
 #define WIDTH_BOUND (16)
@@ -610,10 +610,10 @@
 // (1) 10/12-taps filters
 // (2) x_step_q4 = 16 then filter is fixed at the call
 
-void vp10_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
-                               int dst_stride, int w, int h,
-                               const InterpFilterParams filter_params,
-                               const int subpel_x_q4, int x_step_q4, int avg) {
+void av1_convolve_horiz_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+                              int dst_stride, int w, int h,
+                              const InterpFilterParams filter_params,
+                              const int subpel_x_q4, int x_step_q4, int avg) {
   DECLARE_ALIGNED(16, uint16_t, temp[8 * 8]);
   __m128i verf[6];
   __m128i horf[2];
@@ -630,18 +630,18 @@
   (void)x_step_q4;
 
   if (0 == subpel_x_q4 || 16 != x_step_q4) {
-    vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                          subpel_x_q4, x_step_q4, avg);
+    av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                         subpel_x_q4, x_step_q4, avg);
     return;
   }
 
-  hCoeffs = vp10_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
+  hCoeffs = av1_get_subpel_filter_signal_dir(filter_params, subpel_x_q4 - 1);
   vCoeffs =
-      vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+      av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
 
   if (!hCoeffs || !vCoeffs) {
-    vp10_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                          subpel_x_q4, x_step_q4, avg);
+    av1_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                         subpel_x_q4, x_step_q4, avg);
     return;
   }
 
@@ -825,10 +825,10 @@
   } while (rowIndex < h);
 }
 
-void vp10_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
-                              int dst_stride, int w, int h,
-                              const InterpFilterParams filter_params,
-                              const int subpel_y_q4, int y_step_q4, int avg) {
+void av1_convolve_vert_ssse3(const uint8_t *src, int src_stride, uint8_t *dst,
+                             int dst_stride, int w, int h,
+                             const InterpFilterParams filter_params,
+                             const int subpel_y_q4, int y_step_q4, int avg) {
   __m128i verf[6];
   SubpelFilterCoeffs vCoeffs;
   const uint8_t *src_ptr;
@@ -839,17 +839,17 @@
   const int tapsNum = filter_params.taps;
 
   if (0 == subpel_y_q4 || 16 != y_step_q4) {
-    vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                         subpel_y_q4, y_step_q4, avg);
+    av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                        subpel_y_q4, y_step_q4, avg);
     return;
   }
 
   vCoeffs =
-      vp10_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+      av1_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
 
   if (!vCoeffs) {
-    vp10_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
-                         subpel_y_q4, y_step_q4, avg);
+    av1_convolve_vert_c(src, src_stride, dst, dst_stride, w, h, filter_params,
+                        subpel_y_q4, y_step_q4, avg);
     return;
   }
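
Apart from the renames, the hunks above show the convention these SSSE3 kernels follow: they handle only the fixed-step case (x_step_q4 or y_step_q4 equal to 16) with a nonzero subpel phase and available prepacked coefficients, and delegate everything else to the C reference. A stripped-down sketch of that guard, with hypothetical helper names:

#include <stdint.h>

/* Assumed helpers standing in for the real C reference and SIMD kernel. */
void convolve_horiz_c_ref(const uint8_t *src, int src_stride, uint8_t *dst,
                          int dst_stride, int w, int h, int subpel_x_q4,
                          int x_step_q4, int avg);
void convolve_horiz_simd(const uint8_t *src, int src_stride, uint8_t *dst,
                         int dst_stride, int w, int h, int subpel_x_q4,
                         int x_step_q4, int avg);

static void convolve_horiz_dispatch(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride, int w, int h,
                                    int subpel_x_q4, int x_step_q4, int avg) {
  /* The SIMD path assumes a fixed 1/16-pel step and a nonzero phase; any
   * other combination falls back to the C implementation. */
  if (subpel_x_q4 == 0 || x_step_q4 != 16) {
    convolve_horiz_c_ref(src, src_stride, dst, dst_stride, w, h,
                         subpel_x_q4, x_step_q4, avg);
    return;
  }
  convolve_horiz_simd(src, src_stride, dst, dst_stride, w, h,
                      subpel_x_q4, x_step_q4, avg);
}
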
 
diff --git a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
similarity index 99%
rename from av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
rename to av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
index e7d63fe..ecd3d4b 100644
--- a/av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
@@ -10,8 +10,8 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vp10_rtcd.h"
-#include "av1/common/vp10_fwd_txfm.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_fwd_txfm.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
@@ -22,31 +22,31 @@
 #define ADD_EPI16 _mm_adds_epi16
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
-void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rows_c
 #else
-void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
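
One detail worth keeping in mind when reading the renamed FDCT32x32_HIGH_PRECISION row pass above: the final expression (x + 1 + (x < 0)) >> 2 divides by 4, rounding to nearest with ties broken toward zero (it relies on arithmetic right shift, as the library code does). A small self-check restating that arithmetic, with hypothetical names:

#include <assert.h>
#include <stdint.h>

/* Restates the row-pass rounding in av1_fdct32x32_rows_c: divide by 4,
 * round to nearest, ties toward zero. Assumes arithmetic right shift. */
static int32_t round_div4_ties_toward_zero(int32_t x) {
  return (x + 1 + (x < 0)) >> 2;
}

static void rounding_self_check(void) {
  assert(round_div4_ties_toward_zero(6) == 1);    /*  1.5  ->  1 */
  assert(round_div4_ties_toward_zero(-6) == -1);  /* -1.5  -> -1 */
  assert(round_div4_ties_toward_zero(7) == 2);    /*  1.75 ->  2 */
  assert(round_div4_ties_toward_zero(-7) == -2);  /* -1.75 -> -2 */
}

The av1_fdct32x32_rd_rows_c variant in the same hunk, by contrast, simply truncates temp_out to tran_low_t.
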
diff --git a/av1/common/x86/vp10_fwd_txfm1d_sse4.c b/av1/common/x86/av1_fwd_txfm1d_sse4.c
similarity index 98%
rename from av1/common/x86/vp10_fwd_txfm1d_sse4.c
rename to av1/common/x86/av1_fwd_txfm1d_sse4.c
index 902c9b2..f0bcef9 100644
--- a/av1/common/x86/vp10_fwd_txfm1d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm1d_sse4.c
@@ -1,7 +1,7 @@
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
 
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
-                           const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 4;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -53,8 +53,8 @@
   }
 }
 
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
-                           const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 8;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -152,8 +152,8 @@
   }
 }
 
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 16;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -349,8 +349,8 @@
   }
 }
 
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 32;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -764,8 +764,8 @@
   }
 }
 
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 4;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -835,8 +835,8 @@
   }
 }
 
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 8;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -960,8 +960,8 @@
   }
 }
 
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
+                            const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 16;
   const int num_per_128 = 4;
   const int32_t *cospi;
@@ -1199,8 +1199,8 @@
   }
 }
 
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range) {
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
+                            const int8_t *cos_bit, const int8_t *stage_range) {
   const int txfm_size = 32;
   const int num_per_128 = 4;
   const int32_t *cospi;
diff --git a/av1/common/x86/vp10_fwd_txfm2d_sse4.c b/av1/common/x86/av1_fwd_txfm2d_sse4.c
similarity index 72%
rename from av1/common/x86/vp10_fwd_txfm2d_sse4.c
rename to av1/common/x86/av1_fwd_txfm2d_sse4.c
index a59a0c8..07c283e 100644
--- a/av1/common/x86/vp10_fwd_txfm2d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm2d_sse4.c
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/enums.h"
-#include "av1/common/vp10_txfm.h"
-#include "av1/common/x86/vp10_txfm1d_sse4.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/av1_txfm1d_sse4.h"
 
 static INLINE void int16_array_with_stride_to_int32_array_without_stride(
     const int16_t *input, int stride, int32_t *output, int txfm1d_size) {
@@ -28,14 +28,14 @@
 
 static INLINE TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
   switch (txfm_type) {
-    case TXFM_TYPE_DCT4: return vp10_fdct4_new_sse4_1; break;
-    case TXFM_TYPE_DCT8: return vp10_fdct8_new_sse4_1; break;
-    case TXFM_TYPE_DCT16: return vp10_fdct16_new_sse4_1; break;
-    case TXFM_TYPE_DCT32: return vp10_fdct32_new_sse4_1; break;
-    case TXFM_TYPE_ADST4: return vp10_fadst4_new_sse4_1; break;
-    case TXFM_TYPE_ADST8: return vp10_fadst8_new_sse4_1; break;
-    case TXFM_TYPE_ADST16: return vp10_fadst16_new_sse4_1; break;
-    case TXFM_TYPE_ADST32: return vp10_fadst32_new_sse4_1; break;
+    case TXFM_TYPE_DCT4: return av1_fdct4_new_sse4_1; break;
+    case TXFM_TYPE_DCT8: return av1_fdct8_new_sse4_1; break;
+    case TXFM_TYPE_DCT16: return av1_fdct16_new_sse4_1; break;
+    case TXFM_TYPE_DCT32: return av1_fdct32_new_sse4_1; break;
+    case TXFM_TYPE_ADST4: return av1_fadst4_new_sse4_1; break;
+    case TXFM_TYPE_ADST8: return av1_fadst8_new_sse4_1; break;
+    case TXFM_TYPE_ADST16: return av1_fadst16_new_sse4_1; break;
+    case TXFM_TYPE_ADST32: return av1_fadst32_new_sse4_1; break;
     default: assert(0);
   }
   return NULL;
@@ -69,18 +69,18 @@
   transpose_32(txfm_size, buf_128, out_128);
 }
 
-void vp10_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
-                                  int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
+                                 int stride, int tx_type, int bd) {
   int32_t txfm_buf[1024];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
   (void)bd;
   fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
 }
 
-void vp10_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
-                                  int stride, int tx_type, int bd) {
+void av1_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
+                                 int stride, int tx_type, int bd) {
   int32_t txfm_buf[4096];
-  TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
+  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
   (void)bd;
   fwd_txfm2d_sse4_1(input, output, stride, cfg.cfg, txfm_buf);
 }
diff --git a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
similarity index 96%
rename from av1/common/x86/vp10_fwd_txfm_impl_sse2.h
rename to av1/common/x86/av1_fwd_txfm_impl_sse2.h
index 9bb8abc..ecaa97c 100644
--- a/av1/common/x86/vp10_fwd_txfm_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_txfm_impl_sse2.h
@@ -10,7 +10,7 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
@@ -98,7 +98,7 @@
                        _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
   test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
   if (test) {
-    vpx_highbd_fdct4x4_c(input, output, stride);
+    aom_highbd_fdct4x4_c(input, output, stride);
     return;
   }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -169,7 +169,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&x0, &x1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -191,7 +191,7 @@
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&t0, &t1);
     if (overflow) {
-      vpx_highbd_fdct4x4_c(input, output, stride);
+      aom_highbd_fdct4x4_c(input, output, stride);
       return;
     }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -230,7 +230,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&x0, &x1);
       if (overflow) {
-        vpx_highbd_fdct4x4_c(input, output, stride);
+        aom_highbd_fdct4x4_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -313,7 +313,7 @@
       overflow =
           check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
     }
@@ -328,7 +328,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -371,7 +371,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -401,7 +401,7 @@
 #if DCT_HIGH_BIT_DEPTH
       overflow = check_epi16_overflow_x2(&r0, &r1);
       if (overflow) {
-        vpx_highbd_fdct8x8_c(input, output, stride);
+        aom_highbd_fdct8x8_c(input, output, stride);
         return;
       }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -414,7 +414,7 @@
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
         if (overflow) {
-          vpx_highbd_fdct8x8_c(input, output, stride);
+          aom_highbd_fdct8x8_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -457,7 +457,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
           if (overflow) {
-            vpx_highbd_fdct8x8_c(input, output, stride);
+            aom_highbd_fdct8x8_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -720,7 +720,7 @@
         overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
                                            &input4, &input5, &input6, &input7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -740,7 +740,7 @@
             check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                     &step1_4, &step1_5, &step1_6, &step1_7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -760,7 +760,7 @@
         overflow =
             check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
         if (overflow) {
-          vpx_highbd_fdct16x16_c(input, output, stride);
+          aom_highbd_fdct16x16_c(input, output, stride);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -774,7 +774,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -796,7 +796,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -817,7 +817,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x2(&r0, &r1);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -830,7 +830,7 @@
 #if DCT_HIGH_BIT_DEPTH
             overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
             if (overflow) {
-              vpx_highbd_fdct16x16_c(input, output, stride);
+              aom_highbd_fdct16x16_c(input, output, stride);
               return;
             }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -853,7 +853,7 @@
               overflow =
                   check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
               if (overflow) {
-                vpx_highbd_fdct16x16_c(input, output, stride);
+                aom_highbd_fdct16x16_c(input, output, stride);
                 return;
               }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -881,7 +881,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -901,7 +901,7 @@
               check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
                                       &step3_4, &step3_5, &step3_6, &step3_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -924,7 +924,7 @@
           overflow =
               check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -944,7 +944,7 @@
               check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
                                       &step1_4, &step1_5, &step1_6, &step1_7);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -966,7 +966,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
@@ -987,7 +987,7 @@
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
           if (overflow) {
-            vpx_highbd_fdct16x16_c(input, output, stride);
+            aom_highbd_fdct16x16_c(input, output, stride);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
diff --git a/av1/common/x86/vp10_fwd_txfm_sse2.c b/av1/common/x86/av1_fwd_txfm_sse2.c
similarity index 84%
rename from av1/common/x86/vp10_fwd_txfm_sse2.c
rename to av1/common/x86/av1_fwd_txfm_sse2.c
index 05ec539..3a95071 100644
--- a/av1/common/x86/vp10_fwd_txfm_sse2.c
+++ b/av1/common/x86/av1_fwd_txfm_sse2.c
@@ -10,12 +10,12 @@
 
 #include <emmintrin.h>  // SSE2
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 
-void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
   store_output(&in0, output);
 }
 
-void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
   __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
   __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,8 +84,8 @@
   store_output(&in1, output);
 }
 
-void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
-                           int stride) {
+void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+                          int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
@@ -153,8 +153,8 @@
   store_output(&in1, output);
 }
 
-void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
-                           int stride) {
+void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+                          int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
@@ -226,47 +226,47 @@
 }
 
 #define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vp10_fdct4x4_sse2
-#define FDCT8x8_2D vp10_fdct8x8_sse2
-#define FDCT16x16_2D vp10_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h"
+#define FDCT4x4_2D av1_fdct4x4_sse2
+#define FDCT8x8_2D av1_fdct8x8_sse2
+#define FDCT16x16_2D av1_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_fdct32x32_sse2
+#define FDCT32x32_2D av1_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
-#include "av1/common/x86/vp10_fwd_txfm_impl_sse2.h"  // NOLINT
+#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
+#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
+#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
+#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"  // NOLINT
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "av1/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
similarity index 99%
rename from av1/common/x86/vp10_highbd_convolve_filters_sse4.c
rename to av1/common/x86/av1_highbd_convolve_filters_sse4.c
index 7f3630c..e2337fd 100644
--- a/av1/common/x86/vp10_highbd_convolve_filters_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_filters_sse4.c
@@ -7,10 +7,10 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "./vpx_config.h"
+#include "./aom_config.h"
 #include "av1/common/filter.h"
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_EXT_INTERP
 DECLARE_ALIGNED(16, const int16_t,
                 sub_pel_filters_10sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -137,7 +137,7 @@
 };
 #endif
 #endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_EXT_INTERP
 DECLARE_ALIGNED(16, const int16_t,
                 sub_pel_filters_12sharp_highbd_ver_signal_dir[15][6][8]) = {
@@ -264,7 +264,7 @@
 };
 #endif
 #endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if USE_TEMPORALFILTER_12TAP
 DECLARE_ALIGNED(
     16, const int16_t,
diff --git a/av1/common/x86/vp10_highbd_convolve_sse4.c b/av1/common/x86/av1_highbd_convolve_sse4.c
similarity index 91%
rename from av1/common/x86/vp10_highbd_convolve_sse4.c
rename to av1/common/x86/av1_highbd_convolve_sse4.c
index ea78400..705c963 100644
--- a/av1/common/x86/vp10_highbd_convolve_sse4.c
+++ b/av1/common/x86/av1_highbd_convolve_sse4.c
@@ -11,7 +11,7 @@
 #include <assert.h>
 #include <smmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "av1/common/filter.h"
 
 typedef void (*TransposeSave)(const int width, int pixelsNum, uint32_t *src,
@@ -212,12 +212,12 @@
   _mm_storeu_si128((__m128i *)buf, u[0]);
 }
 
-void vp10_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
-                                       uint16_t *dst, int dst_stride, int w,
-                                       int h,
-                                       const InterpFilterParams filter_params,
-                                       const int subpel_x_q4, int x_step_q4,
-                                       int avg, int bd) {
+void av1_highbd_convolve_horiz_sse4_1(const uint16_t *src, int src_stride,
+                                      uint16_t *dst, int dst_stride, int w,
+                                      int h,
+                                      const InterpFilterParams filter_params,
+                                      const int subpel_x_q4, int x_step_q4,
+                                      int avg, int bd) {
   DECLARE_ALIGNED(16, uint32_t, temp[4 * 4]);
   __m128i verf[6];
   HbdSubpelFilterCoeffs vCoeffs;
@@ -228,18 +228,16 @@
   (void)x_step_q4;
 
   if (0 == subpel_x_q4 || 16 != x_step_q4) {
-    vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
-                                 filter_params, subpel_x_q4, x_step_q4, avg,
-                                 bd);
+    av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+                                filter_params, subpel_x_q4, x_step_q4, avg, bd);
     return;
   }
 
   vCoeffs =
-      vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
+      av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_x_q4 - 1);
   if (!vCoeffs) {
-    vp10_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
-                                 filter_params, subpel_x_q4, x_step_q4, avg,
-                                 bd);
+    av1_highbd_convolve_horiz_c(src, src_stride, dst, dst_stride, w, h,
+                                filter_params, subpel_x_q4, x_step_q4, avg, bd);
     return;
   }
 
@@ -423,27 +421,27 @@
   } while (rowIndex < h);
 }
 
-void vp10_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
-                                      uint16_t *dst, int dst_stride, int w,
-                                      int h,
-                                      const InterpFilterParams filter_params,
-                                      const int subpel_y_q4, int y_step_q4,
-                                      int avg, int bd) {
+void av1_highbd_convolve_vert_sse4_1(const uint16_t *src, int src_stride,
+                                     uint16_t *dst, int dst_stride, int w,
+                                     int h,
+                                     const InterpFilterParams filter_params,
+                                     const int subpel_y_q4, int y_step_q4,
+                                     int avg, int bd) {
   __m128i verf[6];
   HbdSubpelFilterCoeffs vCoeffs;
   const int tapsNum = filter_params.taps;
 
   if (0 == subpel_y_q4 || 16 != y_step_q4) {
-    vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
-                                filter_params, subpel_y_q4, y_step_q4, avg, bd);
+    av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+                               filter_params, subpel_y_q4, y_step_q4, avg, bd);
     return;
   }
 
   vCoeffs =
-      vp10_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
+      av1_hbd_get_subpel_filter_ver_signal_dir(filter_params, subpel_y_q4 - 1);
   if (!vCoeffs) {
-    vp10_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
-                                filter_params, subpel_y_q4, y_step_q4, avg, bd);
+    av1_highbd_convolve_vert_c(src, src_stride, dst, dst_stride, w, h,
+                               filter_params, subpel_y_q4, y_step_q4, avg, bd);
     return;
   }
 
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
similarity index 98%
rename from av1/common/x86/vp10_inv_txfm_sse2.c
rename to av1/common/x86/av1_inv_txfm_sse2.c
index b09933e..74a0d90 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
-#include "av1/common/x86/vp10_inv_txfm_sse2.h"
+#include "./av1_rtcd.h"
+#include "av1/common/x86/av1_inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
 #define RECON_AND_STORE4X4(dest, in_x)                    \
@@ -21,7 +21,7 @@
     *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
   }
 
-void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
   const __m128i cst = _mm_setr_epi16(
@@ -151,7 +151,7 @@
   }
 }
 
-void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -176,7 +176,7 @@
   res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
 }
 
-void vp10_idct4_sse2(__m128i *in) {
+void av1_idct4_sse2(__m128i *in) {
   const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -212,7 +212,7 @@
   in[1] = _mm_shuffle_epi32(in[1], 0x4E);
 }
 
-void vp10_iadst4_sse2(__m128i *in) {
+void av1_iadst4_sse2(__m128i *in) {
   const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
   const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
   const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -446,7 +446,7 @@
     out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
 
-void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -477,11 +477,11 @@
 
   // 2-D
   for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+    // 8x8 Transpose is copied from av1_fdct8x8_sse2()
     TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                   in4, in5, in6, in7);
 
-    // 4-stage 1D vp10_idct8x8
+    // 4-stage 1D av1_idct8x8
     IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
           in6, in7);
   }
@@ -515,7 +515,7 @@
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
 
-void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -536,7 +536,7 @@
   RECON_AND_STORE(dest + 7 * stride, dc_value);
 }
 
-void vp10_idct8_sse2(__m128i *in) {
+void av1_idct8_sse2(__m128i *in) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -552,16 +552,16 @@
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
-  // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+  // 8x8 Transpose is copied from av1_fdct8x8_sse2()
   TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
                 in1, in2, in3, in4, in5, in6, in7);
 
-  // 4-stage 1D vp10_idct8x8
+  // 4-stage 1D av1_idct8x8
   IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
         in[4], in[5], in[6], in[7]);
 }
 
-void vp10_iadst8_sse2(__m128i *in) {
+void av1_iadst8_sse2(__m128i *in) {
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -789,7 +789,7 @@
   in[7] = _mm_sub_epi16(k__const_0, s1);
 }
 
-void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1158,8 +1158,8 @@
                            stp2_12)                                            \
   }
 
-void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
-                                 int stride) {
+void av1_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -1200,7 +1200,7 @@
 
   curr1 = l;
   for (i = 0; i < 2; i++) {
-    // 1-D vp10_idct
+    // 1-D av1_idct
 
     // Load input data.
     in[0] = _mm_load_si128((const __m128i *)input);
@@ -1248,7 +1248,7 @@
   }
   for (i = 0; i < 2; i++) {
     int j;
-    // 1-D vp10_idct
+    // 1-D av1_idct
     array_transpose_8x8(l + i * 8, in);
     array_transpose_8x8(r + i * 8, in + 8);
 
@@ -1283,8 +1283,7 @@
   }
 }
 
-void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a, i;
@@ -1316,7 +1315,7 @@
   }
 }
 
-static void vp10_iadst16_8col(__m128i *in) {
+static void av1_iadst16_8col(__m128i *in) {
   // perform 16x16 1-D ADST for 8 columns
   __m128i s[16], x[16], u[32], v[32];
   const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@@ -1786,7 +1785,7 @@
   in[15] = _mm_sub_epi16(kZero, s[1]);
 }
 
-static void vp10_idct16_8col(__m128i *in) {
+static void av1_idct16_8col(__m128i *in) {
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -2130,20 +2129,20 @@
   in[15] = _mm_sub_epi16(s[0], s[15]);
 }
 
-void vp10_idct16_sse2(__m128i *in0, __m128i *in1) {
+void av1_idct16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_idct16_8col(in0);
-  vp10_idct16_8col(in1);
+  av1_idct16_8col(in0);
+  av1_idct16_8col(in1);
 }
 
-void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
+void av1_iadst16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_iadst16_8col(in0);
-  vp10_iadst16_8col(in1);
+  av1_iadst16_8col(in0);
+  av1_iadst16_8col(in1);
 }
 
-void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
-                                int stride) {
+void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+                               int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -3016,12 +3015,12 @@
   }
 
 // Only upper-left 8x8 has non-zero coeff
-void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
-                                int stride) {
+void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+                               int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
@@ -3173,13 +3172,13 @@
   }
 }
 
-void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
-                                  int stride) {
+void av1_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
@@ -3241,7 +3240,7 @@
 
   for (i = 0; i < 4; i++) {
     i32 = (i << 5);
-    // First 1-D vp10_idct
+    // First 1-D av1_idct
     // Load input data.
     LOAD_DQCOEFF(in[0], input);
     LOAD_DQCOEFF(in[8], input);
@@ -3391,7 +3390,7 @@
     col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
   }
   for (i = 0; i < 4; i++) {
-    // Second 1-D vp10_idct
+    // Second 1-D av1_idct
     j = i << 3;
 
     // Transpose 32x8 block to 8x32 block
@@ -3447,8 +3446,7 @@
   }
 }
 
-void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+void av1_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a, i;
@@ -3468,7 +3466,7 @@
   }
 }
 
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
   const __m128i zero = _mm_set1_epi16(0);
@@ -3482,8 +3480,8 @@
   return retval;
 }
 
-void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -3516,7 +3514,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Check the min & max values
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3545,14 +3543,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct4_c(input, outptr, bd);
+      av1_highbd_idct4_c(input, outptr, bd);
       input += 4;
       outptr += 4;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Final round and shift
     inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3588,7 +3586,7 @@
     // Columns
     for (i = 0; i < 4; ++i) {
       for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-      vp10_highbd_idct4_c(temp_in, temp_out, bd);
+      av1_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3597,8 +3595,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3632,7 +3630,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3662,14 +3660,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 8; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3688,7 +3686,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3697,8 +3695,8 @@
   }
 }
 
-void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+void av1_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3733,7 +3731,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3765,14 +3763,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3791,7 +3789,7 @@
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3800,8 +3798,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                        int stride, int bd) {
+void av1_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                       int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3838,7 +3836,7 @@
 
   if (!test) {
     // Do the row transform
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3873,14 +3871,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 16; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3904,7 +3902,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3913,8 +3911,8 @@
   }
 }
 
-void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                       int stride, int bd) {
+void av1_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                      int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3953,7 +3951,7 @@
 
   if (!test) {
     // Do the row transform (N.B. This transposes inptr)
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3991,14 +3989,14 @@
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -4022,7 +4020,7 @@
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4030,4 +4028,4 @@
     }
   }
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/x86/vp10_inv_txfm_sse2.h b/av1/common/x86/av1_inv_txfm_sse2.h
similarity index 97%
rename from av1/common/x86/vp10_inv_txfm_sse2.h
rename to av1/common/x86/av1_inv_txfm_sse2.h
index 0839ab9..a4cbbcf 100644
--- a/av1/common/x86/vp10_inv_txfm_sse2.h
+++ b/av1/common/x86/av1_inv_txfm_sse2.h
@@ -8,13 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
-#define VPX_DSP_X86_INV_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
+#define AOM_DSP_X86_INV_TXFM_SSE2_H_
 
 #include <emmintrin.h>  // SSE2
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
-#include "av1/common/vp10_inv_txfm.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
+#include "av1/common/av1_inv_txfm.h"
 
 // perform 8x8 transpose
 static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
@@ -181,4 +181,4 @@
 void iadst8_sse2(__m128i *in);
 void iadst16_sse2(__m128i *in0, __m128i *in1);
 
-#endif  // VPX_DSP_X86_INV_TXFM_SSE2_H_
+#endif  // AOM_DSP_X86_INV_TXFM_SSE2_H_
diff --git a/av1/common/x86/vp10_txfm1d_sse4.h b/av1/common/x86/av1_txfm1d_sse4.h
similarity index 68%
rename from av1/common/x86/vp10_txfm1d_sse4.h
rename to av1/common/x86/av1_txfm1d_sse4.h
index f05a54c..af7afb7 100644
--- a/av1/common/x86/vp10_txfm1d_sse4.h
+++ b/av1/common/x86/av1_txfm1d_sse4.h
@@ -1,52 +1,52 @@
-#ifndef VP10_TXMF1D_SSE2_H_
-#define VP10_TXMF1D_SSE2_H_
+#ifndef AV1_TXMF1D_SSE2_H_
+#define AV1_TXMF1D_SSE2_H_
 
 #include <smmintrin.h>
-#include "av1/common/vp10_txfm.h"
+#include "av1/common/av1_txfm.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+
+void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fdct64_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
 
-void vp10_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-
-void vp10_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct4_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+                          const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct16_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_idct32_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct16_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct32_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_idct64_new_sse4_1(const __m128i *input, __m128i *output,
-                            const int8_t *cos_bit, const int8_t *stage_range);
+void av1_idct64_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
 
-void vp10_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst4_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+                           const int8_t *cos_bit, const int8_t *stage_range);
+void av1_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst8_new_sse4_1(const __m128i *input, __m128i *output,
+void av1_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst16_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
-void vp10_iadst32_new_sse4_1(const __m128i *input, __m128i *output,
-                             const int8_t *cos_bit, const int8_t *stage_range);
 
 static INLINE void transpose_32_4x4(int stride, const __m128i *input,
                                     __m128i *output) {
@@ -141,4 +141,4 @@
 }
 #endif
 
-#endif  // VP10_TXMF1D_SSE2_H_
+#endif  // AV1_TXMF1D_SSE2_H_
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index f3686eb..eada3af 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -11,9 +11,9 @@
 #include <assert.h>
 #include <smmintrin.h> /* SSE4.1 */
 
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "av1/common/vp10_inv_txfm2d_cfg.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "av1/common/av1_inv_txfm2d_cfg.h"
 #include "av1/common/x86/highbd_txfm_utility_sse4.h"
 
 static INLINE void load_buffer_4x4(const int32_t *coeff, __m128i *in) {
@@ -229,8 +229,8 @@
   _mm_storel_epi64((__m128i *)(output + 3 * stride), v3);
 }
 
-void vp10_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
-                                    int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
+                                   int stride, int tx_type, int bd) {
   __m128i in[4];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -695,8 +695,8 @@
   _mm_store_si128((__m128i *)(output + 7 * stride), u7);
 }
 
-void vp10_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
-                                    int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
+                                   int stride, int tx_type, int bd) {
   __m128i in[16], out[16];
   const TXFM_2D_CFG *cfg = NULL;
 
@@ -1295,8 +1295,8 @@
   round_shift_8x8(&in[48], shift);
 }
 
-void vp10_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
-                                      int stride, int tx_type, int bd) {
+void av1_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
+                                     int stride, int tx_type, int bd) {
   __m128i in[64], out[64];
   const TXFM_2D_CFG *cfg = NULL;
 
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 70bf9bf..e9f0ce8 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_dsp/x86/inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 #include "aom_ports/mem.h"
@@ -56,8 +56,8 @@
   } while (0)
 #endif
 
-void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   __m128i in[2];
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
@@ -147,8 +147,8 @@
   }
 }
 
-void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
-                             int tx_type) {
+void av1_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
   __m128i in[8];
   const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -240,8 +240,8 @@
   RECON_AND_STORE(dest + 7 * stride, in[7]);
 }
 
-void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
-                                int stride, int tx_type) {
+void av1_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+                               int stride, int tx_type) {
   __m128i in[32];
   __m128i *in0 = &in[0];
   __m128i *in1 = &in[16];
diff --git a/av1/common/x86/reconintra_sse4.c b/av1/common/x86/reconintra_sse4.c
index cac34a6..ab1fa93 100644
--- a/av1/common/x86/reconintra_sse4.c
+++ b/av1/common/x86/reconintra_sse4.c
@@ -9,7 +9,7 @@
  */
 #include <smmintrin.h>
 
-#include "./vp10_rtcd.h"
+#include "./av1_rtcd.h"
 #include "aom_ports/mem.h"
 #include "av1/common/enums.h"
 #include "av1/common/intra_filters.h"
@@ -498,86 +498,84 @@
   GeneratePrediction(above, left, bs, prm, meanValue, dst, stride);
 }
 
-void vp10_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
+void av1_dc_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                    const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, DC_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                    const uint8_t *above, const uint8_t *left) {
+void av1_v_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                   const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, V_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                    const uint8_t *above, const uint8_t *left) {
+void av1_h_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                   const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, H_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                      const uint8_t *above,
-                                      const uint8_t *left) {
+void av1_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                     const uint8_t *above,
+                                     const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D45_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d135_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D135_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d117_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D117_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d153_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D153_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                       const uint8_t *above,
-                                       const uint8_t *left) {
+void av1_d207_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                      const uint8_t *above,
+                                      const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D207_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                      const uint8_t *above,
-                                      const uint8_t *left) {
+void av1_d63_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                     const uint8_t *above,
+                                     const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D63_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
-void vp10_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
+void av1_tm_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
+                                    const uint8_t *above, const uint8_t *left) {
   __m128i prm[5];
   GetIntraFilterParams(bs, TM_PRED, &prm[0]);
   FilterPrediction(above, left, bs, prm, dst, stride);
 }
 
 // ============== High Bit Depth ==============
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int HighbdGetMeanValue4x4(const uint16_t *above,
                                         const uint16_t *left, const int bd,
                                         __m128i *params) {
@@ -809,83 +807,83 @@
   HighbdGeneratePrediction(above, left, bs, bd, prm, meanValue, dst, stride);
 }
 
-void vp10_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                            int bs, const uint16_t *above,
-                                            const uint16_t *left, int bd) {
+void av1_highbd_dc_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                           int bs, const uint16_t *above,
+                                           const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, DC_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                           int bs, const uint16_t *above,
-                                           const uint16_t *left, int bd) {
+void av1_highbd_v_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                          int bs, const uint16_t *above,
+                                          const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, V_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                           int bs, const uint16_t *above,
-                                           const uint16_t *left, int bd) {
+void av1_highbd_h_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                          int bs, const uint16_t *above,
+                                          const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, H_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                             int bs, const uint16_t *above,
-                                             const uint16_t *left, int bd) {
+void av1_highbd_d45_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                            int bs, const uint16_t *above,
+                                            const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D45_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d135_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D135_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d117_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D117_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d153_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D153_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                              int bs, const uint16_t *above,
-                                              const uint16_t *left, int bd) {
+void av1_highbd_d207_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                             int bs, const uint16_t *above,
+                                             const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D207_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                             int bs, const uint16_t *above,
-                                             const uint16_t *left, int bd) {
+void av1_highbd_d63_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                            int bs, const uint16_t *above,
+                                            const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, D63_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
 
-void vp10_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
-                                            int bs, const uint16_t *above,
-                                            const uint16_t *left, int bd) {
+void av1_highbd_tm_filter_predictor_sse4_1(uint16_t *dst, ptrdiff_t stride,
+                                           int bs, const uint16_t *above,
+                                           const uint16_t *left, int bd) {
   __m128i prm[5];
   GetIntraFilterParams(bs, TM_PRED, &prm[0]);
   HighbdFilterPrediction(above, left, bs, bd, prm, dst, stride);
 }
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
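
For reference, a minimal caller-side sketch of what this rename means for code that uses these predictors. This is illustrative only and not part of the patch: the wrapper name and the 4x4 block size are hypothetical, while the av1_d45_filter_predictor_sse4_1 signature is taken from the hunk above (it is normally declared via the generated RTCD header rather than an explicit extern).

    #include <stddef.h>
    #include <stdint.h>

    /* Prototype matching the definition in this file (ordinarily pulled in
     * through the RTCD header); declared here so the sketch stands alone. */
    void av1_d45_filter_predictor_sse4_1(uint8_t *dst, ptrdiff_t stride, int bs,
                                         const uint8_t *above,
                                         const uint8_t *left);

    /* Hypothetical wrapper: the only change callers see from this patch is the
     * symbol prefix, vp10_* -> av1_*; arguments are unchanged. */
    static void predict_d45_4x4(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
      /* Before: vp10_d45_filter_predictor_sse4_1(dst, stride, 4, above, left); */
      av1_d45_filter_predictor_sse4_1(dst, stride, 4, above, left);
    }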