Remove unused convolve8_avg functions from aom_dsp

BUG=aomedia:1575

Change-Id: I21e8308b561b5054974e6e1c6aac4d9ef5615f11
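
This drops the unused aom_convolve8_avg{,_horiz,_vert} and
aom_highbd_convolve8_avg{,_horiz,_vert} C functions, their rtcd prototypes
and SSE2/SSSE3/AVX2/NEON/DSPR2/MSA specializations, and the NEON, DSPR2,
and MSA implementation files.

For reference, each removed path computed an 8-tap subpel filter result and
round-averaged it into the destination instead of overwriting it. Below is a
minimal self-contained C sketch of the horizontal case (standalone stand-ins
for ROUND_POWER_OF_TWO, clip_pixel, and the q4 subpel constants, with
horz_scalar_product inlined; illustrative only, not the library source):

  #include <stddef.h>
  #include <stdint.h>

  #define SUBPEL_TAPS 8
  #define SUBPEL_BITS 4
  #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
  #define FILTER_BITS 7
  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

  static uint8_t clip_pixel(int val) {
    return (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
  }

  /* Sketch of the removed convolve_avg_horiz(): filter, clip, then
   * round-average with the pixel already in the destination. */
  static void convolve_avg_horiz_sketch(const uint8_t *src,
                                        ptrdiff_t src_stride, uint8_t *dst,
                                        ptrdiff_t dst_stride,
                                        const int16_t (*x_filters)[SUBPEL_TAPS],
                                        int x0_q4, int x_step_q4, int w,
                                        int h) {
    src -= SUBPEL_TAPS / 2 - 1;  /* center the 8-tap window */
    for (int y = 0; y < h; ++y) {
      int x_q4 = x0_q4;
      for (int x = 0; x < w; ++x) {
        const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
        const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
        int sum = 0;
        for (int k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
        const uint8_t filtered =
            clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
        dst[x] = (uint8_t)ROUND_POWER_OF_TWO(dst[x] + filtered, 1);
        x_q4 += x_step_q4;
      }
      src += src_stride;
      dst += dst_stride;
    }
  }

The vertical variant walks columns with a strided tap loop, and the
high-bitdepth variants apply the same pattern to uint16_t pixels via
clip_pixel_highbd.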
diff --git a/aom_dsp/aom_convolve.c b/aom_dsp/aom_convolve.c
index a0ec277..38942fe 100644
--- a/aom_dsp/aom_convolve.c
+++ b/aom_dsp/aom_convolve.c
@@ -52,26 +52,6 @@
   }
 }
 
-static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const InterpKernel *x_filters, int x0_q4,
-                               int x_step_q4, int w, int h) {
-  src -= SUBPEL_TAPS / 2 - 1;
-  for (int y = 0; y < h; ++y) {
-    int x_q4 = x0_q4;
-    for (int x = 0; x < w; ++x) {
-      const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
-      const int sum = horz_scalar_product(src_x, x_filter);
-      dst[x] = ROUND_POWER_OF_TWO(
-          dst[x] + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
-      x_q4 += x_step_q4;
-    }
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
 static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const InterpKernel *y_filters, int y0_q4,
@@ -92,29 +72,6 @@
   }
 }
 
-static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const InterpKernel *y_filters, int y0_q4,
-                              int y_step_q4, int w, int h) {
-  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
-
-  for (int x = 0; x < w; ++x) {
-    int y_q4 = y0_q4;
-    for (int y = 0; y < h; ++y) {
-      const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
-      const int sum = vert_scalar_product(src_y, src_stride, y_filter);
-      dst[y * dst_stride] = ROUND_POWER_OF_TWO(
-          dst[y * dst_stride] +
-              clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)),
-          1);
-      y_q4 += y_step_q4;
-    }
-    ++src;
-    ++dst;
-  }
-}
-
 static void convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                      ptrdiff_t dst_stride, const InterpKernel *const x_filters,
                      int x0_q4, int x_step_q4,
@@ -174,21 +131,6 @@
                  w, h);
 }
 
-void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4, int w,
-                               int h) {
-  const InterpKernel *const filters_x = get_filter_base(filter_x);
-  const int x0_q4 = get_filter_offset(filter_x, filters_x);
-
-  (void)filter_y;
-  (void)y_step_q4;
-
-  convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
-                     x_step_q4, w, h);
-}
-
 void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int x_step_q4,
@@ -204,21 +146,6 @@
                 w, h);
 }
 
-void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4, int w,
-                              int h) {
-  const InterpKernel *const filters_y = get_filter_base(filter_y);
-  const int y0_q4 = get_filter_offset(filter_y, filters_y);
-
-  (void)filter_x;
-  (void)x_step_q4;
-
-  convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4,
-                    y_step_q4, w, h);
-}
-
 void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                      ptrdiff_t dst_stride, const int16_t *filter_x,
                      int x_step_q4, const int16_t *filter_y, int y_step_q4,
@@ -233,21 +160,6 @@
            filters_y, y0_q4, y_step_q4, w, h);
 }
 
-void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
-                         ptrdiff_t dst_stride, const int16_t *filter_x,
-                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
-                         int w, int h) {
-  /* Fixed size intermediate buffer places limits on parameters. */
-  DECLARE_ALIGNED(16, uint8_t, temp[MAX_SB_SIZE * MAX_SB_SIZE]);
-  assert(w <= MAX_SB_SIZE);
-  assert(h <= MAX_SB_SIZE);
-
-  aom_convolve8_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, x_step_q4,
-                  filter_y, y_step_q4, w, h);
-  aom_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
-                     h);
-}
-
 void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter_x,
                          int filter_x_stride, const int16_t *filter_y,
@@ -548,29 +460,6 @@
   }
 }
 
-static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,
-                                      uint8_t *dst8, ptrdiff_t dst_stride,
-                                      const InterpKernel *x_filters, int x0_q4,
-                                      int x_step_q4, int w, int h, int bd) {
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
-  src -= SUBPEL_TAPS / 2 - 1;
-  for (int y = 0; y < h; ++y) {
-    int x_q4 = x0_q4;
-    for (int x = 0; x < w; ++x) {
-      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
-      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
-      const int sum = highbd_horz_scalar_product(src_x, x_filter);
-      dst[x] = ROUND_POWER_OF_TWO(
-          dst[x] + clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd),
-          1);
-      x_q4 += x_step_q4;
-    }
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
 static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                  uint8_t *dst8, ptrdiff_t dst_stride,
                                  const InterpKernel *y_filters, int y0_q4,
@@ -593,30 +482,6 @@
   }
 }
 
-static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
-                                     uint8_t *dst8, ptrdiff_t dst_stride,
-                                     const InterpKernel *y_filters, int y0_q4,
-                                     int y_step_q4, int w, int h, int bd) {
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
-  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
-  for (int x = 0; x < w; ++x) {
-    int y_q4 = y0_q4;
-    for (int y = 0; y < h; ++y) {
-      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
-      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
-      const int sum = highbd_vert_scalar_product(src_y, src_stride, y_filter);
-      dst[y * dst_stride] = ROUND_POWER_OF_TWO(
-          dst[y * dst_stride] +
-              clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd),
-          1);
-      y_q4 += y_step_q4;
-    }
-    ++src;
-    ++dst;
-  }
-}
-
 static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *const x_filters, int x0_q4,
@@ -665,20 +530,6 @@
                         x_step_q4, w, h, bd);
 }
 
-void aom_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
-                                      uint8_t *dst, ptrdiff_t dst_stride,
-                                      const int16_t *filter_x, int x_step_q4,
-                                      const int16_t *filter_y, int y_step_q4,
-                                      int w, int h, int bd) {
-  const InterpKernel *const filters_x = get_filter_base(filter_x);
-  const int x0_q4 = get_filter_offset(filter_x, filters_x);
-  (void)filter_y;
-  (void)y_step_q4;
-
-  highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
-                            x_step_q4, w, h, bd);
-}
-
 void aom_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
@@ -693,20 +544,6 @@
                        y_step_q4, w, h, bd);
 }
 
-void aom_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
-                                     uint8_t *dst, ptrdiff_t dst_stride,
-                                     const int16_t *filter_x, int x_step_q4,
-                                     const int16_t *filter_y, int y_step_q4,
-                                     int w, int h, int bd) {
-  const InterpKernel *const filters_y = get_filter_base(filter_y);
-  const int y0_q4 = get_filter_offset(filter_y, filters_y);
-  (void)filter_x;
-  (void)x_step_q4;
-
-  highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4,
-                           y_step_q4, w, h, bd);
-}
-
 void aom_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
@@ -722,22 +559,6 @@
                   filters_y, y0_q4, y_step_q4, w, h, bd);
 }
 
-void aom_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
-                                uint8_t *dst, ptrdiff_t dst_stride,
-                                const int16_t *filter_x, int x_step_q4,
-                                const int16_t *filter_y, int y_step_q4, int w,
-                                int h, int bd) {
-  // Fixed size intermediate buffer places limits on parameters.
-  DECLARE_ALIGNED(16, uint16_t, temp[MAX_SB_SIZE * MAX_SB_SIZE]);
-  assert(w <= MAX_SB_SIZE);
-  assert(h <= MAX_SB_SIZE);
-
-  aom_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE,
-                         filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
-  aom_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE, dst,
-                            dst_stride, NULL, 0, NULL, 0, w, h, bd);
-}
-
 void aom_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                                 uint8_t *dst8, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int filter_x_stride,
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index 10348e3..222f4b9 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -119,13 +119,9 @@
 set(AOM_DSP_COMMON_INTRIN_DSPR2
     "${AOM_ROOT}/aom_dsp/mips/common_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/common_dspr2.h"
-    "${AOM_ROOT}/aom_dsp/mips/convolve2_avg_dspr2.c"
-    "${AOM_ROOT}/aom_dsp/mips/convolve2_avg_horiz_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve2_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve2_horiz_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve2_vert_dspr2.c"
-    "${AOM_ROOT}/aom_dsp/mips/convolve8_avg_dspr2.c"
-    "${AOM_ROOT}/aom_dsp/mips/convolve8_avg_horiz_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve8_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve8_horiz_dspr2.c"
     "${AOM_ROOT}/aom_dsp/mips/convolve8_vert_dspr2.c"
@@ -136,9 +132,6 @@
     "${AOM_ROOT}/aom_dsp/mips/inv_txfm_dspr2.h")
 
 set(AOM_DSP_COMMON_INTRIN_MSA
-    "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c"
-    "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_avg_msa.c"
-    "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_avg_vert_msa.c"
     "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_horiz_msa.c"
     "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_msa.c"
     "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_vert_msa.c"
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 5e8bb4e..6f274c6 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -272,18 +272,12 @@
 add_proto qw/void aom_convolve8/,                 "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 add_proto qw/void aom_convolve8_horiz/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 add_proto qw/void aom_convolve8_vert/,            "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg/,             "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg_horiz/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg_vert/,        "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 
 specialize qw/aom_convolve_copy       sse2      /;
 specialize qw/aom_convolve_avg        sse2      /;
 specialize qw/aom_convolve8           sse2 ssse3/, "$avx2_ssse3";
 specialize qw/aom_convolve8_horiz     sse2 ssse3/, "$avx2_ssse3";
 specialize qw/aom_convolve8_vert      sse2 ssse3/, "$avx2_ssse3";
-specialize qw/aom_convolve8_avg       sse2 ssse3/;
-specialize qw/aom_convolve8_avg_horiz sse2 ssse3/;
-specialize qw/aom_convolve8_avg_vert  sse2 ssse3/;
 
 add_proto qw/void aom_convolve8_add_src/,       "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 add_proto qw/void aom_convolve8_add_src_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
@@ -305,9 +299,6 @@
   specialize qw/aom_convolve8           neon dspr2 msa/;
   specialize qw/aom_convolve8_horiz     neon dspr2 msa/;
   specialize qw/aom_convolve8_vert      neon dspr2 msa/;
-  specialize qw/aom_convolve8_avg       neon dspr2 msa/;
-  specialize qw/aom_convolve8_avg_horiz neon dspr2 msa/;
-  specialize qw/aom_convolve8_avg_vert  neon dspr2 msa/;
 }
 
 add_proto qw/void aom_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
@@ -325,15 +316,6 @@
 add_proto qw/void aom_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
 specialize qw/aom_highbd_convolve8_vert avx2/, "$sse2_x86_64";
 
-add_proto qw/void aom_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/aom_highbd_convolve8_avg avx2/, "$sse2_x86_64";
-
-add_proto qw/void aom_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/aom_highbd_convolve8_avg_horiz avx2/, "$sse2_x86_64";
-
-add_proto qw/void aom_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/aom_highbd_convolve8_avg_vert avx2/, "$sse2_x86_64";
-
 add_proto qw/void aom_highbd_convolve8_add_src/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
 add_proto qw/void aom_highbd_convolve8_add_src_hip/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
 
diff --git a/aom_dsp/arm/aom_convolve8_avg_neon.c b/aom_dsp/arm/aom_convolve8_avg_neon.c
deleted file mode 100644
index 09429d6..0000000
--- a/aom_dsp/arm/aom_convolve8_avg_neon.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <arm_neon.h>
-#include <assert.h>
-
-#include "./aom_config.h"
-#include "./aom_dsp_rtcd.h"
-#include "aom/aom_integer.h"
-#include "aom_ports/mem.h"
-
-static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
-                                       int16x4_t dsrc2, int16x4_t dsrc3,
-                                       int16x4_t dsrc4, int16x4_t dsrc5,
-                                       int16x4_t dsrc6, int16x4_t dsrc7,
-                                       int16x8_t q0s16) {
-  int32x4_t qdst;
-  int16x4_t d0s16, d1s16;
-
-  d0s16 = vget_low_s16(q0s16);
-  d1s16 = vget_high_s16(q0s16);
-
-  qdst = vmull_lane_s16(dsrc0, d0s16, 0);
-  qdst = vmlal_lane_s16(qdst, dsrc1, d0s16, 1);
-  qdst = vmlal_lane_s16(qdst, dsrc2, d0s16, 2);
-  qdst = vmlal_lane_s16(qdst, dsrc3, d0s16, 3);
-  qdst = vmlal_lane_s16(qdst, dsrc4, d1s16, 0);
-  qdst = vmlal_lane_s16(qdst, dsrc5, d1s16, 1);
-  qdst = vmlal_lane_s16(qdst, dsrc6, d1s16, 2);
-  qdst = vmlal_lane_s16(qdst, dsrc7, d1s16, 3);
-  return qdst;
-}
-
-void aom_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y,  // unused
-                                  int y_step_q4,            // unused
-                                  int w, int h) {
-  int width;
-  const uint8_t *s;
-  uint8_t *d;
-  uint8x8_t d2u8, d3u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8;
-  uint32x2_t d2u32, d3u32, d6u32, d7u32, d28u32, d29u32, d30u32, d31u32;
-  uint8x16_t q1u8, q3u8, q12u8, q13u8, q14u8, q15u8;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d22s16, d23s16;
-  int16x4_t d24s16, d25s16, d26s16, d27s16;
-  uint16x4_t d2u16, d3u16, d4u16, d5u16, d16u16, d17u16, d18u16, d19u16;
-  int16x8_t q0s16;
-  uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
-  int32x4_t q1s32, q2s32, q14s32, q15s32;
-  uint16x8x2_t q0x2u16;
-  uint8x8x2_t d0x2u8, d1x2u8;
-  uint32x2x2_t d0x2u32;
-  uint16x4x2_t d0x2u16, d1x2u16;
-  uint32x4x2_t q0x2u32;
-
-  assert(x_step_q4 == 16);
-
-  (void)x_step_q4;
-  (void)y_step_q4;
-  (void)filter_y;
-
-  q0s16 = vld1q_s16(filter_x);
-
-  src -= 3;                // adjust for taps
-  for (; h > 0; h -= 4) {  // loop_horiz_v
-    s = src;
-    d24u8 = vld1_u8(s);
-    s += src_stride;
-    d25u8 = vld1_u8(s);
-    s += src_stride;
-    d26u8 = vld1_u8(s);
-    s += src_stride;
-    d27u8 = vld1_u8(s);
-
-    q12u8 = vcombine_u8(d24u8, d25u8);
-    q13u8 = vcombine_u8(d26u8, d27u8);
-
-    q0x2u16 =
-        vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
-    d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
-    d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
-    d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
-    d27u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[1]));
-    d0x2u8 = vtrn_u8(d24u8, d25u8);
-    d1x2u8 = vtrn_u8(d26u8, d27u8);
-
-    __builtin_prefetch(src + src_stride * 4);
-    __builtin_prefetch(src + src_stride * 5);
-
-    q8u16 = vmovl_u8(d0x2u8.val[0]);
-    q9u16 = vmovl_u8(d0x2u8.val[1]);
-    q10u16 = vmovl_u8(d1x2u8.val[0]);
-    q11u16 = vmovl_u8(d1x2u8.val[1]);
-
-    src += 7;
-    d16u16 = vget_low_u16(q8u16);
-    d17u16 = vget_high_u16(q8u16);
-    d18u16 = vget_low_u16(q9u16);
-    d19u16 = vget_high_u16(q9u16);
-    q8u16 = vcombine_u16(d16u16, d18u16);  // vswp 17 18
-    q9u16 = vcombine_u16(d17u16, d19u16);
-
-    d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
-    d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));         // vmov 23 21
-    for (width = w; width > 0; width -= 4, src += 4, dst += 4) {  // loop_horiz
-      s = src;
-      d28u32 = vld1_dup_u32((const uint32_t *)s);
-      s += src_stride;
-      d29u32 = vld1_dup_u32((const uint32_t *)s);
-      s += src_stride;
-      d31u32 = vld1_dup_u32((const uint32_t *)s);
-      s += src_stride;
-      d30u32 = vld1_dup_u32((const uint32_t *)s);
-
-      __builtin_prefetch(src + 64);
-
-      d0x2u16 =
-          vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
-      d1x2u16 =
-          vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
-      d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]),   // d28
-                       vreinterpret_u8_u16(d1x2u16.val[0]));  // d29
-      d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]),   // d31
-                       vreinterpret_u8_u16(d1x2u16.val[1]));  // d30
-
-      __builtin_prefetch(src + 64 + src_stride);
-
-      q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
-      q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
-      q0x2u32 =
-          vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));
-
-      d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
-      d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
-      q12u16 = vmovl_u8(d28u8);
-      q13u16 = vmovl_u8(d29u8);
-
-      __builtin_prefetch(src + 64 + src_stride * 2);
-
-      d = dst;
-      d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 0);
-      d += dst_stride;
-      d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 0);
-      d += dst_stride;
-      d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 1);
-      d += dst_stride;
-      d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 1);
-
-      d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
-      d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
-      d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
-      d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
-      d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-      d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-      d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-      d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-      d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
-      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
-                             d23s16, d24s16, q0s16);
-      q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
-                             d24s16, d26s16, q0s16);
-      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
-                              d26s16, d27s16, q0s16);
-      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
-                              d27s16, d25s16, q0s16);
-
-      __builtin_prefetch(src + 64 + src_stride * 3);
-
-      d2u16 = vqrshrun_n_s32(q1s32, 7);
-      d3u16 = vqrshrun_n_s32(q2s32, 7);
-      d4u16 = vqrshrun_n_s32(q14s32, 7);
-      d5u16 = vqrshrun_n_s32(q15s32, 7);
-
-      q1u16 = vcombine_u16(d2u16, d3u16);
-      q2u16 = vcombine_u16(d4u16, d5u16);
-
-      d2u8 = vqmovn_u16(q1u16);
-      d3u8 = vqmovn_u16(q2u16);
-
-      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
-      d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
-                         vreinterpret_u32_u16(d0x2u16.val[1]));
-      d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
-                       vreinterpret_u8_u32(d0x2u32.val[1]));
-
-      q1u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
-      q3u8 = vreinterpretq_u8_u32(vcombine_u32(d6u32, d7u32));
-
-      q1u8 = vrhaddq_u8(q1u8, q3u8);
-
-      d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8));
-      d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8));
-
-      d = dst;
-      vst1_lane_u32((uint32_t *)d, d2u32, 0);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d3u32, 0);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d2u32, 1);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d3u32, 1);
-
-      q8u16 = q9u16;
-      d20s16 = d23s16;
-      q11u16 = q12u16;
-      q9u16 = q13u16;
-      d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-    }
-    src += src_stride * 4 - w - 7;
-    dst += dst_stride * 4 - w;
-  }
-  return;
-}
-
-void aom_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
-                                 uint8_t *dst, ptrdiff_t dst_stride,
-                                 const int16_t *filter_x,  // unused
-                                 int x_step_q4,            // unused
-                                 const int16_t *filter_y, int y_step_q4, int w,
-                                 int h) {
-  int height;
-  const uint8_t *s;
-  uint8_t *d;
-  uint8x8_t d2u8, d3u8;
-  uint32x2_t d2u32, d3u32, d6u32, d7u32;
-  uint32x2_t d16u32, d18u32, d20u32, d22u32, d24u32, d26u32;
-  uint8x16_t q1u8, q3u8;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16;
-  int16x4_t d24s16, d25s16, d26s16, d27s16;
-  uint16x4_t d2u16, d3u16, d4u16, d5u16;
-  int16x8_t q0s16;
-  uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
-  int32x4_t q1s32, q2s32, q14s32, q15s32;
-
-  assert(y_step_q4 == 16);
-
-  (void)x_step_q4;
-  (void)y_step_q4;
-  (void)filter_x;
-
-  src -= src_stride * 3;
-  q0s16 = vld1q_s16(filter_y);
-  for (; w > 0; w -= 4, src += 4, dst += 4) {  // loop_vert_h
-    s = src;
-    d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 0);
-    s += src_stride;
-    d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 1);
-    s += src_stride;
-    d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 0);
-    s += src_stride;
-    d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 1);
-    s += src_stride;
-    d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 0);
-    s += src_stride;
-    d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 1);
-    s += src_stride;
-    d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
-    s += src_stride;
-
-    q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
-    q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
-    q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
-    q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
-
-    d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
-    d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
-    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-    d = dst;
-    for (height = h; height > 0; height -= 4) {  // loop_vert
-      d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 0);
-      s += src_stride;
-      d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 0);
-      s += src_stride;
-      d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 1);
-      s += src_stride;
-      d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 1);
-      s += src_stride;
-
-      q12u16 = vmovl_u8(vreinterpret_u8_u32(d24u32));
-      q13u16 = vmovl_u8(vreinterpret_u8_u32(d26u32));
-
-      d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 0);
-      d += dst_stride;
-      d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 1);
-      d += dst_stride;
-      d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 0);
-      d += dst_stride;
-      d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 1);
-      d -= dst_stride * 3;
-
-      d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
-      d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
-      d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
-      d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
-      d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-      d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-      d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-      d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
-      __builtin_prefetch(s);
-      __builtin_prefetch(s + src_stride);
-      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
-                             d22s16, d24s16, q0s16);
-      __builtin_prefetch(s + src_stride * 2);
-      __builtin_prefetch(s + src_stride * 3);
-      q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
-                             d24s16, d26s16, q0s16);
-      __builtin_prefetch(d);
-      __builtin_prefetch(d + dst_stride);
-      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
-                              d26s16, d27s16, q0s16);
-      __builtin_prefetch(d + dst_stride * 2);
-      __builtin_prefetch(d + dst_stride * 3);
-      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
-                              d27s16, d25s16, q0s16);
-
-      d2u16 = vqrshrun_n_s32(q1s32, 7);
-      d3u16 = vqrshrun_n_s32(q2s32, 7);
-      d4u16 = vqrshrun_n_s32(q14s32, 7);
-      d5u16 = vqrshrun_n_s32(q15s32, 7);
-
-      q1u16 = vcombine_u16(d2u16, d3u16);
-      q2u16 = vcombine_u16(d4u16, d5u16);
-
-      d2u8 = vqmovn_u16(q1u16);
-      d3u8 = vqmovn_u16(q2u16);
-
-      q1u8 = vcombine_u8(d2u8, d3u8);
-      q3u8 = vreinterpretq_u8_u32(vcombine_u32(d6u32, d7u32));
-
-      q1u8 = vrhaddq_u8(q1u8, q3u8);
-
-      d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8));
-      d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8));
-
-      vst1_lane_u32((uint32_t *)d, d2u32, 0);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d2u32, 1);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d3u32, 0);
-      d += dst_stride;
-      vst1_lane_u32((uint32_t *)d, d3u32, 1);
-      d += dst_stride;
-
-      q8u16 = q10u16;
-      d18s16 = d22s16;
-      d19s16 = d24s16;
-      q10u16 = q13u16;
-      d22s16 = d25s16;
-    }
-  }
-  return;
-}
diff --git a/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
deleted file mode 100644
index 80aef99..0000000
--- a/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
+++ /dev/null
@@ -1,295 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
-    ; These functions are only valid when:
-    ; x_step_q4 == 16
-    ; w%4 == 0
-    ; h%4 == 0
-    ; taps == 8
-    ; AV1_FILTER_WEIGHT == 128
-    ; AV1_FILTER_SHIFT == 7
-
-    EXPORT  |aom_convolve8_avg_horiz_neon|
-    EXPORT  |aom_convolve8_avg_vert_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-    ; Multiply and accumulate by q0
-    MACRO
-    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
-    vmull.s16 $dst, $src0, d0[0]
-    vmlal.s16 $dst, $src1, d0[1]
-    vmlal.s16 $dst, $src2, d0[2]
-    vmlal.s16 $dst, $src3, d0[3]
-    vmlal.s16 $dst, $src4, d1[0]
-    vmlal.s16 $dst, $src5, d1[1]
-    vmlal.s16 $dst, $src6, d1[2]
-    vmlal.s16 $dst, $src7, d1[3]
-    MEND
-
-; r0    const uint8_t *src
-; r1    int src_stride
-; r2    uint8_t *dst
-; r3    int dst_stride
-; sp[]const int16_t *filter_x
-; sp[]int x_step_q4
-; sp[]const int16_t *filter_y ; unused
-; sp[]int y_step_q4           ; unused
-; sp[]int w
-; sp[]int h
-
-|aom_convolve8_avg_horiz_neon| PROC
-    push            {r4-r10, lr}
-
-    sub             r0, r0, #3              ; adjust for taps
-
-    ldr             r5, [sp, #32]           ; filter_x
-    ldr             r6, [sp, #48]           ; w
-    ldr             r7, [sp, #52]           ; h
-
-    vld1.s16        {q0}, [r5]              ; filter_x
-
-    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
-    add             r8, r8, #4              ; -src_stride * 3 + 4
-
-    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
-    add             r4, r4, #4              ; -dst_stride * 3 + 4
-
-    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
-    sub             r9, r9, #7
-    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop
-
-    mov             r10, r6                 ; w loop counter
-
-aom_convolve8_avg_loop_horiz_v
-    vld1.8          {d24}, [r0], r1
-    vld1.8          {d25}, [r0], r1
-    vld1.8          {d26}, [r0], r1
-    vld1.8          {d27}, [r0], r8
-
-    vtrn.16         q12, q13
-    vtrn.8          d24, d25
-    vtrn.8          d26, d27
-
-    pld             [r0, r1, lsl #2]
-
-    vmovl.u8        q8, d24
-    vmovl.u8        q9, d25
-    vmovl.u8        q10, d26
-    vmovl.u8        q11, d27
-
-    ; save a few instructions in the inner loop
-    vswp            d17, d18
-    vmov            d23, d21
-
-    add             r0, r0, #3
-
-aom_convolve8_avg_loop_horiz
-    add             r5, r0, #64
-
-    vld1.32         {d28[]}, [r0], r1
-    vld1.32         {d29[]}, [r0], r1
-    vld1.32         {d31[]}, [r0], r1
-    vld1.32         {d30[]}, [r0], r8
-
-    pld             [r5]
-
-    vtrn.16         d28, d31
-    vtrn.16         d29, d30
-    vtrn.8          d28, d29
-    vtrn.8          d31, d30
-
-    pld             [r5, r1]
-
-    ; extract to s16
-    vtrn.32         q14, q15
-    vmovl.u8        q12, d28
-    vmovl.u8        q13, d29
-
-    pld             [r5, r1, lsl #1]
-
-    ; slightly out of order load to match the existing data
-    vld1.u32        {d6[0]}, [r2], r3
-    vld1.u32        {d7[0]}, [r2], r3
-    vld1.u32        {d6[1]}, [r2], r3
-    vld1.u32        {d7[1]}, [r2], r3
-
-    sub             r2, r2, r3, lsl #2      ; reset for store
-
-    ; src[] * filter_x
-    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
-    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
-    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
-    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25
-
-    pld             [r5, -r8]
-
-    ; += 64 >> 7
-    vqrshrun.s32    d2, q1, #7
-    vqrshrun.s32    d3, q2, #7
-    vqrshrun.s32    d4, q14, #7
-    vqrshrun.s32    d5, q15, #7
-
-    ; saturate
-    vqmovn.u16      d2, q1
-    vqmovn.u16      d3, q2
-
-    ; transpose
-    vtrn.16         d2, d3
-    vtrn.32         d2, d3
-    vtrn.8          d2, d3
-
-    ; average the new value and the dst value
-    vrhadd.u8       q1, q1, q3
-
-    vst1.u32        {d2[0]}, [r2@32], r3
-    vst1.u32        {d3[0]}, [r2@32], r3
-    vst1.u32        {d2[1]}, [r2@32], r3
-    vst1.u32        {d3[1]}, [r2@32], r4
-
-    vmov            q8,  q9
-    vmov            d20, d23
-    vmov            q11, q12
-    vmov            q9,  q13
-
-    subs            r6, r6, #4              ; w -= 4
-    bgt             aom_convolve8_avg_loop_horiz
-
-    ; outer loop
-    mov             r6, r10                 ; restore w counter
-    add             r0, r0, r9              ; src += src_stride * 4 - w
-    add             r2, r2, r12             ; dst += dst_stride * 4 - w
-    subs            r7, r7, #4              ; h -= 4
-    bgt aom_convolve8_avg_loop_horiz_v
-
-    pop             {r4-r10, pc}
-
-    ENDP
-
-|aom_convolve8_avg_vert_neon| PROC
-    push            {r4-r8, lr}
-
-    ; adjust for taps
-    sub             r0, r0, r1
-    sub             r0, r0, r1, lsl #1
-
-    ldr             r4, [sp, #32]           ; filter_y
-    ldr             r6, [sp, #40]           ; w
-    ldr             lr, [sp, #44]           ; h
-
-    vld1.s16        {q0}, [r4]              ; filter_y
-
-    lsl             r1, r1, #1
-    lsl             r3, r3, #1
-
-aom_convolve8_avg_loop_vert_h
-    mov             r4, r0
-    add             r7, r0, r1, asr #1
-    mov             r5, r2
-    add             r8, r2, r3, asr #1
-    mov             r12, lr                 ; h loop counter
-
-    vld1.u32        {d16[0]}, [r4], r1
-    vld1.u32        {d16[1]}, [r7], r1
-    vld1.u32        {d18[0]}, [r4], r1
-    vld1.u32        {d18[1]}, [r7], r1
-    vld1.u32        {d20[0]}, [r4], r1
-    vld1.u32        {d20[1]}, [r7], r1
-    vld1.u32        {d22[0]}, [r4], r1
-
-    vmovl.u8        q8, d16
-    vmovl.u8        q9, d18
-    vmovl.u8        q10, d20
-    vmovl.u8        q11, d22
-
-aom_convolve8_avg_loop_vert
-    ; always process a 4x4 block at a time
-    vld1.u32        {d24[0]}, [r7], r1
-    vld1.u32        {d26[0]}, [r4], r1
-    vld1.u32        {d26[1]}, [r7], r1
-    vld1.u32        {d24[1]}, [r4], r1
-
-    ; extract to s16
-    vmovl.u8        q12, d24
-    vmovl.u8        q13, d26
-
-    vld1.u32        {d6[0]}, [r5@32], r3
-    vld1.u32        {d6[1]}, [r8@32], r3
-    vld1.u32        {d7[0]}, [r5@32], r3
-    vld1.u32        {d7[1]}, [r8@32], r3
-
-    pld             [r7]
-    pld             [r4]
-
-    ; src[] * filter_y
-    MULTIPLY_BY_Q0  q1,  d16, d17, d18, d19, d20, d21, d22, d24
-
-    pld             [r7, r1]
-    pld             [r4, r1]
-
-    MULTIPLY_BY_Q0  q2,  d17, d18, d19, d20, d21, d22, d24, d26
-
-    pld             [r5]
-    pld             [r8]
-
-    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27
-
-    pld             [r5, r3]
-    pld             [r8, r3]
-
-    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25
-
-    ; += 64 >> 7
-    vqrshrun.s32    d2, q1, #7
-    vqrshrun.s32    d3, q2, #7
-    vqrshrun.s32    d4, q14, #7
-    vqrshrun.s32    d5, q15, #7
-
-    ; saturate
-    vqmovn.u16      d2, q1
-    vqmovn.u16      d3, q2
-
-    ; average the new value and the dst value
-    vrhadd.u8       q1, q1, q3
-
-    sub             r5, r5, r3, lsl #1      ; reset for store
-    sub             r8, r8, r3, lsl #1
-
-    vst1.u32        {d2[0]}, [r5@32], r3
-    vst1.u32        {d2[1]}, [r8@32], r3
-    vst1.u32        {d3[0]}, [r5@32], r3
-    vst1.u32        {d3[1]}, [r8@32], r3
-
-    vmov            q8, q10
-    vmov            d18, d22
-    vmov            d19, d24
-    vmov            q10, q13
-    vmov            d22, d25
-
-    subs            r12, r12, #4            ; h -= 4
-    bgt             aom_convolve8_avg_loop_vert
-
-    ; outer loop
-    add             r0, r0, #4
-    add             r2, r2, #4
-    subs            r6, r6, #4              ; w -= 4
-    bgt             aom_convolve8_avg_loop_vert_h
-
-    pop             {r4-r8, pc}
-
-    ENDP
-    END
diff --git a/aom_dsp/arm/aom_convolve_neon.c b/aom_dsp/arm/aom_convolve_neon.c
index 6c2997e..faaac7c 100644
--- a/aom_dsp/arm/aom_convolve_neon.c
+++ b/aom_dsp/arm/aom_convolve_neon.c
@@ -43,24 +43,3 @@
   aom_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
                           x_step_q4, filter_y, y_step_q4, w, h);
 }
-
-void aom_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
-                            uint8_t *dst, ptrdiff_t dst_stride,
-                            const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4, int w,
-                            int h) {
-  DECLARE_ALIGNED(8, uint8_t, temp[64 * 72]);
-  int intermediate_height = h + 7;
-
-  assert(y_step_q4 == 16);
-  assert(x_step_q4 == 16);
-
-  /* This implementation has the same issues as above. In addition, we only want
-   * to average the values after both passes.
-   */
-  aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
-                           x_step_q4, filter_y, y_step_q4, w,
-                           intermediate_height);
-  aom_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
-                              x_step_q4, filter_y, y_step_q4, w, h);
-}
diff --git a/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
deleted file mode 100644
index 847394a..0000000
--- a/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
+++ /dev/null
@@ -1,704 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/aom_convolve_msa.h"
-
-static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 dst0, dst1, dst2, dst3, res2, res3;
-  v16u8 mask0, mask1, mask2, mask3;
-  v8i16 filt, res0, res1;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[16]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, res0, res1);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  SRARI_H2_SH(res0, res1, FILTER_BITS);
-  SAT_SH2_SH(res0, res1, 7);
-  PCKEV_B2_UB(res0, res0, res1, res1, res2, res3);
-  ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
-  XORI_B2_128_UB(res2, res3);
-  AVER_UB2_UB(res2, dst0, res3, dst2, res2, res3);
-  ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, res0, res1, res2, res3;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v8i16 filt, vec0, vec1, vec2, vec3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[16]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  src += (4 * src_stride);
-  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, vec0, vec1);
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, vec2, vec3);
-  SRARI_H4_SH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  SAT_SH4_SH(vec0, vec1, vec2, vec3, 7);
-  PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2,
-              res3);
-  ILVR_D2_UB(res1, res0, res3, res2, res0, res2);
-  XORI_B2_128_UB(res0, res2);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
-             dst6);
-  ILVR_D2_UB(dst2, dst0, dst6, dst4, dst0, dst4);
-  AVER_UB2_UB(res0, dst0, res2, dst4, res0, res2);
-  ST4x8_UB(res0, res2, dst, dst_stride);
-}
-
-static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  if (4 == height) {
-    common_hz_8t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_hz_8t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  int32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    src += (4 * src_stride);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst,
-                            dst_stride);
-    dst += (4 * dst_stride);
-  }
-}
-
-static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  int32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, dst0, dst1;
-  v8i16 filt, out0, out1, out2, out3;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = height >> 1; loop_cnt--;) {
-    LD_SB2(src, src_stride, src0, src2);
-    LD_SB2(src + 8, src_stride, src1, src3);
-    src += (2 * src_stride);
-
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12);
-    VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);
-    VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
-               vec14);
-    VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
-               vec15);
-    DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
-                vec9, vec10, vec11);
-    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,
-                 vec2, vec3);
-    DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
-                 vec9, vec10, vec11);
-    ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
-                out2, out3);
-    LD_UB2(dst, dst_stride, dst0, dst1);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    PCKEV_XORI128_AVG_ST_UB(out1, out0, dst0, dst);
-    dst += dst_stride;
-    PCKEV_XORI128_AVG_ST_UB(out3, out2, dst1, dst);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
-  v8i16 filt, out0, out1, out2, out3;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = height; loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12);
-    VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);
-    VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
-               vec14);
-    VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
-               vec15);
-    DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
-                vec9, vec10, vec11);
-    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,
-                 vec2, vec3);
-    DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
-                 vec9, vec10, vec11);
-    ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
-                out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    LD_UB2(dst, 16, dst1, dst2);
-    PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, dst);
-    PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, dst + 16);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt, cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
-  v8i16 filt, out0, out1, out2, out3;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = height; loop_cnt--;) {
-    for (cnt = 0; cnt < 2; ++cnt) {
-      src0 = LD_SB(&src[cnt << 5]);
-      src2 = LD_SB(&src[16 + (cnt << 5)]);
-      src3 = LD_SB(&src[24 + (cnt << 5)]);
-      src1 = __msa_sldi_b(src2, src0, 8);
-
-      XORI_B4_128_SB(src0, src1, src2, src3);
-      VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8,
-                 vec12);
-      VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9,
-                 vec13);
-      VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
-                 vec14);
-      VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
-                 vec15);
-      DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0,
-                  vec1, vec2, vec3);
-      DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
-                  vec9, vec10, vec11);
-      DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0,
-                   vec1, vec2, vec3);
-      DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
-                   vec9, vec10, vec11);
-      ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
-                  out2, out3);
-      SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-      SAT_SH4_SH(out0, out1, out2, out3, 7);
-      LD_UB2(&dst[cnt << 5], 16, dst1, dst2);
-      PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, &dst[cnt << 5]);
-      PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, &dst[16 + (cnt << 5)]);
-    }
-
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3, vec0, vec1, res0, res1;
-  v8u16 vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
-  DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
-  SRARI_H2_UH(vec2, vec3, FILTER_BITS);
-  PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
-  ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
-  AVER_UB2_UB(res0, dst0, res1, dst2, res0, res1);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, vec0, vec1, vec2, vec3, res0, res1, res2, res3;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v8u16 vec4, vec5, vec6, vec7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
-  VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
-              vec6, vec7);
-  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
-  PCKEV_B4_UB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
-              res3);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
-             dst6);
-  AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, res2,
-              res3);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-  dst += (4 * dst_stride);
-  ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  if (4 == height) {
-    common_hz_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_hz_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3;
-  v8u16 vec0, vec1, vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
-                     dst_stride);
-}
-
-static void common_hz_2t_and_aver_dst_8x8mult_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter, int32_t height) {
-  v16i8 src0, src1, src2, src3, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3;
-  v8u16 vec0, vec1, vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  src += (4 * src_stride);
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  src += (4 * src_stride);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
-                     dst_stride);
-  dst += (4 * dst_stride);
-
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
-                     dst_stride);
-  dst += (4 * dst_stride);
-
-  if (16 == height) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    src += (4 * src_stride);
-
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
-                       dst_stride);
-    dst += (4 * dst_stride);
-
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
-                       dst_stride);
-  }
-}
-
-static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  if (4 == height) {
-    common_hz_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else {
-    common_hz_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride,
-                                          filter, height);
-  }
-}
-
-static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src2, src4, src6);
-  LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-  src += (4 * src_stride);
-
-  VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, res1,
-              res2, res3);
-  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, res5,
-              res6, res7);
-  SRARI_H4_UH(res0, res1, res2, res3, FILTER_BITS);
-  SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  PCKEV_AVG_ST_UB(res1, res0, dst0, dst);
-  dst += dst_stride;
-  PCKEV_AVG_ST_UB(res3, res2, dst1, dst);
-  dst += dst_stride;
-  PCKEV_AVG_ST_UB(res5, res4, dst2, dst);
-  dst += dst_stride;
-  PCKEV_AVG_ST_UB(res7, res6, dst3, dst);
-  dst += dst_stride;
-
-  for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src2, src4, src6);
-    LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-    src += (4 * src_stride);
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, res1,
-                res2, res3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, res5,
-                res6, res7);
-    SRARI_H4_UH(res0, res1, res2, res3, FILTER_BITS);
-    SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST_UB(res1, res0, dst0, dst);
-    dst += dst_stride;
-    PCKEV_AVG_ST_UB(res3, res2, dst1, dst);
-    dst += dst_stride;
-    PCKEV_AVG_ST_UB(res5, res4, dst2, dst);
-    dst += dst_stride;
-    PCKEV_AVG_ST_UB(res7, res6, dst3, dst);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  for (loop_cnt = (height >> 1); loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-    src4 = LD_SB(src);
-    src6 = LD_SB(src + 16);
-    src7 = LD_SB(src + 24);
-    src5 = __msa_sldi_b(src6, src4, 8);
-    src += src_stride;
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, res1,
-                res2, res3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, res5,
-                res6, res7);
-    SRARI_H4_UH(res0, res1, res2, res3, FILTER_BITS);
-    SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);
-    LD_UB2(dst, 16, dst0, dst1);
-    PCKEV_AVG_ST_UB(res1, res0, dst0, dst);
-    PCKEV_AVG_ST_UB(res3, res2, dst1, (dst + 16));
-    dst += dst_stride;
-    LD_UB2(dst, 16, dst2, dst3);
-    PCKEV_AVG_ST_UB(res5, res4, dst2, dst);
-    PCKEV_AVG_ST_UB(res7, res6, dst3, (dst + 16));
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, dst0, dst1, dst2, dst3;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  for (loop_cnt = height; loop_cnt--;) {
-    LD_SB4(src, 16, src0, src2, src4, src6);
-    src7 = LD_SB(src + 56);
-    SLDI_B3_SB(src2, src4, src6, src0, src2, src4, src1, src3, src5, 8);
-    src += src_stride;
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
-                out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
-                out6, out7);
-    SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-    SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    LD_UB4(dst, 16, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST_UB(out1, out0, dst0, dst);
-    PCKEV_AVG_ST_UB(out3, out2, dst1, dst + 16);
-    PCKEV_AVG_ST_UB(out5, out4, dst2, dst + 32);
-    PCKEV_AVG_ST_UB(out7, out6, dst3, dst + 48);
-    dst += dst_stride;
-  }
-}
-
-void aom_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
-                                 uint8_t *dst, ptrdiff_t dst_stride,
-                                 const int16_t *filter_x, int x_step_q4,
-                                 const int16_t *filter_y, int y_step_q4, int w,
-                                 int h) {
-  int8_t cnt, filt_hor[8];
-
-  assert(x_step_q4 == 16);
-  assert(((const int32_t *)filter_x)[1] != 0x800000);
-
-  for (cnt = 0; cnt < 8; ++cnt) {
-    filt_hor[cnt] = filter_x[cnt];
-  }
-
-  if (((const int32_t *)filter_x)[0] == 0) {
-    switch (w) {
-      case 4:
-        common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, &filt_hor[3], h);
-        break;
-      case 8:
-        common_hz_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, &filt_hor[3], h);
-        break;
-      case 16:
-        common_hz_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_hor[3], h);
-        break;
-      case 32:
-        common_hz_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_hor[3], h);
-        break;
-      case 64:
-        common_hz_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_hor[3], h);
-        break;
-      default:
-        aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                                  x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  } else {
-    switch (w) {
-      case 4:
-        common_hz_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, filt_hor, h);
-        break;
-      case 8:
-        common_hz_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, filt_hor, h);
-        break;
-      case 16:
-        common_hz_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_hor, h);
-        break;
-      case 32:
-        common_hz_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_hor, h);
-        break;
-      case 64:
-        common_hz_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_hor, h);
-        break;
-      default:
-        aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                                  x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
diff --git a/aom_dsp/mips/aom_convolve8_avg_msa.c b/aom_dsp/mips/aom_convolve8_avg_msa.c
deleted file mode 100644
index bed600d..0000000
--- a/aom_dsp/mips/aom_convolve8_avg_msa.c
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/aom_convolve_msa.h"
-
-static void common_hv_8ht_8vt_and_aver_dst_4w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16u8 dst0, dst1, dst2, dst3, mask0, mask1, mask2, mask3, tmp0, tmp1;
-  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3;
-  v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
-  v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4;
-  v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[16]);
-  src -= (3 + 3 * src_stride);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-
-  hz_out0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  SLDI_B2_SH(hz_out2, hz_out4, hz_out0, hz_out2, hz_out1, hz_out3, 8);
-
-  filt = LD_SH(filter_vert);
-  SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
-
-  ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-  vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    XORI_B4_128_SB(src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0,
-                              filt_hz1, filt_hz2, filt_hz3);
-    hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
-    vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
-    res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-
-    hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, filt_hz0,
-                              filt_hz1, filt_hz2, filt_hz3);
-    hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8);
-    vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
-    res1 = FILT_8TAP_DPADD_S_H(vec1, vec2, vec3, vec4, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-    ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
-
-    SRARI_H2_SH(res0, res1, FILTER_BITS);
-    SAT_SH2_SH(res0, res1, 7);
-    PCKEV_B2_UB(res0, res0, res1, res1, tmp0, tmp1);
-    XORI_B2_128_UB(tmp0, tmp1);
-    AVER_UB2_UB(tmp0, dst0, tmp1, dst2, tmp0, tmp1);
-    ST4x4_UB(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    hz_out5 = hz_out9;
-    vec0 = vec2;
-    vec1 = vec3;
-    vec2 = vec4;
-  }
-}
-
-static void common_hv_8ht_8vt_and_aver_dst_8w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3;
-  v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3;
-  v16u8 dst0, dst1, dst2, dst3, mask0, mask1, mask2, mask3;
-  v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
-  v8i16 hz_out7, hz_out8, hz_out9, hz_out10, tmp0, tmp1, tmp2, tmp3;
-  v8i16 out0, out1, out2, out3, out4, out5, out6, out7, out8, out9;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= (3 + 3 * src_stride);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-
-  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-  hz_out0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-  hz_out6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3, filt_hz0,
-                            filt_hz1, filt_hz2, filt_hz3);
-
-  filt = LD_SH(filter_vert);
-  SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
-
-  ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
-  ILVEV_B2_SH(hz_out4, hz_out5, hz_out1, hz_out2, out2, out4);
-  ILVEV_B2_SH(hz_out3, hz_out4, hz_out5, hz_out6, out5, out6);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    XORI_B4_128_SB(src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-
-    hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0,
-                              filt_hz1, filt_hz2, filt_hz3);
-    out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
-    tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-
-    hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0,
-                              filt_hz1, filt_hz2, filt_hz3);
-    out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7);
-    tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-
-    hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0,
-                              filt_hz1, filt_hz2, filt_hz3);
-    out8 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
-    tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-
-    hz_out10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2, mask3,
-                               filt_hz0, filt_hz1, filt_hz2, filt_hz3);
-    out9 = (v8i16)__msa_ilvev_b((v16i8)hz_out10, (v16i8)hz_out9);
-    tmp3 = FILT_8TAP_DPADD_S_H(out5, out6, out7, out9, filt_vt0, filt_vt1,
-                               filt_vt2, filt_vt3);
-
-    SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3, dst,
-                            dst_stride);
-    dst += (4 * dst_stride);
-
-    hz_out6 = hz_out10;
-    out0 = out2;
-    out1 = out3;
-    out2 = out8;
-    out4 = out6;
-    out5 = out7;
-    out6 = out9;
-  }
-}
-
-static void common_hv_8ht_8vt_and_aver_dst_16w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  int32_t multiple8_cnt;
-  for (multiple8_cnt = 2; multiple8_cnt--;) {
-    common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
-                                          filter_horiz, filter_vert, height);
-    src += 8;
-    dst += 8;
-  }
-}
-
-static void common_hv_8ht_8vt_and_aver_dst_32w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  int32_t multiple8_cnt;
-  for (multiple8_cnt = 4; multiple8_cnt--;) {
-    common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
-                                          filter_horiz, filter_vert, height);
-    src += 8;
-    dst += 8;
-  }
-}
-
-static void common_hv_8ht_8vt_and_aver_dst_64w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  int32_t multiple8_cnt;
-  for (multiple8_cnt = 8; multiple8_cnt--;) {
-    common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
-                                          filter_horiz, filter_vert, height);
-    src += 8;
-    dst += 8;
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert) {
-  v16i8 src0, src1, src2, src3, src4, mask;
-  v16u8 filt_hz, filt_vt, vec0, vec1;
-  v16u8 dst0, dst1, dst2, dst3, res0, res1;
-  v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter_horiz);
-  filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  filt = LD_UH(filter_vert);
-  filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, FILTER_BITS);
-  hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, FILTER_BITS);
-  hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-  hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
-  hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2);
-  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
-  DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-  SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-  PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1);
-  AVER_UB2_UB(res0, dst0, res1, dst2, res0, res1);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert) {
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;
-  v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1, res2, res3;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
-  v8u16 hz_out7, hz_out8, tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  filt_hz = (v16u8)__msa_splati_h(filt, 0);
-
-  filt = LD_SH(filter_vert);
-  filt_vt = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  src += (8 * src_stride);
-  src8 = LD_SB(src);
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, FILTER_BITS);
-  hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, FILTER_BITS);
-  hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, FILTER_BITS);
-  hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, FILTER_BITS);
-  hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, FILTER_BITS);
-  SLDI_B3_UH(hz_out2, hz_out4, hz_out6, hz_out0, hz_out2, hz_out4, hz_out1,
-             hz_out3, hz_out5, 8);
-  hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6);
-
-  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
-             dst6);
-  ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-  ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, tmp0,
-              tmp1, tmp2, tmp3);
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_B4_UB(tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, tmp3, res0, res1, res2,
-              res3);
-  AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, res2,
-              res3);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-  dst += (4 * dst_stride);
-  ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_4w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  if (4 == height) {
-    common_hv_2ht_2vt_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride,
-                                           filter_horiz, filter_vert);
-  } else if (8 == height) {
-    common_hv_2ht_2vt_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride,
-                                           filter_horiz, filter_vert);
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert) {
-  v16i8 src0, src1, src2, src3, src4, mask;
-  v16u8 filt_hz, filt_vt, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
-  v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  filt_hz = (v16u8)__msa_splati_h(filt, 0);
-
-  filt = LD_SH(filter_vert);
-  filt_vt = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
-  src += (5 * src_stride);
-
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-  hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-  vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-  tmp0 = __msa_dotp_u_h(vec0, filt_vt);
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
-  vec1 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-  tmp1 = __msa_dotp_u_h(vec1, filt_vt);
-
-  hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
-  vec2 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-  tmp2 = __msa_dotp_u_h(vec2, filt_vt);
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-  vec3 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-  tmp3 = __msa_dotp_u_h(vec3, filt_vt);
-
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
-                     dst_stride);
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, mask;
-  v16u8 filt_hz, filt_vt, vec0, dst0, dst1, dst2, dst3;
-  v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  filt_hz = (v16u8)__msa_splati_h(filt, 0);
-
-  filt = LD_SH(filter_vert);
-  filt_vt = (v16u8)__msa_splati_h(filt, 0);
-
-  src0 = LD_SB(src);
-  src += src_stride;
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-    tmp0 = __msa_dotp_u_h(vec0, filt_vt);
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-    tmp1 = __msa_dotp_u_h(vec0, filt_vt);
-
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
-    tmp2 = __msa_dotp_u_h(vec0, filt_vt);
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-    vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
-    tmp3 = __msa_dotp_u_h(vec0, filt_vt);
-
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
-                       dst_stride);
-    dst += (4 * dst_stride);
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_8w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  if (4 == height) {
-    common_hv_2ht_2vt_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride,
-                                           filter_horiz, filter_vert);
-  } else {
-    common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(
-        src, src_stride, dst, dst_stride, filter_horiz, filter_vert, height);
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_16w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt_hz, filt_vt, vec0, vec1, dst0, dst1, dst2, dst3;
-  v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1;
-  v8i16 filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_SH(filter_horiz);
-  filt_hz = (v16u8)__msa_splati_h(filt, 0);
-
-  filt = LD_SH(filter_vert);
-  filt_vt = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB2(src, 8, src0, src1);
-  src += src_stride;
-
-  hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-  hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src2, src4, src6);
-    LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-    src += (4 * src_stride);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
-    hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
-    dst += dst_stride;
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
-    hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst1, dst);
-    dst += dst_stride;
-
-    hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
-    hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst);
-    dst += dst_stride;
-
-    hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, FILTER_BITS);
-    hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, FILTER_BITS);
-    ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
-    DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst3, dst);
-    dst += dst_stride;
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_32w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  int32_t multiple8_cnt;
-  for (multiple8_cnt = 2; multiple8_cnt--;) {
-    common_hv_2ht_2vt_and_aver_dst_16w_msa(src, src_stride, dst, dst_stride,
-                                           filter_horiz, filter_vert, height);
-    src += 16;
-    dst += 16;
-  }
-}
-
-static void common_hv_2ht_2vt_and_aver_dst_64w_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
-  int32_t multiple8_cnt;
-  for (multiple8_cnt = 4; multiple8_cnt--;) {
-    common_hv_2ht_2vt_and_aver_dst_16w_msa(src, src_stride, dst, dst_stride,
-                                           filter_horiz, filter_vert, height);
-    src += 16;
-    dst += 16;
-  }
-}
-
-void aom_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
-                           uint8_t *dst, ptrdiff_t dst_stride,
-                           const int16_t *filter_x, int x_step_q4,
-                           const int16_t *filter_y, int y_step_q4, int w,
-                           int h) {
-  int8_t cnt, filt_hor[8], filt_ver[8];
-
-  assert(x_step_q4 == 16);
-  assert(y_step_q4 == 16);
-  assert(((const int32_t *)filter_x)[1] != 0x800000);
-  assert(((const int32_t *)filter_y)[1] != 0x800000);
-
-  for (cnt = 0; cnt < 8; ++cnt) {
-    filt_hor[cnt] = filter_x[cnt];
-    filt_ver[cnt] = filter_y[cnt];
-  }
-
-  if (((const int32_t *)filter_x)[0] == 0 &&
-      ((const int32_t *)filter_y)[0] == 0) {
-    switch (w) {
-      case 4:
-        common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                              (int32_t)dst_stride, &filt_hor[3],
-                                              &filt_ver[3], h);
-        break;
-      case 8:
-        common_hv_2ht_2vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                              (int32_t)dst_stride, &filt_hor[3],
-                                              &filt_ver[3], h);
-        break;
-      case 16:
-        common_hv_2ht_2vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride,
-                                               &filt_hor[3], &filt_ver[3], h);
-        break;
-      case 32:
-        common_hv_2ht_2vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride,
-                                               &filt_hor[3], &filt_ver[3], h);
-        break;
-      case 64:
-        common_hv_2ht_2vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride,
-                                               &filt_hor[3], &filt_ver[3], h);
-        break;
-      default:
-        aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
-                            x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  } else if (((const int32_t *)filter_x)[0] == 0 ||
-             ((const int32_t *)filter_y)[0] == 0) {
-    aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
-                        filter_y, y_step_q4, w, h);
-  } else {
-    switch (w) {
-      case 4:
-        common_hv_8ht_8vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                              (int32_t)dst_stride, filt_hor,
-                                              filt_ver, h);
-        break;
-      case 8:
-        common_hv_8ht_8vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                              (int32_t)dst_stride, filt_hor,
-                                              filt_ver, h);
-        break;
-      case 16:
-        common_hv_8ht_8vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride, filt_hor,
-                                               filt_ver, h);
-        break;
-      case 32:
-        common_hv_8ht_8vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride, filt_hor,
-                                               filt_ver, h);
-        break;
-      case 64:
-        common_hv_8ht_8vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                               (int32_t)dst_stride, filt_hor,
-                                               filt_ver, h);
-        break;
-      default:
-        aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
-                            x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
diff --git a/aom_dsp/mips/aom_convolve8_avg_vert_msa.c b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
deleted file mode 100644
index dae7711..0000000
--- a/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/aom_convolve_msa.h"
-
-static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16u8 dst0, dst1, dst2, dst3, out;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
-  v16i8 src10998, filt0, filt1, filt2, filt3;
-  v8i16 filt, out10, out32;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-
-  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-             src54_r, src21_r);
-  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-  ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110,
-             src4332, src6554);
-  XORI_B3_128_SB(src2110, src4332, src6554);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-               src87_r, src98_r, src109_r);
-    ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998);
-    XORI_B2_128_SB(src8776, src10998);
-    out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0,
-                                filt1, filt2, filt3);
-    out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0,
-                                filt1, filt2, filt3);
-    SRARI_H2_SH(out10, out32, FILTER_BITS);
-    SAT_SH2_SH(out10, out32, 7);
-    out = PCKEV_XORI128_UB(out10, out32);
-    ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
-
-    dst0 = (v16u8)__msa_ilvr_d((v2i64)dst2, (v2i64)dst0);
-    out = __msa_aver_u_b(out, dst0);
-
-    ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    src2110 = src6554;
-    src4332 = src8776;
-    src6554 = src10998;
-    src6 = src10;
-  }
-}
-
-static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16u8 dst0, dst1, dst2, dst3;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
-  v8i16 filt, out0, out1, out2, out3;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-
-  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-             src54_r, src21_r);
-  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    XORI_B4_128_SB(src7, src8, src9, src10);
-    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-               src87_r, src98_r, src109_r);
-    out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, filt1,
-                               filt2, filt3);
-    out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, filt1,
-                               filt2, filt3);
-    out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, filt1,
-                               filt2, filt3);
-    out3 = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
-                               filt1, filt2, filt3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst,
-                            dst_stride);
-    dst += (4 * dst_stride);
-
-    src10_r = src54_r;
-    src32_r = src76_r;
-    src54_r = src98_r;
-    src21_r = src65_r;
-    src43_r = src87_r;
-    src65_r = src109_r;
-    src6 = src10;
-  }
-}
-
-static void common_vt_8t_and_aver_dst_16w_mult_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter, int32_t height, int32_t width) {
-  const uint8_t *src_tmp;
-  uint8_t *dst_tmp;
-  uint32_t loop_cnt, cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
-  v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
-  v16i8 filt0, filt1, filt2, filt3;
-  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
-  v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  for (cnt = (width >> 4); cnt--;) {
-    src_tmp = src;
-    dst_tmp = dst;
-
-    LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
-    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-    src_tmp += (7 * src_stride);
-
-    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-               src54_r, src21_r);
-    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
-               src54_l, src21_l);
-    ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
-
-    for (loop_cnt = (height >> 2); loop_cnt--;) {
-      LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
-      src_tmp += (4 * src_stride);
-
-      LD_UB4(dst_tmp, dst_stride, dst0, dst1, dst2, dst3);
-      XORI_B4_128_SB(src7, src8, src9, src10);
-      ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-                 src87_r, src98_r, src109_r);
-      ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
-                 src87_l, src98_l, src109_l);
-      out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
-                                   filt1, filt2, filt3);
-      out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
-                                   filt1, filt2, filt3);
-      out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
-                                   filt1, filt2, filt3);
-      out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
-                                   filt1, filt2, filt3);
-      out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
-                                   filt1, filt2, filt3);
-      out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
-                                   filt1, filt2, filt3);
-      out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
-                                   filt1, filt2, filt3);
-      out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
-                                   filt1, filt2, filt3);
-      SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
-      SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
-      SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
-      SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
-      PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
-                  out3_r, tmp0, tmp1, tmp2, tmp3);
-      XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
-      AVER_UB4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst0, dst1,
-                  dst2, dst3);
-      ST_UB4(dst0, dst1, dst2, dst3, dst_tmp, dst_stride);
-      dst_tmp += (4 * dst_stride);
-
-      src10_r = src54_r;
-      src32_r = src76_r;
-      src54_r = src98_r;
-      src21_r = src65_r;
-      src43_r = src87_r;
-      src65_r = src109_r;
-      src10_l = src54_l;
-      src32_l = src76_l;
-      src54_l = src98_l;
-      src21_l = src65_l;
-      src43_l = src87_l;
-      src65_l = src109_l;
-      src6 = src10;
-    }
-
-    src += 16;
-    dst += 16;
-  }
-}
-
-static void common_vt_8t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
-                                         filter, height, 16);
-}
-
-static void common_vt_8t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
-                                         filter, height, 32);
-}
-
-static void common_vt_8t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
-                                         filter, height, 64);
-}
-
-static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16i8 src0, src1, src2, src3, src4;
-  v16u8 dst0, dst1, dst2, dst3, out, filt0, src2110, src4332;
-  v16i8 src10_r, src32_r, src21_r, src43_r;
-  v8i16 filt;
-  v8u16 tmp0, tmp1;
-
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  src += (4 * src_stride);
-
-  src4 = LD_SB(src);
-  src += src_stride;
-
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst1);
-  dst0 = (v16u8)__msa_ilvr_d((v2i64)dst1, (v2i64)dst0);
-  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
-             src32_r, src43_r);
-  ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
-  DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
-  SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-
-  out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-  out = __msa_aver_u_b(out, dst0);
-
-  ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-}
-
-static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src87_r;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
-  v16u8 src2110, src4332, src6554, src8776, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  src += (8 * src_stride);
-  src8 = LD_SB(src);
-
-  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1, dst2,
-             dst3);
-  ILVR_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1);
-  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
-             src32_r, src43_r);
-  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
-             src76_r, src87_r);
-  ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
-             src76_r, src2110, src4332, src6554, src8776);
-  DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
-              tmp0, tmp1, tmp2, tmp3);
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
-  AVER_UB2_UB(src2110, dst0, src4332, dst1, src2110, src4332);
-  ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
-  dst += (4 * dst_stride);
-  ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst, dst_stride);
-}
-
-static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  if (4 == height) {
-    common_vt_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_vt_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter) {
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
-  LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
-  ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
-              tmp2, tmp3);
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
-                     dst_stride);
-}
-
-static void common_vt_2t_and_aver_dst_8x8mult_msa(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
-  v16u8 dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 3); loop_cnt--;) {
-    LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
-    src += (8 * src_stride);
-    LD_UB8(dst, dst_stride, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8);
-
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
-               vec3);
-    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
-               vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
-                tmp2, tmp3);
-    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4, dst,
-                       dst_stride);
-    dst += (4 * dst_stride);
-
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
-                tmp2, tmp3);
-    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8, dst,
-                       dst_stride);
-    dst += (4 * dst_stride);
-
-    src0 = src8;
-  }
-}
-
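-/* Width-8 dispatcher: height 4 goes to the fixed 8x4 kernel, taller
-   blocks to the 8x8-multiple loop. */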
-static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride, uint8_t *dst,
-                                             int32_t dst_stride, int8_t *filter,
-                                             int32_t height) {
-  if (4 == height) {
-    common_vt_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else {
-    common_vt_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride,
-                                          filter, height);
-  }
-}
-
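-/* 16-wide: ILVR/ILVL split each row pair into low/high byte halves so
-   a full 16-byte row is filtered per pass, four rows per iteration. */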
-static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 tmp0, tmp1, tmp2, tmp3, filt;
-
-  /* rearranging filter_y */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
-    dst += dst_stride;
-
-    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
-    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst);
-    dst += dst_stride;
-
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst);
-    dst += dst_stride;
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst);
-    dst += dst_stride;
-
-    src0 = src4;
-  }
-}
-
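-/* 32-wide: processed as two adjacent 16-byte columns (dst and
-   dst + 16), four rows per iteration. */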
-static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3, filt;
-
-  /* rearranging filter_y */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_UB2(src, 16, src0, src5);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-
-    LD_UB4(src + 16, src_stride, src6, src7, src8, src9);
-    LD_UB4(dst + 16, dst_stride, dst4, dst5, dst6, dst7);
-    src += (4 * src_stride);
-
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);
-
-    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
-    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst + 2 * dst_stride);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst + 3 * dst_stride);
-
-    ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2);
-    ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 16);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 16 + dst_stride);
-
-    ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6);
-    ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst6, dst + 16 + 2 * dst_stride);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst7, dst + 16 + 3 * dst_stride);
-    dst += (4 * dst_stride);
-
-    src0 = src4;
-    src5 = src9;
-  }
-}
-
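-/* 64-wide: four 16-byte columns per row, two rows per iteration. */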
-static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride, uint8_t *dst,
-                                              int32_t dst_stride,
-                                              int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5;
-  v16u8 src6, src7, src8, src9, src10, src11, filt0;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
-  v8u16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_UB4(src, 16, src0, src3, src6, src9);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 1); loop_cnt--;) {
-    LD_UB2(src, src_stride, src1, src2);
-    LD_UB2(dst, dst_stride, dst0, dst1);
-    LD_UB2(src + 16, src_stride, src4, src5);
-    LD_UB2(dst + 16, dst_stride, dst2, dst3);
-    LD_UB2(src + 32, src_stride, src7, src8);
-    LD_UB2(dst + 32, dst_stride, dst4, dst5);
-    LD_UB2(src + 48, src_stride, src10, src11);
-    LD_UB2(dst + 48, dst_stride, dst6, dst7);
-    src += (2 * src_stride);
-
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);
-
-    ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6);
-    ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
-    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp5, tmp4, dst2, dst + 16);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
-    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp7, tmp6, dst3, dst + 16 + dst_stride);
-
-    ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2);
-    ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 32);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 32 + dst_stride);
-
-    ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6);
-    ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
-    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp5, tmp4, dst6, dst + 48);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
-    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    PCKEV_AVG_ST_UB(tmp7, tmp6, dst7, dst + 48 + dst_stride);
-    dst += (2 * dst_stride);
-
-    src0 = src2;
-    src3 = src5;
-    src6 = src8;
-    src9 = src11;
-  }
-}
-
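-/* Entry point for vertical convolve-and-average. A zero in the first
-   pair of taps (read as one int32) selects the two-tap kernels built
-   from taps 3 and 4; otherwise the 8-tap kernels run. Unsupported
-   widths fall back to the C implementation. */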
-void aom_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
-                                uint8_t *dst, ptrdiff_t dst_stride,
-                                const int16_t *filter_x, int x_step_q4,
-                                const int16_t *filter_y, int y_step_q4, int w,
-                                int h) {
-  int8_t cnt, filt_ver[8];
-
-  assert(y_step_q4 == 16);
-  assert(((const int32_t *)filter_y)[1] != 0x800000);
-
-  for (cnt = 0; cnt < 8; ++cnt) {
-    filt_ver[cnt] = filter_y[cnt];
-  }
-
-  if (((const int32_t *)filter_y)[0] == 0) {
-    switch (w) {
-      case 4:
-        common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, &filt_ver[3], h);
-        break;
-      case 8:
-        common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, &filt_ver[3], h);
-        break;
-      case 16:
-        common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_ver[3], h);
-        break;
-      case 32:
-        common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_ver[3], h);
-        break;
-      case 64:
-        common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, &filt_ver[3], h);
-        break;
-      default:
-        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                                 x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  } else {
-    switch (w) {
-      case 4:
-        common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, filt_ver, h);
-        break;
-      case 8:
-        common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
-                                         (int32_t)dst_stride, filt_ver, h);
-        break;
-      case 16:
-        common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_ver, h);
-        break;
-      case 32:
-        common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_ver, h);
-        break;
-      case 64:
-        common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
-                                          (int32_t)dst_stride, filt_ver, h);
-        break;
-      default:
-        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                                 x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
diff --git a/aom_dsp/mips/convolve2_avg_dspr2.c b/aom_dsp/mips/convolve2_avg_dspr2.c
deleted file mode 100644
index d557115..0000000
--- a/aom_dsp/mips/convolve2_avg_dspr2.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_convolve.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
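-/* Two-tap ("bi") vertical filter with dst averaging; each inline-asm
-   block produces four output pixels, with filter45 packing taps 3 and
-   4 into one word for dpa.w.ph. Used for widths 4 through 32. */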
-static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                         uint8_t *dst, int32_t dst_stride,
-                                         const int16_t *filter_y, int32_t w,
-                                         int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2;
-  uint32_t p1, p2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
-            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
-            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
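-/* Width-64 version of the kernel above, adding a store prefetch for
-   the second half of each destination row. */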
-static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
-                                          int32_t src_stride, uint8_t *dst,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_y, int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2;
-  uint32_t p1, p2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
-            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
-            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
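-/* Entry point: wrdsp sets the accumulator extract position used by
-   extp, then the width selects a kernel, with a fallback to the C
-   implementation for unsupported widths. */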
-void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4, int w,
-                                  int h) {
-  uint32_t pos = 38;
-
-  assert(y_step_q4 == 16);
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                       :
-                       : [pos] "r"(pos));
-
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-    case 8:
-    case 16:
-    case 32:
-      convolve_bi_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
-                                   w, h);
-      break;
-    case 64:
-      prefetch_store(dst + 32);
-      convolve_bi_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
-                                    h);
-      break;
-    default:
-      aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                               x_step_q4, filter_y, y_step_q4, w, h);
-      break;
-  }
-}
-#endif
diff --git a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
deleted file mode 100644
index efbdcf6..0000000
--- a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_convolve.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
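-/* Two-tap horizontal filter with dst averaging for width-4 rows: even
-   and odd outputs are computed on separate accumulators and each
-   result is rounding-averaged (addqh_r.w) with the dst byte. */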
-static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
-                                          int32_t src_stride, uint8_t *dst,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3;
-  uint32_t tn1, tn2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        "lbu              %[p2],          3(%[dst])                      \n\t" /* load odd 2 */
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t" /* even 1 */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t" /* load odd 1 */
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p3],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn2],         0(%[dst])                      \n\t" /* load even 1 */
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t" /* even 2 */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t" /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t" /* average even 1 */
-        "dpa.w.ph         $ac2,           %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp4],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         2(%[dst])                      \n\t" /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t" /* store even 1 */
-
-        /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t" /* average odd 1 */
-        "lbux             %[p3],          %[Temp4](%[cm])                \n\t" /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t" /* store odd 1 */
-
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t" /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t" /* store even 2 */
-
-        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t" /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t" /* store odd 2 */
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
-          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
-          [Temp4] "=&r"(Temp4)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [dst] "r"(dst), [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
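-/* Width-8 rows: eight outputs per row, computing the even and odd
-   phases from balign-shifted source words and averaging each result
-   with the existing dst byte. */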
-static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
-                                          int32_t src_stride, uint8_t *dst,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3, tp4;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t st0, st1;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "ulw              %[tp3],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-        "lbu              %[Temp2],       0(%[dst])                      \n\t"
-        "lbu              %[tp4],         2(%[dst])                      \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac1,           31             \n\t"
-
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-        "addqh_r.w        %[tp4],         %[tp4],         %[st1]         \n\t"
-        "sb               %[Temp2],       0(%[dst])                      \n\t"
-        "sb               %[tp4],         2(%[dst])                      \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-
-        "balign           %[tp3],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "lbu              %[Temp2],       4(%[dst])                      \n\t"
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "sb               %[Temp2],       4(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp3]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp3]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp1],         6(%[dst])                      \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac1,           31             \n\t"
-
-        "lbu              %[tp2],         1(%[dst])                      \n\t"
-        "lbu              %[tp3],         3(%[dst])                      \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[filter45]    \n\t"
-        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp4],         5(%[dst])                      \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp2],         1(%[dst])                      \n\t"
-        "sb               %[tp1],         6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         7(%[dst])                      \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
-        "addqh_r.w        %[tp3],         %[tp3],         %[p4]          \n\t"
-
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-        "addqh_r.w        %[tp4],         %[tp4],         %[p2]          \n\t"
-
-        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[p1]          \n\t"
-
-        /* store bytes */
-        "sb               %[tp3],         3(%[dst])                      \n\t"
-        "sb               %[tp4],         5(%[dst])                      \n\t"
-        "sb               %[tp1],         7(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-          [tp4] "=&r"(tp4), [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1),
-          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1),
-          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [dst] "r"(dst), [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
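-/* Produces 16 pixels per inner-loop pass; count repeats the pass
-   across wider rows. */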
-static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                           int32_t src_stride, uint8_t *dst_ptr,
-                                           int32_t dst_stride,
-                                           const int16_t *filter_x0, int32_t h,
-                                           int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
-            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
-            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-            [Temp3] "=&r"(Temp3)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [dst] "r"(dst), [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
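-/* Width-64 rows: four passes of the 16-pixel pipeline per row, with
-   additional load/store prefetches covering the wider row. */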
-static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                           int32_t src_stride, uint8_t *dst_ptr,
-                                           int32_t dst_stride,
-                                           const int16_t *filter_x0,
-                                           int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
-            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
-            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-            [Temp3] "=&r"(Temp3)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [dst] "r"(dst), [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
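
The filter45 packing at the top of the kernel above reads taps 3 and 4 of the
8-tap array as one 32-bit word, so each dpa.w.ph applies both bilinear taps in
a single instruction. A minimal scalar sketch of that packing, assuming a
little-endian MIPS target (tap3/tap4 are illustrative names, not from the file):

    /* Pack the two bilinear taps as one word, then recover them. */
    const int16_t *filter = &filter_x0[3];              /* taps 3 and 4 */
    const uint32_t filter45 = ((const uint32_t *)filter)[0];
    const int16_t tap3 = (int16_t)(filter45 & 0xFFFF);  /* low halfword  */
    const int16_t tap4 = (int16_t)(filter45 >> 16);     /* high halfword */
    /* dpa.w.ph $ac, p, filter45 then accumulates, per packed pixel pair:
     *   ac += (int16_t)(p & 0xFFFF) * tap3 + (int16_t)(p >> 16) * tap4;  */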
-
-void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h) {
-  uint32_t pos = 38;
-
-  assert(x_step_q4 == 16);
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                       :
-                       : [pos] "r"(pos));
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-      convolve_bi_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                    h);
-      break;
-    case 8:
-      convolve_bi_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                    h);
-      break;
-    case 16:
-      convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                     h, 1);
-      break;
-    case 32:
-      convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                     h, 2);
-      break;
-    case 64:
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      convolve_bi_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                     h);
-      break;
-    default:
-      aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                                x_step_q4, filter_y, y_step_q4, w, h);
-      break;
-  }
-}
-#endif
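
One detail of the dispatcher above that applies to every kernel in this file:
wrdsp sets the DSPControl extract position to 38, so each extp with size 31
returns accumulator bits 38..7, an arithmetic shift right by 7. Assuming
FILTER_BITS == 7, this pairs with the mtlo of 64 (the rounding bias 1 << 6) to
do round-and-shift in hardware; a scalar equivalent for one pixel (sum and cm
stand in for the asm's accumulated tap products and crop table):

    int64_t acc = 64 + sum;            /* mtlo seeds the bias, dpa.w.ph adds */
    int32_t t = (int32_t)(acc >> 7);   /* extp, pos = 38, size = 31          */
    uint8_t px = cm[t];                /* lbux: clamp through aom_ff_cropTbl */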
diff --git a/aom_dsp/mips/convolve8_avg_dspr2.c b/aom_dsp/mips/convolve8_avg_dspr2.c
deleted file mode 100644
index 3574da1..0000000
--- a/aom_dsp/mips/convolve8_avg_dspr2.c
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_convolve.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                      uint8_t *dst, int32_t dst_stride,
-                                      const int16_t *filter_y, int32_t w,
-                                      int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
-            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
-            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
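
The kernel above keeps four vertically filtered output pixels in flight, one
per accumulator $ac0..$ac3, with preceu/precrq/append packing two adjacent
source rows into each 32-bit operand. A scalar model of one dpa.w.ph step on
such an operand (the helper name is illustrative):

    /* Dot product of packed halfword pairs, accumulated (MIPS DSP ASE). */
    static int64_t dpa_w_ph(int64_t acc, uint32_t pixels, uint32_t taps) {
      acc += (int16_t)(pixels & 0xFFFF) * (int16_t)(taps & 0xFFFF); /* low pair  */
      acc += (int16_t)(pixels >> 16) * (int16_t)(taps >> 16);       /* high pair */
      return acc;
    }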
-
-static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
-                                       uint8_t *dst, int32_t dst_stride,
-                                       const int16_t *filter_y, int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
-            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
-            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void aom_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4, int w,
-                                  int h) {
-  assert(y_step_q4 == 16);
-  assert(((const int32_t *)filter_y)[1] != 0x800000);
-
-  if (((const int32_t *)filter_y)[0] == 0) {
-    aom_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                 x_step_q4, filter_y, y_step_q4, w, h);
-  } else {
-    uint32_t pos = 38;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                         :
-                         : [pos] "r"(pos));
-
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-      case 8:
-      case 16:
-      case 32:
-        convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w,
-                                  h);
-        break;
-      case 64:
-        prefetch_store(dst + 32);
-        convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
-                                   h);
-        break;
-      default:
-        aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                                 x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
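
The routing at the top of this function relies on the tap layout: on a
little-endian target one 32-bit word of filter_y covers taps 0 and 1, so a
zero word means the kernel is effectively 2-tap and can take the cheaper
aom_convolve2_avg_vert_dspr2 path. A sketch of that test (the tap values
shown are hypothetical):

    /* Taps 0 and 1 are tested with a single word compare (little-endian). */
    const int16_t taps[8] = { 0, 0, 0, 96, 32, 0, 0, 0 }; /* example 2-tap kernel */
    const int is_2tap = (((const int32_t *)taps)[0] == 0);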
-
-void aom_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                             uint8_t *dst, ptrdiff_t dst_stride,
-                             const int16_t *filter_x, int x_step_q4,
-                             const int16_t *filter_y, int y_step_q4, int w,
-                             int h) {
-  /* Fixed size intermediate buffer places limits on parameters. */
-  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
-  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
-
-  assert(w <= 64);
-  assert(h <= 64);
-  assert(x_step_q4 == 16);
-  assert(y_step_q4 == 16);
-
-  if (intermediate_height < h) intermediate_height = h;
-
-  aom_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
-                      x_step_q4, filter_y, y_step_q4, w, intermediate_height);
-
-  aom_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
-                         x_step_q4, filter_y, y_step_q4, w, h);
-}
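
A bound check on the fixed-size buffer above, with both steps asserted to 16
(no scaling):

    /* intermediate_height = ((h * 16) >> 4) + 7 = h + 7
     * worst case: 64 + 7 = 71 rows, well inside temp[64 * 135].
     * The +7 is the 8-tap vertical context: 3 rows above the block
     * (hence the vertical pass reading from temp + 64 * 3) and 4 below. */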
-
-void aom_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                            uint8_t *dst, ptrdiff_t dst_stride,
-                            const int16_t *filter_x, int filter_x_stride,
-                            const int16_t *filter_y, int filter_y_stride, int w,
-                            int h) {
-  int x, y;
-  uint32_t tp1, tp2, tn1;
-  uint32_t tp3, tp4, tn2;
-
-  (void)filter_x;
-  (void)filter_x_stride;
-  (void)filter_y;
-  (void)filter_y_stride;
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-      /* 1 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
-
-            : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 8:
-      /* 2 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 16:
-      /* 4 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 32:
-      /* 8 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         16(%[src])     \n\t"
-            "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         20(%[src])     \n\t"
-            "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         24(%[src])     \n\t"
-            "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         28(%[src])     \n\t"
-            "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "sw               %[tn1],         24(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 64:
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      /* 16 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_load(src + src_stride + 64);
-        prefetch_store(dst + dst_stride);
-        prefetch_store(dst + dst_stride + 32);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         16(%[src])     \n\t"
-            "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         20(%[src])     \n\t"
-            "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         24(%[src])     \n\t"
-            "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         28(%[src])     \n\t"
-            "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         32(%[src])     \n\t"
-            "ulw              %[tp2],         32(%[dst])     \n\t"
-            "sw               %[tn1],         24(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         36(%[src])     \n\t"
-            "ulw              %[tp4],         36(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         40(%[src])     \n\t"
-            "ulw              %[tp2],         40(%[dst])     \n\t"
-            "sw               %[tn1],         32(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         36(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         44(%[src])     \n\t"
-            "ulw              %[tp4],         44(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         48(%[src])     \n\t"
-            "ulw              %[tp2],         48(%[dst])     \n\t"
-            "sw               %[tn1],         40(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         44(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         52(%[src])     \n\t"
-            "ulw              %[tp4],         52(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "ulw              %[tp1],         56(%[src])     \n\t"
-            "ulw              %[tp2],         56(%[dst])     \n\t"
-            "sw               %[tn1],         48(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         52(%[dst])     \n\t" /* store */
-            "ulw              %[tp3],         60(%[src])     \n\t"
-            "ulw              %[tp4],         60(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
-            "sw               %[tn1],         56(%[dst])     \n\t" /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
-            "sw               %[tn2],         60(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    default:
-      for (y = h; y > 0; --y) {
-        for (x = 0; x < w; ++x) {
-          dst[x] = (dst[x] + src[x] + 1) >> 1;
-        }
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-  }
-}
-#endif
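
The adduh_r.qb used throughout aom_convolve_avg_dspr2 above is the four-lane
form of the scalar fallback in its default case; a sketch of the per-byte
semantics (the helper name is illustrative):

    /* Rounding-halving add of four unsigned bytes per word:
     * each lane computes (a + b + 1) >> 1, matching the C fallback. */
    static uint32_t adduh_r_qb(uint32_t a, uint32_t b) {
      uint32_t r = 0;
      for (int i = 0; i < 32; i += 8) {
        const uint32_t ai = (a >> i) & 0xFF;
        const uint32_t bi = (b >> i) & 0xFF;
        r |= ((ai + bi + 1) >> 1) << i;
      }
      return r;
    }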
diff --git a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
deleted file mode 100644
index f6534b4..0000000
--- a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
+++ /dev/null
@@ -1,998 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_convolve.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_avg_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                       uint8_t *dst, int32_t dst_stride,
-                                       const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4;
-  uint32_t n1, n2, n3, n4;
-  uint32_t tn1, tn2;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "ulw              %[tn2],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
-        "balign           %[tn1],         %[tn2],         3              \n\t"
-        "balign           %[tn2],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        "lbu              %[p2],          3(%[dst])                      \n\t" /* load odd 2 */
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t" /* even 1 */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t" /* load odd 1 */
-        "preceu.ph.qbr    %[n1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[n2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[n3],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n4],          %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[n1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn2],         0(%[dst])                      \n\t" /* load even 1 */
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t" /* even 2 */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[n1],          %[tn1]                         \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t" /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t" /* average even 1 */
-        "dpa.w.ph         $ac2,           %[n2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp4],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         2(%[dst])                      \n\t" /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t" /* store even 1 */
-
-        /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t" /* average odd 1 */
-        "lbux             %[n2],          %[Temp4](%[cm])                \n\t" /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t" /* store odd 1 */
-
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t" /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t" /* store even 2 */
-
-        "addqh_r.w        %[p2],          %[p2],          %[n2]          \n\t" /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t" /* store odd 2 */
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
-          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
-          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
-        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
-          [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
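
The balign sequence in the kernel above synthesizes the odd-phase input
(src + 1) from words already loaded, instead of issuing a second set of
unaligned loads. A scalar model of balign rt, rs, 3, assuming a little-endian
target (the helper name is illustrative):

    /* Result bytes are { rs[1], rs[2], rs[3], rt[0] }:
     * the same 4-byte window shifted one byte later in memory. */
    static uint32_t balign3(uint32_t rt, uint32_t rs) {
      return (rt << 24) | (rs >> 8);
    }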
-
-static void convolve_avg_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
-                                       uint8_t *dst, int32_t dst_stride,
-                                       const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t tn1, tn2, tn3;
-  uint32_t st0, st1;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "ulw              %[tn2],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-        "lbu              %[Temp2],       0(%[dst])                      \n\t"
-        "lbu              %[tn3],         2(%[dst])                      \n\t"
-
-        /* even 2. pixel */
-        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n1],          %[tn2]                         \n\t"
-        "ulw              %[tn1],         12(%[src])                     \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "preceu.ph.qbr    %[p2],          %[tn1]                         \n\t"
-        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,           %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac1,           31             \n\t"
-
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-        "addqh_r.w        %[tn3],         %[tn3],         %[st1]         \n\t"
-        "sb               %[Temp2],       0(%[dst])                      \n\t"
-        "sb               %[tn3],         2(%[dst])                      \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-
-        "balign           %[tn3],         %[tn1],         3              \n\t"
-        "balign           %[tn1],         %[tn2],         3              \n\t"
-        "balign           %[tn2],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "lbu              %[Temp2],       4(%[dst])                      \n\t"
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "sb               %[Temp2],       4(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp1],         6(%[dst])                      \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn1]                         \n\t"
-        "preceu.ph.qbl    %[n1],          %[tn1]                         \n\t"
-        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac1,           31             \n\t"
-
-        "lbu              %[tp2],         1(%[dst])                      \n\t"
-        "lbu              %[tn2],         3(%[dst])                      \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
-        "preceu.ph.qbr    %[p2],          %[tn3]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n1],          %[vector4b]    \n\t"
-        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn3],         5(%[dst])                      \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp2],         1(%[dst])                      \n\t"
-        "sb               %[tp1],         6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac2,           31             \n\t"
-
-        "lbu              %[tn1],         7(%[dst])                      \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
-        "addqh_r.w        %[tn2],         %[tn2],         %[p4]          \n\t"
-
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-        "addqh_r.w        %[tn3],         %[tn3],         %[p2]          \n\t"
-
-        "lbux             %[n1],          %[Temp1](%[cm])                \n\t"
-        "addqh_r.w        %[tn1],         %[tn1],         %[n1]          \n\t"
-
-        /* store bytes */
-        "sb               %[tn2],         3(%[dst])                      \n\t"
-        "sb               %[tn3],         5(%[dst])                      \n\t"
-        "sb               %[tn1],         7(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
-          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
-          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
-          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
-          [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride, uint8_t *dst_ptr,
-                                        int32_t dst_stride,
-                                        const int16_t *filter_x0, int32_t h,
-                                        int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
-            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
-            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-            [Temp3] "=&r"(Temp3)
-          : [filter12] "r"(filter12), [filter34] "r"(filter34),
-            [filter56] "r"(filter56), [filter78] "r"(filter78),
-            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
-            [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride, uint8_t *dst_ptr,
-                                        int32_t dst_stride,
-                                        const int16_t *filter_x0, int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
-            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
-            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-            [Temp3] "=&r"(Temp3)
-          : [filter12] "r"(filter12), [filter34] "r"(filter34),
-            [filter56] "r"(filter56), [filter78] "r"(filter78),
-            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
-            [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void aom_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h) {
-  assert(x_step_q4 == 16);
-  assert(((const int32_t *)filter_x)[1] != 0x800000);
-
-  if (((const int32_t *)filter_x)[0] == 0) {
-    aom_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                  x_step_q4, filter_y, y_step_q4, w, h);
-  } else {
-    uint32_t pos = 38;
-
-    src -= 3;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                         :
-                         : [pos] "r"(pos));
-
-    /* prefetch data to cache memory */
-    prefetch_load(src);
-    prefetch_load(src + 32);
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-        convolve_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                   h);
-        break;
-      case 8:
-        convolve_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                   h);
-        break;
-      case 16:
-        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                    h, 1);
-        break;
-      case 32:
-        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                    h, 2);
-        break;
-      case 64:
-        prefetch_load(src + 64);
-        prefetch_store(dst + 32);
-
-        convolve_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                                    h);
-        break;
-      default:
-        aom_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
-                                  filter_x, x_step_q4, filter_y, y_step_q4, w,
-                                  h);
-        break;
-    }
-  }
-}
-#endif
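Note: the dst averaging in the DSPr2 kernels removed above is built on the
MIPS DSP ASE "addqh_r.w" instruction (add two words, then halve with
rounding). A minimal scalar C sketch of that primitive, using a hypothetical
helper name (rounded_avg_u32 is not part of the codebase; shown for
illustration only):

    /* Models "addqh_r.w rd, rs, rt": rd = (rs + rt + 1) >> 1.
     * For the byte-range pixel values averaged here the sum cannot
     * overflow 32 bits, so the plain expression is exact. */
    static inline uint32_t rounded_avg_u32(uint32_t a, uint32_t b) {
      return (a + b + 1) >> 1;
    }

In the removed code, each filtered pixel is dot-product accumulated with
dpa.w.ph, extracted from the accumulator with extp, clamped through the
aom_ff_cropTbl lookup (lbux), and then combined with the existing dst byte
via this rounded average before the byte store (sb).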
diff --git a/aom_dsp/mips/convolve_common_dspr2.h b/aom_dsp/mips/convolve_common_dspr2.h
index f8fd9e2..c6895fb 100644
--- a/aom_dsp/mips/convolve_common_dspr2.h
+++ b/aom_dsp/mips/convolve_common_dspr2.h
@@ -29,18 +29,6 @@
                                const int16_t *filter_y, int y_step_q4, int w,
                                int h);
 
-void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h);
-
-void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4, int w,
-                                  int h);
-
 void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                          ptrdiff_t dst_stride, const int16_t *filter, int w,
                          int h);
diff --git a/aom_dsp/x86/aom_asm_stubs.c b/aom_dsp/x86/aom_asm_stubs.c
index 522e6ef..2c51cb9 100644
--- a/aom_dsp/x86/aom_asm_stubs.c
+++ b/aom_dsp/x86/aom_asm_stubs.c
@@ -20,12 +20,6 @@
 filter8_1dfunction aom_filter_block1d8_h8_sse2;
 filter8_1dfunction aom_filter_block1d4_v8_sse2;
 filter8_1dfunction aom_filter_block1d4_h8_sse2;
-filter8_1dfunction aom_filter_block1d16_v8_avg_sse2;
-filter8_1dfunction aom_filter_block1d16_h8_avg_sse2;
-filter8_1dfunction aom_filter_block1d8_v8_avg_sse2;
-filter8_1dfunction aom_filter_block1d8_h8_avg_sse2;
-filter8_1dfunction aom_filter_block1d4_v8_avg_sse2;
-filter8_1dfunction aom_filter_block1d4_h8_avg_sse2;
 
 filter8_1dfunction aom_filter_block1d16_v2_sse2;
 filter8_1dfunction aom_filter_block1d16_h2_sse2;
@@ -33,12 +27,6 @@
 filter8_1dfunction aom_filter_block1d8_h2_sse2;
 filter8_1dfunction aom_filter_block1d4_v2_sse2;
 filter8_1dfunction aom_filter_block1d4_h2_sse2;
-filter8_1dfunction aom_filter_block1d16_v2_avg_sse2;
-filter8_1dfunction aom_filter_block1d16_h2_avg_sse2;
-filter8_1dfunction aom_filter_block1d8_v2_avg_sse2;
-filter8_1dfunction aom_filter_block1d8_h2_avg_sse2;
-filter8_1dfunction aom_filter_block1d4_v2_avg_sse2;
-filter8_1dfunction aom_filter_block1d4_h2_avg_sse2;
 
 // void aom_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                               uint8_t *dst, ptrdiff_t dst_stride,
@@ -50,33 +38,15 @@
 //                              const int16_t *filter_x, int x_step_q4,
 //                              const int16_t *filter_y, int y_step_q4,
 //                              int w, int h);
-// void aom_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
-//                                   uint8_t *dst, ptrdiff_t dst_stride,
-//                                   const int16_t *filter_x, int x_step_q4,
-//                                   const int16_t *filter_y, int y_step_q4,
-//                                   int w, int h);
-// void aom_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
-//                                  uint8_t *dst, ptrdiff_t dst_stride,
-//                                  const int16_t *filter_x, int x_step_q4,
-//                                  const int16_t *filter_y, int y_step_q4,
-//                                  int w, int h);
 FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , sse2);
 FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , sse2);
-FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, sse2);
-FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2);
 
 // void aom_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                         uint8_t *dst, ptrdiff_t dst_stride,
 //                         const int16_t *filter_x, int x_step_q4,
 //                         const int16_t *filter_y, int y_step_q4,
 //                         int w, int h);
-// void aom_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
-//                             uint8_t *dst, ptrdiff_t dst_stride,
-//                             const int16_t *filter_x, int x_step_q4,
-//                             const int16_t *filter_y, int y_step_q4,
-//                             int w, int h);
 FUN_CONV_2D(, sse2);
-FUN_CONV_2D(avg_, sse2);
 
 #if ARCH_X86_64
 highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
@@ -85,12 +55,6 @@
 highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_avg_sse2;
 
 highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_sse2;
@@ -98,12 +62,6 @@
 highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d8_v2_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_avg_sse2;
-highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_avg_sse2;
 
 // void aom_highbd_convolve8_horiz_sse2(const uint8_t *src,
 //                                      ptrdiff_t src_stride,
@@ -123,42 +81,15 @@
 //                                     const int16_t *filter_y,
 //                                     int y_step_q4,
 //                                     int w, int h, int bd);
-// void aom_highbd_convolve8_avg_horiz_sse2(const uint8_t *src,
-//                                          ptrdiff_t src_stride,
-//                                          uint8_t *dst,
-//                                          ptrdiff_t dst_stride,
-//                                          const int16_t *filter_x,
-//                                          int x_step_q4,
-//                                          const int16_t *filter_y,
-//                                          int y_step_q4,
-//                                          int w, int h, int bd);
-// void aom_highbd_convolve8_avg_vert_sse2(const uint8_t *src,
-//                                         ptrdiff_t src_stride,
-//                                         uint8_t *dst,
-//                                         ptrdiff_t dst_stride,
-//                                         const int16_t *filter_x,
-//                                         int x_step_q4,
-//                                         const int16_t *filter_y,
-//                                         int y_step_q4,
-//                                         int w, int h, int bd);
 HIGH_FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , sse2);
 HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , sse2);
-HIGH_FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, sse2);
-HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
-                 sse2);
 
 // void aom_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
 //                                const int16_t *filter_x, int x_step_q4,
 //                                const int16_t *filter_y, int y_step_q4,
 //                                int w, int h, int bd);
-// void aom_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
-//                                    uint8_t *dst, ptrdiff_t dst_stride,
-//                                    const int16_t *filter_x, int x_step_q4,
-//                                    const int16_t *filter_y, int y_step_q4,
-//                                    int w, int h, int bd);
 HIGH_FUN_CONV_2D(, sse2);
-HIGH_FUN_CONV_2D(avg_, sse2);
 
 // The SSE2 highbd convolve functions can deal with coefficients up to 32767.
 // So redirect highbd_convolve8_add_src to regular highbd_convolve8.
diff --git a/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
index 11889bb..b6f0407 100644
--- a/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
@@ -394,169 +394,6 @@
     pop         rbp
     ret
 
-global sym(aom_highbd_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d4_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 7
-    %define k0k6 [rsp + 16 * 0]
-    %define k2k5 [rsp + 16 * 1]
-    %define k3k4 [rsp + 16 * 2]
-    %define k1k7 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define max [rsp + 16 * 5]
-    %define min [rsp + 16 * 6]
-
-    HIGH_GET_FILTERS_4
-
-    mov         rsi, arg(0)                 ;src_ptr
-    mov         rdi, arg(2)                 ;output_ptr
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rbx, [rbx + rbx]
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movq        xmm0, [rsi]                 ;load src: row 0
-    movq        xmm1, [rsi + rax]           ;1
-    movq        xmm6, [rsi + rdx * 2]       ;6
-    lea         rsi,  [rsi + rax]
-    movq        xmm7, [rsi + rdx * 2]       ;7
-    movq        xmm2, [rsi + rax]           ;2
-    movq        xmm3, [rsi + rax * 2]       ;3
-    movq        xmm4, [rsi + rdx]           ;4
-    movq        xmm5, [rsi + rax * 4]       ;5
-
-    HIGH_APPLY_FILTER_4 1
-
-    lea         rdi, [rdi + rbx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 7
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d8_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 8
-    %define k0k1 [rsp + 16 * 0]
-    %define k6k7 [rsp + 16 * 1]
-    %define k2k5 [rsp + 16 * 2]
-    %define k3k4 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define temp [rsp + 16 * 5]
-    %define max [rsp + 16 * 6]
-    %define min [rsp + 16 * 7]
-
-    HIGH_GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rbx, [rbx + rbx]
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-.loop:
-    LOAD_VERT_8 0
-    HIGH_APPLY_FILTER_8 1, 0
-
-    lea         rdi, [rdi + rbx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 8
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d16_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 8
-    %define k0k1 [rsp + 16 * 0]
-    %define k6k7 [rsp + 16 * 1]
-    %define k2k5 [rsp + 16 * 2]
-    %define k3k4 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define temp [rsp + 16 * 5]
-    %define max [rsp + 16 * 6]
-    %define min [rsp + 16 * 7]
-
-    HIGH_GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rbx, [rbx + rbx]
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-.loop:
-    LOAD_VERT_8 0
-    HIGH_APPLY_FILTER_8 1, 0
-    sub         rsi, rax
-
-    LOAD_VERT_8 16
-    HIGH_APPLY_FILTER_8 1, 16
-    add         rdi, rbx
-
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 8
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
 ;void aom_filter_block1d4_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
@@ -774,194 +611,3 @@
     UNSHADOW_ARGS
     pop         rbp
     ret
-
-global sym(aom_highbd_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d4_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 7
-    %define k0k6 [rsp + 16 * 0]
-    %define k2k5 [rsp + 16 * 1]
-    %define k3k4 [rsp + 16 * 2]
-    %define k1k7 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define max [rsp + 16 * 5]
-    %define min [rsp + 16 * 6]
-
-    HIGH_GET_FILTERS_4
-
-    mov         rsi, arg(0)                 ;src_ptr
-    mov         rdi, arg(2)                 ;output_ptr
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rdx, [rdx + rdx]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 6]           ;load src
-    movdqu      xmm4,   [rsi + 2]
-    movdqa      xmm1, xmm0
-    movdqa      xmm6, xmm4
-    movdqa      xmm7, xmm4
-    movdqa      xmm2, xmm0
-    movdqa      xmm3, xmm0
-    movdqa      xmm5, xmm4
-
-    psrldq      xmm1, 2
-    psrldq      xmm6, 4
-    psrldq      xmm7, 6
-    psrldq      xmm2, 4
-    psrldq      xmm3, 6
-    psrldq      xmm5, 2
-
-    HIGH_APPLY_FILTER_4 1
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 7
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d8_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 8
-    %define k0k1 [rsp + 16 * 0]
-    %define k6k7 [rsp + 16 * 1]
-    %define k2k5 [rsp + 16 * 2]
-    %define k3k4 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define temp [rsp + 16 * 5]
-    %define max [rsp + 16 * 6]
-    %define min [rsp + 16 * 7]
-
-    HIGH_GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rdx, [rdx + rdx]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 6]           ;load src
-    movdqu      xmm1,   [rsi - 4]
-    movdqu      xmm2,   [rsi - 2]
-    movdqu      xmm3,   [rsi]
-    movdqu      xmm4,   [rsi + 2]
-    movdqu      xmm5,   [rsi + 4]
-    movdqu      xmm6,   [rsi + 6]
-    movdqu      xmm7,   [rsi + 8]
-
-    HIGH_APPLY_FILTER_8 1, 0
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 8
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d16_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 8
-    %define k0k1 [rsp + 16 * 0]
-    %define k6k7 [rsp + 16 * 1]
-    %define k2k5 [rsp + 16 * 2]
-    %define k3k4 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define temp [rsp + 16 * 5]
-    %define max [rsp + 16 * 6]
-    %define min [rsp + 16 * 7]
-
-    HIGH_GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    lea         rax, [rax + rax]            ;bytes per line
-    lea         rdx, [rdx + rdx]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 6]           ;load src
-    movdqu      xmm1,   [rsi - 4]
-    movdqu      xmm2,   [rsi - 2]
-    movdqu      xmm3,   [rsi]
-    movdqu      xmm4,   [rsi + 2]
-    movdqu      xmm5,   [rsi + 4]
-    movdqu      xmm6,   [rsi + 6]
-    movdqu      xmm7,   [rsi + 8]
-
-    HIGH_APPLY_FILTER_8 1, 0
-
-    movdqu      xmm0,   [rsi + 10]           ;load src
-    movdqu      xmm1,   [rsi + 12]
-    movdqu      xmm2,   [rsi + 14]
-    movdqu      xmm3,   [rsi + 16]
-    movdqu      xmm4,   [rsi + 18]
-    movdqu      xmm5,   [rsi + 20]
-    movdqu      xmm6,   [rsi + 22]
-    movdqu      xmm7,   [rsi + 24]
-
-    HIGH_APPLY_FILTER_8 1, 16
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 8
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
diff --git a/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
index c6479d7..7b3fe64 100644
--- a/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
@@ -256,86 +256,6 @@
     ret
 %endif
 
-global sym(aom_highbd_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d4_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM_4
-.loop:
-    movq        xmm0, [rsi]                 ;load src
-    movq        xmm1, [rsi + 2*rax]
-
-    HIGH_APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-%if ARCH_X86_64
-global sym(aom_highbd_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d8_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 8
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM
-.loop:
-    movdqu      xmm0, [rsi]                 ;0
-    movdqu      xmm1, [rsi + 2*rax]         ;1
-
-    HIGH_APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d16_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 9
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM
-.loop:
-    movdqu        xmm0, [rsi]               ;0
-    movdqu        xmm1, [rsi + 2*rax]       ;1
-    movdqu        xmm2, [rsi + 16]
-    movdqu        xmm3, [rsi + 2*rax + 16]
-
-    HIGH_APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-%endif
-
 global sym(aom_highbd_filter_block1d4_h2_sse2) PRIVATE
 sym(aom_highbd_filter_block1d4_h2_sse2):
     push        rbp
@@ -416,84 +336,3 @@
     pop         rbp
     ret
 %endif
-
-global sym(aom_highbd_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d4_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM_4
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqa      xmm1, xmm0
-    psrldq      xmm1, 2
-
-    HIGH_APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-%if ARCH_X86_64
-global sym(aom_highbd_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d8_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 8
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqu      xmm1, [rsi + 2]
-
-    HIGH_APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_highbd_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(aom_highbd_filter_block1d16_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    SAVE_XMM 9
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    HIGH_GET_PARAM
-.loop:
-    movdqu      xmm0,   [rsi]               ;load src
-    movdqu      xmm1,   [rsi + 2]
-    movdqu      xmm2,   [rsi + 16]
-    movdqu      xmm3,   [rsi + 18]
-
-    HIGH_APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-%endif
diff --git a/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
index a9a15fd..7dd1f1d 100644
--- a/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
+++ b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
@@ -285,12 +285,6 @@
 filter8_1dfunction aom_filter_block1d8_h8_ssse3;
 filter8_1dfunction aom_filter_block1d4_v8_ssse3;
 filter8_1dfunction aom_filter_block1d4_h8_ssse3;
-filter8_1dfunction aom_filter_block1d16_v8_avg_ssse3;
-filter8_1dfunction aom_filter_block1d16_h8_avg_ssse3;
-filter8_1dfunction aom_filter_block1d8_v8_avg_ssse3;
-filter8_1dfunction aom_filter_block1d8_h8_avg_ssse3;
-filter8_1dfunction aom_filter_block1d4_v8_avg_ssse3;
-filter8_1dfunction aom_filter_block1d4_h8_avg_ssse3;
 filter8_1dfunction aom_filter_block1d16_v8_add_src_ssse3;
 filter8_1dfunction aom_filter_block1d16_h8_add_src_ssse3;
 filter8_1dfunction aom_filter_block1d8_v8_add_src_ssse3;
@@ -304,12 +298,6 @@
 filter8_1dfunction aom_filter_block1d8_h2_ssse3;
 filter8_1dfunction aom_filter_block1d4_v2_ssse3;
 filter8_1dfunction aom_filter_block1d4_h2_ssse3;
-filter8_1dfunction aom_filter_block1d16_v2_avg_ssse3;
-filter8_1dfunction aom_filter_block1d16_h2_avg_ssse3;
-filter8_1dfunction aom_filter_block1d8_v2_avg_ssse3;
-filter8_1dfunction aom_filter_block1d8_h2_avg_ssse3;
-filter8_1dfunction aom_filter_block1d4_v2_avg_ssse3;
-filter8_1dfunction aom_filter_block1d4_h2_avg_ssse3;
 
 // void aom_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
@@ -321,21 +309,8 @@
 //                               const int16_t *filter_x, int x_step_q4,
 //                               const int16_t *filter_y, int y_step_q4,
 //                               int w, int h);
-// void aom_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
-//                                    uint8_t *dst, ptrdiff_t dst_stride,
-//                                    const int16_t *filter_x, int x_step_q4,
-//                                    const int16_t *filter_y, int y_step_q4,
-//                                    int w, int h);
-// void aom_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
-//                                   uint8_t *dst, ptrdiff_t dst_stride,
-//                                   const int16_t *filter_x, int x_step_q4,
-//                                   const int16_t *filter_y, int y_step_q4,
-//                                   int w, int h);
 FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , ssse3);
 FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , ssse3);
-FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, ssse3);
-FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
-            ssse3);
 
 FUN_CONV_1D_NO_BILINEAR(add_src_horiz, x_step_q4, filter_x, h, src, add_src_,
                         ssse3);
@@ -347,11 +322,5 @@
 //                          const int16_t *filter_x, int x_step_q4,
 //                          const int16_t *filter_y, int y_step_q4,
 //                          int w, int h);
-// void aom_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
-//                              uint8_t *dst, ptrdiff_t dst_stride,
-//                              const int16_t *filter_x, int x_step_q4,
-//                              const int16_t *filter_y, int y_step_q4,
-//                              int w, int h);
 FUN_CONV_2D(, ssse3);
-FUN_CONV_2D(avg_, ssse3);
 FUN_CONV_2D_NO_BILINEAR(add_src_, add_src_, ssse3);
diff --git a/aom_dsp/x86/aom_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
index b4bf4ee..c88fc9f 100644
--- a/aom_dsp/x86/aom_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
@@ -370,166 +370,6 @@
     pop         rbp
     ret
 
-global sym(aom_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(aom_filter_block1d4_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 6
-    %define k0k1 [rsp + 16 * 0]
-    %define k2k3 [rsp + 16 * 1]
-    %define k5k4 [rsp + 16 * 2]
-    %define k6k7 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define zero [rsp + 16 * 5]
-
-    GET_FILTERS_4
-
-    mov         rsi, arg(0)                 ;src_ptr
-    mov         rdi, arg(2)                 ;output_ptr
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movd        xmm0, [rsi]                 ;load src: row 0
-    movd        xmm1, [rsi + rax]           ;1
-    movd        xmm6, [rsi + rdx * 2]       ;6
-    lea         rsi,  [rsi + rax]
-    movd        xmm7, [rsi + rdx * 2]       ;7
-    movd        xmm2, [rsi + rax]           ;2
-    movd        xmm3, [rsi + rax * 2]       ;3
-    movd        xmm4, [rsi + rdx]           ;4
-    movd        xmm5, [rsi + rax * 4]       ;5
-
-    APPLY_FILTER_4 1
-
-    lea         rdi, [rdi + rbx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 6
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(aom_filter_block1d8_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 10
-    %define k0 [rsp + 16 * 0]
-    %define k1 [rsp + 16 * 1]
-    %define k2 [rsp + 16 * 2]
-    %define k3 [rsp + 16 * 3]
-    %define k4 [rsp + 16 * 4]
-    %define k5 [rsp + 16 * 5]
-    %define k6 [rsp + 16 * 6]
-    %define k7 [rsp + 16 * 7]
-    %define krd [rsp + 16 * 8]
-    %define zero [rsp + 16 * 9]
-
-    GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-.loop:
-    LOAD_VERT_8 0
-    APPLY_FILTER_8 1, 0
-
-    lea         rdi, [rdi + rbx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 10
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(aom_filter_block1d16_v8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rbx
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 10
-    %define k0 [rsp + 16 * 0]
-    %define k1 [rsp + 16 * 1]
-    %define k2 [rsp + 16 * 2]
-    %define k3 [rsp + 16 * 3]
-    %define k4 [rsp + 16 * 4]
-    %define k5 [rsp + 16 * 5]
-    %define k6 [rsp + 16 * 6]
-    %define k7 [rsp + 16 * 7]
-    %define krd [rsp + 16 * 8]
-    %define zero [rsp + 16 * 9]
-
-    GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rbx, DWORD PTR arg(3)       ;out_pitch
-    lea         rdx, [rax + rax * 2]
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-.loop:
-    LOAD_VERT_8 0
-    APPLY_FILTER_8 1, 0
-    sub         rsi, rax
-
-    LOAD_VERT_8 8
-    APPLY_FILTER_8 1, 8
-    add         rdi, rbx
-
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 10
-    pop rsp
-    pop rbx
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
 ;void aom_filter_block1d4_h8_sse2
 ;(
 ;    unsigned char  *src_ptr,
@@ -773,220 +613,3 @@
     UNSHADOW_ARGS
     pop         rbp
     ret
-
-global sym(aom_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(aom_filter_block1d4_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 6
-    %define k0k1 [rsp + 16 * 0]
-    %define k2k3 [rsp + 16 * 1]
-    %define k5k4 [rsp + 16 * 2]
-    %define k6k7 [rsp + 16 * 3]
-    %define krd [rsp + 16 * 4]
-    %define zero [rsp + 16 * 5]
-
-    GET_FILTERS_4
-
-    mov         rsi, arg(0)                 ;src_ptr
-    mov         rdi, arg(2)                 ;output_ptr
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 3]           ;load src
-
-    movdqa      xmm1, xmm0
-    movdqa      xmm6, xmm0
-    movdqa      xmm7, xmm0
-    movdqa      xmm2, xmm0
-    movdqa      xmm3, xmm0
-    movdqa      xmm5, xmm0
-    movdqa      xmm4, xmm0
-
-    psrldq      xmm1, 1
-    psrldq      xmm6, 6
-    psrldq      xmm7, 7
-    psrldq      xmm2, 2
-    psrldq      xmm3, 3
-    psrldq      xmm5, 5
-    psrldq      xmm4, 4
-
-    APPLY_FILTER_4 1
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 6
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(aom_filter_block1d8_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 10
-    %define k0 [rsp + 16 * 0]
-    %define k1 [rsp + 16 * 1]
-    %define k2 [rsp + 16 * 2]
-    %define k3 [rsp + 16 * 3]
-    %define k4 [rsp + 16 * 4]
-    %define k5 [rsp + 16 * 5]
-    %define k6 [rsp + 16 * 6]
-    %define k7 [rsp + 16 * 7]
-    %define krd [rsp + 16 * 8]
-    %define zero [rsp + 16 * 9]
-
-    GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 3]           ;load src
-
-    movdqa      xmm1, xmm0
-    movdqa      xmm6, xmm0
-    movdqa      xmm7, xmm0
-    movdqa      xmm2, xmm0
-    movdqa      xmm5, xmm0
-    movdqa      xmm3, xmm0
-    movdqa      xmm4, xmm0
-
-    psrldq      xmm1, 1
-    psrldq      xmm6, 6
-    psrldq      xmm7, 7
-    psrldq      xmm2, 2
-    psrldq      xmm5, 5
-    psrldq      xmm3, 3
-    psrldq      xmm4, 4
-
-    APPLY_FILTER_8 1, 0
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 10
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(aom_filter_block1d16_h8_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    ALIGN_STACK 16, rax
-    sub         rsp, 16 * 10
-    %define k0 [rsp + 16 * 0]
-    %define k1 [rsp + 16 * 1]
-    %define k2 [rsp + 16 * 2]
-    %define k3 [rsp + 16 * 3]
-    %define k4 [rsp + 16 * 4]
-    %define k5 [rsp + 16 * 5]
-    %define k6 [rsp + 16 * 6]
-    %define k7 [rsp + 16 * 7]
-    %define krd [rsp + 16 * 8]
-    %define zero [rsp + 16 * 9]
-
-    GET_FILTERS
-
-    movsxd      rax, DWORD PTR arg(1)       ;pixels_per_line
-    movsxd      rdx, DWORD PTR arg(3)       ;out_pitch
-    movsxd      rcx, DWORD PTR arg(4)       ;output_height
-
-.loop:
-    movdqu      xmm0,   [rsi - 3]           ;load src
-
-    movdqa      xmm1, xmm0
-    movdqa      xmm6, xmm0
-    movdqa      xmm7, xmm0
-    movdqa      xmm2, xmm0
-    movdqa      xmm5, xmm0
-    movdqa      xmm3, xmm0
-    movdqa      xmm4, xmm0
-
-    psrldq      xmm1, 1
-    psrldq      xmm6, 6
-    psrldq      xmm7, 7
-    psrldq      xmm2, 2
-    psrldq      xmm5, 5
-    psrldq      xmm3, 3
-    psrldq      xmm4, 4
-
-    APPLY_FILTER_8 1, 0
-
-    movdqu      xmm0,   [rsi + 5]           ;load src
-
-    movdqa      xmm1, xmm0
-    movdqa      xmm6, xmm0
-    movdqa      xmm7, xmm0
-    movdqa      xmm2, xmm0
-    movdqa      xmm5, xmm0
-    movdqa      xmm3, xmm0
-    movdqa      xmm4, xmm0
-
-    psrldq      xmm1, 1
-    psrldq      xmm6, 6
-    psrldq      xmm7, 7
-    psrldq      xmm2, 2
-    psrldq      xmm5, 5
-    psrldq      xmm3, 3
-    psrldq      xmm4, 4
-
-    APPLY_FILTER_8 1, 8
-
-    lea         rsi, [rsi + rax]
-    lea         rdi, [rdi + rdx]
-    dec         rcx
-    jnz         .loop
-
-    add rsp, 16 * 10
-    pop rsp
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
diff --git a/aom_dsp/x86/aom_subpixel_8t_ssse3.asm b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
index 05bed72..6dc2310 100644
--- a/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
@@ -375,11 +375,8 @@
 
 INIT_XMM ssse3
 SUBPIX_HFILTER16 h8
-SUBPIX_HFILTER16 h8_avg
 SUBPIX_HFILTER8  h8
-SUBPIX_HFILTER8  h8_avg
 SUBPIX_HFILTER4  h8
-SUBPIX_HFILTER4  h8_avg
 
 SUBPIX_HFILTER16 h8_add_src
 SUBPIX_HFILTER8  h8_add_src
@@ -873,11 +870,8 @@
 
 INIT_XMM ssse3
 SUBPIX_VFILTER16     v8
-SUBPIX_VFILTER16 v8_avg
 SUBPIX_VFILTER       v8, 8
-SUBPIX_VFILTER   v8_avg, 8
 SUBPIX_VFILTER       v8, 4
-SUBPIX_VFILTER   v8_avg, 4
 
 %if (ARCH_X86 || X86_SUBPIX_VFILTER_PREFER_SLOW_CELERON)
 SUBPIX_VFILTER16 v8_add_src
diff --git a/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
index 57aedcd..d0b4b28 100644
--- a/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
@@ -214,84 +214,6 @@
     pop         rbp
     ret
 
-global sym(aom_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(aom_filter_block1d4_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM_4
-.loop:
-    movd        xmm0, [rsi]                 ;load src
-    movd        xmm1, [rsi + rax]
-
-    APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(aom_filter_block1d8_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movq        xmm0, [rsi]                 ;0
-    movq        xmm1, [rsi + rax]           ;1
-
-    APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(aom_filter_block1d16_v2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu        xmm0, [rsi]               ;0
-    movdqu        xmm1, [rsi + rax]         ;1
-    movdqa        xmm2, xmm0
-    movdqa        xmm3, xmm1
-
-    APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
 global sym(aom_filter_block1d4_h2_sse2) PRIVATE
 sym(aom_filter_block1d4_h2_sse2):
     push        rbp
@@ -371,83 +293,3 @@
     UNSHADOW_ARGS
     pop         rbp
     ret
-
-global sym(aom_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(aom_filter_block1d4_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM_4
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqa      xmm1, xmm0
-    psrldq      xmm1, 1
-
-    APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(aom_filter_block1d8_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqa      xmm1, xmm0
-    psrldq      xmm1, 1
-
-    APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(aom_filter_block1d16_h2_avg_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu      xmm0,   [rsi]               ;load src
-    movdqu      xmm1,   [rsi + 1]
-    movdqa      xmm2, xmm0
-    movdqa      xmm3, xmm1
-
-    APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
diff --git a/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
index 727fb30..59edc49 100644
--- a/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
@@ -187,83 +187,6 @@
     pop         rbp
     ret
 
-global sym(aom_filter_block1d4_v2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d4_v2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM_4
-.loop:
-    movd        xmm0, [rsi]                 ;load src
-    movd        xmm1, [rsi + rax]
-
-    APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_v2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d8_v2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movq        xmm0, [rsi]                 ;0
-    movq        xmm1, [rsi + rax]           ;1
-
-    APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_v2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d16_v2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu        xmm0, [rsi]               ;0
-    movdqu        xmm1, [rsi + rax]         ;1
-    movdqa        xmm2, xmm0
-
-    APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
 global sym(aom_filter_block1d4_h2_ssse3) PRIVATE
 sym(aom_filter_block1d4_h2_ssse3):
     push        rbp
@@ -342,82 +265,3 @@
     UNSHADOW_ARGS
     pop         rbp
     ret
-
-global sym(aom_filter_block1d4_h2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d4_h2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM_4
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqa      xmm1, xmm0
-    psrldq      xmm1, 1
-
-    APPLY_FILTER_4 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d8_h2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d8_h2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu      xmm0, [rsi]                 ;load src
-    movdqa      xmm1, xmm0
-    psrldq      xmm1, 1
-
-    APPLY_FILTER_8 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-global sym(aom_filter_block1d16_h2_avg_ssse3) PRIVATE
-sym(aom_filter_block1d16_h2_avg_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-    GET_PARAM
-.loop:
-    movdqu      xmm0,   [rsi]               ;load src
-    movdqu      xmm1,   [rsi + 1]
-    movdqa      xmm2, xmm0
-
-    APPLY_FILTER_16 1
-    jnz         .loop
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
diff --git a/aom_dsp/x86/highbd_convolve_avx2.c b/aom_dsp/x86/highbd_convolve_avx2.c
index a19af49..81586df 100644
--- a/aom_dsp/x86/highbd_convolve_avx2.c
+++ b/aom_dsp/x86/highbd_convolve_avx2.c
@@ -1066,250 +1066,6 @@
   } while (height > 0);
 }
 
-// Calculation that averages the result with the destination pixels
-
-static INLINE void store_8x1_avg_pixels(const __m256i *y0, const __m256i *mask,
-                                        uint16_t *dst) {
-  const __m128i a0 = _mm256_castsi256_si128(*y0);
-  const __m128i a1 = _mm256_extractf128_si256(*y0, 1);
-  __m128i res = _mm_packus_epi32(a0, a1);
-  const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
-  res = _mm_min_epi16(res, _mm256_castsi256_si128(*mask));
-  res = _mm_avg_epu16(res, pix);
-  _mm_storeu_si128((__m128i *)dst, res);
-}
-
-static INLINE void store_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
-                                        const __m256i *mask, uint16_t *dst,
-                                        ptrdiff_t pitch) {
-  __m256i a = _mm256_packus_epi32(*y0, *y1);
-  const __m128i pix0 = _mm_loadu_si128((const __m128i *)dst);
-  const __m128i pix1 = _mm_loadu_si128((const __m128i *)(dst + pitch));
-  const __m256i pix =
-      _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
-  a = _mm256_min_epi16(a, *mask);
-  a = _mm256_avg_epu16(a, pix);
-  _mm_storeu_si128((__m128i *)dst, _mm256_castsi256_si128(a));
-  _mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
-}
-
-static INLINE void store_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
-                                         const __m256i *mask, uint16_t *dst) {
-  __m256i a = _mm256_packus_epi32(*y0, *y1);
-  const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
-  a = _mm256_min_epi16(a, *mask);
-  a = _mm256_avg_epu16(a, pix);
-  _mm256_storeu_si256((__m256i *)dst, a);
-}
-
-static INLINE void store_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
-                                         const __m256i *mask, uint16_t *dst,
-                                         ptrdiff_t pitch) {
-  const __m256i pix0 = _mm256_loadu_si256((const __m256i *)dst);
-  const __m256i pix1 = _mm256_loadu_si256((const __m256i *)(dst + pitch));
-  __m256i p = _mm256_min_epi16(*y0, *mask);
-  p = _mm256_avg_epu16(p, pix0);
-  _mm256_storeu_si256((__m256i *)dst, p);
-
-  p = _mm256_min_epi16(*y1, *mask);
-  p = _mm256_avg_epu16(p, pix1);
-  _mm256_storeu_si256((__m256i *)(dst + pitch), p);
-}
-
-static INLINE void store_8x1_2t_avg_pixels_ver(const __m128i *y0,
-                                               const __m128i *y1,
-                                               const __m128i *mask,
-                                               uint16_t *dst) {
-  __m128i res = _mm_packus_epi32(*y0, *y1);
-  const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
-  res = _mm_min_epi16(res, *mask);
-  res = _mm_avg_epu16(res, pix);
-  _mm_storeu_si128((__m128i *)dst, res);
-}
-
-static void aom_highbd_filter_block1d8_h8_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[8], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff[4];
-  pack_filters(filter, ff);
-
-  src_ptr -= 3;
-  do {
-    pack_8x2_pixels(src_ptr, src_pitch, signal);
-    filter_8x1_pixels(signal, ff, &res0);
-    filter_8x1_pixels(&signal[4], ff, &res1);
-    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
-    height -= 2;
-    src_ptr += src_pitch << 1;
-    dst_ptr += dst_pitch << 1;
-  } while (height > 1);
-
-  if (height > 0) {
-    pack_8x1_pixels(src_ptr, signal);
-    filter_8x1_pixels(signal, ff, &res0);
-    store_8x1_avg_pixels(&res0, &max, dst_ptr);
-  }
-}
-
-static void aom_highbd_filter_block1d16_h8_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[8], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff[4];
-  pack_filters(filter, ff);
-
-  src_ptr -= 3;
-  do {
-    pack_16x1_pixels(src_ptr, signal);
-    filter_8x1_pixels(signal, ff, &res0);
-    filter_8x1_pixels(&signal[4], ff, &res1);
-    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
-    height -= 1;
-    src_ptr += src_pitch;
-    dst_ptr += dst_pitch;
-  } while (height > 0);
-}
-
-static void aom_highbd_filter_block1d8_v8_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[9], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff[4];
-  pack_filters(filter, ff);
-
-  pack_8x9_init(src_ptr, src_pitch, signal);
-
-  do {
-    pack_8x9_pixels(src_ptr, src_pitch, signal);
-
-    filter_8x9_pixels(signal, ff, &res0, &res1);
-    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
-    update_pixels(signal);
-
-    src_ptr += src_pitch << 1;
-    dst_ptr += dst_pitch << 1;
-    height -= 2;
-  } while (height > 0);
-}
-
-static void aom_highbd_filter_block1d16_v8_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[17], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff[4];
-  pack_filters(filter, ff);
-
-  pack_16x9_init(src_ptr, src_pitch, signal);
-
-  do {
-    pack_16x9_pixels(src_ptr, src_pitch, signal);
-    filter_16x9_pixels(signal, ff, &res0, &res1);
-    store_16x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
-    update_16x9_pixels(signal);
-
-    src_ptr += src_pitch << 1;
-    dst_ptr += dst_pitch << 1;
-    height -= 2;
-  } while (height > 0);
-}
-
-static void aom_highbd_filter_block1d8_h2_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[2], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff;
-  pack_2t_filter(filter, &ff);
-
-  src_ptr -= 3;
-  do {
-    pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
-    filter_16_2t_pixels(signal, &ff, &res0, &res1);
-    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
-    height -= 2;
-    src_ptr += src_pitch << 1;
-    dst_ptr += dst_pitch << 1;
-  } while (height > 1);
-
-  if (height > 0) {
-    pack_8x1_2t_pixels(src_ptr, signal);
-    filter_8x1_2t_pixels(signal, &ff, &res0);
-    store_8x1_avg_pixels(&res0, &max, dst_ptr);
-  }
-}
-
-static void aom_highbd_filter_block1d16_h2_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[2], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-
-  __m256i ff;
-  pack_2t_filter(filter, &ff);
-
-  src_ptr -= 3;
-  do {
-    pack_16x1_2t_pixels(src_ptr, signal);
-    filter_16_2t_pixels(signal, &ff, &res0, &res1);
-    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
-    height -= 1;
-    src_ptr += src_pitch;
-    dst_ptr += dst_pitch;
-  } while (height > 0);
-}
-
-static void aom_highbd_filter_block1d16_v2_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m256i signal[3], res0, res1;
-  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
-  __m256i ff;
-
-  pack_2t_filter(filter, &ff);
-  pack_16x2_init(src_ptr, signal);
-
-  do {
-    pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
-    filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
-    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
-
-    src_ptr += src_pitch;
-    dst_ptr += dst_pitch;
-    height -= 1;
-  } while (height > 0);
-}
-
-static void aom_highbd_filter_block1d8_v2_avg_avx2(
-    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
-    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
-  __m128i signal[3], res0, res1;
-  const __m128i max = _mm_set1_epi16((1 << bd) - 1);
-  __m128i ff;
-
-  pack_8x1_2t_filter(filter, &ff);
-  pack_8x2_init(src_ptr, signal);
-
-  do {
-    pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
-    filter_8_2t_pixels(signal, &ff, &res0, &res1);
-    store_8x1_2t_avg_pixels_ver(&res0, &res1, &max, dst_ptr);
-
-    src_ptr += src_pitch;
-    dst_ptr += dst_pitch;
-    height -= 1;
-  } while (height > 0);
-}
-
 void aom_highbd_filter_block1d4_h8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
                                         ptrdiff_t, uint32_t, const int16_t *,
                                         int);
@@ -1331,30 +1087,4 @@
 HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);
 HIGH_FUN_CONV_2D(, avx2);
 
-void aom_highbd_filter_block1d4_h8_avg_sse2(const uint16_t *, ptrdiff_t,
-                                            uint16_t *, ptrdiff_t, uint32_t,
-                                            const int16_t *, int);
-void aom_highbd_filter_block1d4_h2_avg_sse2(const uint16_t *, ptrdiff_t,
-                                            uint16_t *, ptrdiff_t, uint32_t,
-                                            const int16_t *, int);
-void aom_highbd_filter_block1d4_v8_avg_sse2(const uint16_t *, ptrdiff_t,
-                                            uint16_t *, ptrdiff_t, uint32_t,
-                                            const int16_t *, int);
-void aom_highbd_filter_block1d4_v2_avg_sse2(const uint16_t *, ptrdiff_t,
-                                            uint16_t *, ptrdiff_t, uint32_t,
-                                            const int16_t *, int);
-#define aom_highbd_filter_block1d4_h8_avg_avx2 \
-  aom_highbd_filter_block1d4_h8_avg_sse2
-#define aom_highbd_filter_block1d4_h2_avg_avx2 \
-  aom_highbd_filter_block1d4_h2_avg_sse2
-#define aom_highbd_filter_block1d4_v8_avg_avx2 \
-  aom_highbd_filter_block1d4_v8_avg_sse2
-#define aom_highbd_filter_block1d4_v2_avg_avx2 \
-  aom_highbd_filter_block1d4_v2_avg_sse2
-
-HIGH_FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, avx2);
-HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
-                 avx2);
-HIGH_FUN_CONV_2D(avg_, avx2);
-
 #undef HIGHBD_FUNC
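
In the AVX2 file above, the deleted high-bitdepth kernels differ from their kept counterparts only in the store step visible in the store_*_avg_pixels helpers: clamp the filtered value to the bit-depth maximum, then rounding-average it with the pixel already in dst via _mm256_avg_epu16 / _mm_avg_epu16. A scalar model of that store (a sketch with an illustrative name, not code from the tree):

#include <stdint.h>

/* Scalar equivalent of the removed store_*_avg_pixels helpers: clamp to
 * (1 << bd) - 1, then (a + b + 1) >> 1 against the existing destination,
 * which is the per-lane behavior of _mm256_avg_epu16. */
static void highbd_avg_store_row(const uint16_t *filtered, uint16_t *dst,
                                 int w, int bd) {
  const uint16_t max = (uint16_t)((1 << bd) - 1);
  for (int x = 0; x < w; ++x) {
    const uint16_t v = filtered[x] > max ? max : filtered[x];
    dst[x] = (uint16_t)((v + dst[x] + 1) >> 1);
  }
}
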
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index d0dffdd..c3e2fdc 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -164,15 +164,6 @@
 add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
 specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
 
-add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
-
-add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
-add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-
 #
 # dct
 #
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 103bfb9..52df913 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -38,19 +38,14 @@
 
 struct ConvolveFunctions {
   ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, ConvolveFunc h8,
-                    ConvolveFunc h8_avg, ConvolveFunc v8, ConvolveFunc v8_avg,
-                    ConvolveFunc hv8, ConvolveFunc hv8_avg, int bd)
-      : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
-        v8_avg_(v8_avg), hv8_avg_(hv8_avg), use_highbd_(bd) {}
+                    ConvolveFunc v8, ConvolveFunc hv8, int bd)
+      : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), use_highbd_(bd) {}
 
   ConvolveFunc copy_;
   ConvolveFunc avg_;
   ConvolveFunc h8_;
   ConvolveFunc v8_;
   ConvolveFunc hv8_;
-  ConvolveFunc h8_avg_;
-  ConvolveFunc v8_avg_;
-  ConvolveFunc hv8_avg_;
   int use_highbd_;  // 0 if high bitdepth not used, else the actual bit depth.
 };
 
@@ -600,78 +595,6 @@
   }
 }
 
-TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
-  uint8_t *const in = input();
-  uint8_t *const out = output();
-  uint8_t ref8[kOutputStride * kMaxDimension];
-  uint16_t ref16[kOutputStride * kMaxDimension];
-  uint8_t *ref;
-  if (UUT_->use_highbd_ == 0) {
-    ref = ref8;
-  } else {
-    ref = CONVERT_TO_BYTEPTR(ref16);
-  }
-
-  // Populate ref and out with some random data
-  ::libaom_test::ACMRandom prng;
-  for (int y = 0; y < Height(); ++y) {
-    for (int x = 0; x < Width(); ++x) {
-      uint16_t r;
-      if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
-        r = prng.Rand8Extremes();
-      } else {
-        r = prng.Rand16() & mask_;
-      }
-      assign_val(out, y * kOutputStride + x, r);
-      assign_val(ref, y * kOutputStride + x, r);
-    }
-  }
-
-  for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
-    const InterpFilter filter = (InterpFilter)filter_bank;
-    const InterpKernel *filters =
-        (const InterpKernel *)av1_get_interp_filter_kernel(filter);
-    const InterpFilterParams filter_params =
-        av1_get_interp_filter_params(filter);
-    if (filter_params.taps != SUBPEL_TAPS) continue;
-
-    for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
-      for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
-        wrapper_filter_average_block2d_8_c(in, kInputStride, filters[filter_x],
-                                           filters[filter_y], ref,
-                                           kOutputStride, Width(), Height());
-
-        if (filter_x && filter_y)
-          ASM_REGISTER_STATE_CHECK(UUT_->hv8_avg_(
-              in, kInputStride, out, kOutputStride, filters[filter_x], 16,
-              filters[filter_y], 16, Width(), Height()));
-        else if (filter_y)
-          ASM_REGISTER_STATE_CHECK(UUT_->v8_avg_(
-              in, kInputStride, out, kOutputStride, kInvalidFilter, 16,
-              filters[filter_y], 16, Width(), Height()));
-        else if (filter_x)
-          ASM_REGISTER_STATE_CHECK(UUT_->h8_avg_(
-              in, kInputStride, out, kOutputStride, filters[filter_x], 16,
-              kInvalidFilter, 16, Width(), Height()));
-        else
-          ASM_REGISTER_STATE_CHECK(
-              UUT_->avg_(in, kInputStride, out, kOutputStride, kInvalidFilter,
-                         0, kInvalidFilter, 0, Width(), Height()));
-
-        CheckGuardBlocks();
-
-        for (int y = 0; y < Height(); ++y)
-          for (int x = 0; x < Width(); ++x)
-            ASSERT_EQ(lookup(ref, y * kOutputStride + x),
-                      lookup(out, y * kOutputStride + x))
-                << "mismatch at (" << x << "," << y << "), "
-                << "filters (" << filter_bank << "," << filter_x << ","
-                << filter_y << ")";
-      }
-    }
-  }
-}
-
 TEST_P(ConvolveTest, FilterExtremes) {
   uint8_t *const in = input();
   uint8_t *const out = output();
@@ -874,7 +797,7 @@
 using std::tr1::make_tuple;
 
 #define WRAP(func, bd)                                                       \
-  void wrap_##func##_##bd(                                                   \
+  static void wrap_##func##_##bd(                                            \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
       ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride,    \
       const int16_t *filter_y, int filter_y_stride, int w, int h) {          \
@@ -889,95 +812,69 @@
 WRAP(convolve_copy_sse2, 12)
 WRAP(convolve_avg_sse2, 12)
 WRAP(convolve8_horiz_sse2, 8)
-WRAP(convolve8_avg_horiz_sse2, 8)
 WRAP(convolve8_vert_sse2, 8)
-WRAP(convolve8_avg_vert_sse2, 8)
 WRAP(convolve8_sse2, 8)
-WRAP(convolve8_avg_sse2, 8)
 WRAP(convolve8_horiz_sse2, 10)
-WRAP(convolve8_avg_horiz_sse2, 10)
 WRAP(convolve8_vert_sse2, 10)
-WRAP(convolve8_avg_vert_sse2, 10)
 WRAP(convolve8_sse2, 10)
-WRAP(convolve8_avg_sse2, 10)
 WRAP(convolve8_horiz_sse2, 12)
-WRAP(convolve8_avg_horiz_sse2, 12)
 WRAP(convolve8_vert_sse2, 12)
-WRAP(convolve8_avg_vert_sse2, 12)
 WRAP(convolve8_sse2, 12)
-WRAP(convolve8_avg_sse2, 12)
 #endif  // HAVE_SSE2 && ARCH_X86_64
 
 WRAP(convolve_copy_c, 8)
 WRAP(convolve_avg_c, 8)
 WRAP(convolve8_horiz_c, 8)
-WRAP(convolve8_avg_horiz_c, 8)
 WRAP(convolve8_vert_c, 8)
-WRAP(convolve8_avg_vert_c, 8)
 WRAP(convolve8_c, 8)
-WRAP(convolve8_avg_c, 8)
 WRAP(convolve_copy_c, 10)
 WRAP(convolve_avg_c, 10)
 WRAP(convolve8_horiz_c, 10)
-WRAP(convolve8_avg_horiz_c, 10)
 WRAP(convolve8_vert_c, 10)
-WRAP(convolve8_avg_vert_c, 10)
 WRAP(convolve8_c, 10)
-WRAP(convolve8_avg_c, 10)
 WRAP(convolve_copy_c, 12)
 WRAP(convolve_avg_c, 12)
 WRAP(convolve8_horiz_c, 12)
-WRAP(convolve8_avg_horiz_c, 12)
 WRAP(convolve8_vert_c, 12)
-WRAP(convolve8_avg_vert_c, 12)
 WRAP(convolve8_c, 12)
-WRAP(convolve8_avg_c, 12)
 
 #if HAVE_AVX2
 WRAP(convolve_copy_avx2, 8)
 WRAP(convolve_avg_avx2, 8)
 WRAP(convolve8_horiz_avx2, 8)
-WRAP(convolve8_avg_horiz_avx2, 8)
 WRAP(convolve8_vert_avx2, 8)
-WRAP(convolve8_avg_vert_avx2, 8)
 WRAP(convolve8_avx2, 8)
-WRAP(convolve8_avg_avx2, 8)
 
 WRAP(convolve_copy_avx2, 10)
 WRAP(convolve_avg_avx2, 10)
 WRAP(convolve8_avx2, 10)
 WRAP(convolve8_horiz_avx2, 10)
 WRAP(convolve8_vert_avx2, 10)
-WRAP(convolve8_avg_avx2, 10)
-WRAP(convolve8_avg_horiz_avx2, 10)
-WRAP(convolve8_avg_vert_avx2, 10)
 
 WRAP(convolve_copy_avx2, 12)
 WRAP(convolve_avg_avx2, 12)
 WRAP(convolve8_avx2, 12)
 WRAP(convolve8_horiz_avx2, 12)
 WRAP(convolve8_vert_avx2, 12)
-WRAP(convolve8_avg_avx2, 12)
-WRAP(convolve8_avg_horiz_avx2, 12)
-WRAP(convolve8_avg_vert_avx2, 12)
 #endif  // HAVE_AVX2
 
 #undef WRAP
 
-const ConvolveFunctions convolve8_c(
-    wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, wrap_convolve8_horiz_c_8,
-    wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8,
-    wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
-const ConvolveFunctions convolve10_c(
-    wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, wrap_convolve8_horiz_c_10,
-    wrap_convolve8_avg_horiz_c_10, wrap_convolve8_vert_c_10,
-    wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
-    10);
-const ConvolveFunctions convolve12_c(
-    wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, wrap_convolve8_horiz_c_12,
-    wrap_convolve8_avg_horiz_c_12, wrap_convolve8_vert_c_12,
-    wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
-    12);
+const ConvolveFunctions convolve8_c(wrap_convolve_copy_c_8,
+                                    wrap_convolve_avg_c_8,
+                                    wrap_convolve8_horiz_c_8,
+                                    wrap_convolve8_vert_c_8, wrap_convolve8_c_8,
+                                    8);
+const ConvolveFunctions convolve10_c(wrap_convolve_copy_c_10,
+                                     wrap_convolve_avg_c_10,
+                                     wrap_convolve8_horiz_c_10,
+                                     wrap_convolve8_vert_c_10,
+                                     wrap_convolve8_c_10, 10);
+const ConvolveFunctions convolve12_c(wrap_convolve_copy_c_12,
+                                     wrap_convolve_avg_c_12,
+                                     wrap_convolve8_horiz_c_12,
+                                     wrap_convolve8_vert_c_12,
+                                     wrap_convolve8_c_12, 12);
 const ConvolveParam kArrayConvolve_c[] = {
   ALL_SIZES(convolve8_c), ALL_SIZES(convolve10_c), ALL_SIZES(convolve12_c)
 };
@@ -985,21 +882,21 @@
 INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::ValuesIn(kArrayConvolve_c));
 
 #if HAVE_SSE2 && ARCH_X86_64
-const ConvolveFunctions convolve8_sse2(
-    wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8,
-    wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
-    wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
-    wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
-const ConvolveFunctions convolve10_sse2(
-    wrap_convolve_copy_sse2_10, wrap_convolve_avg_sse2_10,
-    wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
-    wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
-    wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
-const ConvolveFunctions convolve12_sse2(
-    wrap_convolve_copy_sse2_12, wrap_convolve_avg_sse2_12,
-    wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
-    wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
-    wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+const ConvolveFunctions convolve8_sse2(wrap_convolve_copy_sse2_8,
+                                       wrap_convolve_avg_sse2_8,
+                                       wrap_convolve8_horiz_sse2_8,
+                                       wrap_convolve8_vert_sse2_8,
+                                       wrap_convolve8_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(wrap_convolve_copy_sse2_10,
+                                        wrap_convolve_avg_sse2_10,
+                                        wrap_convolve8_horiz_sse2_10,
+                                        wrap_convolve8_vert_sse2_10,
+                                        wrap_convolve8_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(wrap_convolve_copy_sse2_12,
+                                        wrap_convolve_avg_sse2_12,
+                                        wrap_convolve8_horiz_sse2_12,
+                                        wrap_convolve8_vert_sse2_12,
+                                        wrap_convolve8_sse2_12, 12);
 const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2),
                                               ALL_SIZES(convolve10_sse2),
                                               ALL_SIZES(convolve12_sse2) };
@@ -1010,11 +907,8 @@
 #if HAVE_SSSE3
 const ConvolveFunctions convolve8_ssse3(aom_convolve_copy_c, aom_convolve_avg_c,
                                         aom_convolve8_horiz_ssse3,
-                                        aom_convolve8_avg_horiz_ssse3,
                                         aom_convolve8_vert_ssse3,
-                                        aom_convolve8_avg_vert_ssse3,
-                                        aom_convolve8_ssse3,
-                                        aom_convolve8_avg_ssse3, 0);
+                                        aom_convolve8_ssse3, 0);
 
 const ConvolveParam kArrayConvolve8_ssse3[] = { ALL_SIZES(convolve8_ssse3) };
 INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest,
@@ -1022,21 +916,21 @@
 #endif
 
 #if HAVE_AVX2
-const ConvolveFunctions convolve8_avx2(
-    wrap_convolve_copy_avx2_8, wrap_convolve_avg_avx2_8,
-    wrap_convolve8_horiz_avx2_8, wrap_convolve8_avg_horiz_avx2_8,
-    wrap_convolve8_vert_avx2_8, wrap_convolve8_avg_vert_avx2_8,
-    wrap_convolve8_avx2_8, wrap_convolve8_avg_avx2_8, 8);
-const ConvolveFunctions convolve10_avx2(
-    wrap_convolve_copy_avx2_10, wrap_convolve_avg_avx2_10,
-    wrap_convolve8_horiz_avx2_10, wrap_convolve8_avg_horiz_avx2_10,
-    wrap_convolve8_vert_avx2_10, wrap_convolve8_avg_vert_avx2_10,
-    wrap_convolve8_avx2_10, wrap_convolve8_avg_avx2_10, 10);
-const ConvolveFunctions convolve12_avx2(
-    wrap_convolve_copy_avx2_12, wrap_convolve_avg_avx2_12,
-    wrap_convolve8_horiz_avx2_12, wrap_convolve8_avg_horiz_avx2_12,
-    wrap_convolve8_vert_avx2_12, wrap_convolve8_avg_vert_avx2_12,
-    wrap_convolve8_avx2_12, wrap_convolve8_avg_avx2_12, 12);
+const ConvolveFunctions convolve8_avx2(wrap_convolve_copy_avx2_8,
+                                       wrap_convolve_avg_avx2_8,
+                                       wrap_convolve8_horiz_avx2_8,
+                                       wrap_convolve8_vert_avx2_8,
+                                       wrap_convolve8_avx2_8, 8);
+const ConvolveFunctions convolve10_avx2(wrap_convolve_copy_avx2_10,
+                                        wrap_convolve_avg_avx2_10,
+                                        wrap_convolve8_horiz_avx2_10,
+                                        wrap_convolve8_vert_avx2_10,
+                                        wrap_convolve8_avx2_10, 10);
+const ConvolveFunctions convolve12_avx2(wrap_convolve_copy_avx2_12,
+                                        wrap_convolve_avg_avx2_12,
+                                        wrap_convolve8_horiz_avx2_12,
+                                        wrap_convolve8_vert_avx2_12,
+                                        wrap_convolve8_avx2_12, 12);
 const ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES_64(convolve8_avx2),
                                                ALL_SIZES_64(convolve10_avx2),
                                                ALL_SIZES_64(convolve12_avx2) };
@@ -1047,15 +941,17 @@
 // TODO(any): Make NEON versions support 128x128 128x64 64x128 block sizes
 #if HAVE_NEON && !(CONFIG_AV1)
 #if HAVE_NEON_ASM
-const ConvolveFunctions convolve8_neon(
-    aom_convolve_copy_neon, aom_convolve_avg_neon, aom_convolve8_horiz_neon,
-    aom_convolve8_avg_horiz_neon, aom_convolve8_vert_neon,
-    aom_convolve8_avg_vert_neon, aom_convolve8_neon, aom_convolve8_avg_neon, 0);
+const ConvolveFunctions convolve8_neon(aom_convolve_copy_neon,
+                                       aom_convolve_avg_neon,
+                                       aom_convolve8_horiz_neon,
+                                       aom_convolve8_vert_neon,
+                                       aom_convolve8_neon, 0);
 #else   // HAVE_NEON
-const ConvolveFunctions convolve8_neon(
-    aom_convolve_copy_neon, aom_convolve_avg_neon, aom_convolve8_horiz_neon,
-    aom_convolve8_avg_horiz_neon, aom_convolve8_vert_neon,
-    aom_convolve8_avg_vert_neon, aom_convolve8_neon, aom_convolve8_avg_neon, 0);
+const ConvolveFunctions convolve8_neon(aom_convolve_copy_neon,
+                                       aom_convolve_avg_neon,
+                                       aom_convolve8_horiz_neon,
+                                       aom_convolve8_vert_neon,
+                                       aom_convolve8_neon, 0);
 #endif  // HAVE_NEON_ASM
 
 const ConvolveParam kArrayConvolve8_neon[] = { ALL_SIZES_64(convolve8_neon) };
@@ -1065,11 +961,11 @@
 
 // TODO(any): Make DSPR2 versions support 128x128 128x64 64x128 block sizes
 #if HAVE_DSPR2 && !(CONFIG_AV1)
-const ConvolveFunctions convolve8_dspr2(
-    aom_convolve_copy_dspr2, aom_convolve_avg_dspr2, aom_convolve8_horiz_dspr2,
-    aom_convolve8_avg_horiz_dspr2, aom_convolve8_vert_dspr2,
-    aom_convolve8_avg_vert_dspr2, aom_convolve8_dspr2, aom_convolve8_avg_dspr2,
-    0);
+const ConvolveFunctions convolve8_dspr2(aom_convolve_copy_dspr2,
+                                        aom_convolve_avg_dspr2,
+                                        aom_convolve8_horiz_dspr2,
+                                        aom_convolve8_vert_dspr2,
+                                        aom_convolve8_dspr2, 0);
 
 const ConvolveParam kArrayConvolve8_dspr2[] = { ALL_SIZES_64(convolve8_dspr2) };
 INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest,
@@ -1078,10 +974,11 @@
 
 // TODO(any): Make MSA versions support 128x128 128x64 64x128 block sizes
 #if HAVE_MSA && !(CONFIG_AV1)
-const ConvolveFunctions convolve8_msa(
-    aom_convolve_copy_msa, aom_convolve_avg_msa, aom_convolve8_horiz_msa,
-    aom_convolve8_avg_horiz_msa, aom_convolve8_vert_msa,
-    aom_convolve8_avg_vert_msa, aom_convolve8_msa, aom_convolve8_avg_msa, 0);
+const ConvolveFunctions convolve8_msa(aom_convolve_copy_msa,
+                                      aom_convolve_avg_msa,
+                                      aom_convolve8_horiz_msa,
+                                      aom_convolve8_vert_msa, aom_convolve8_msa,
+                                      0);
 
 const ConvolveParam kArrayConvolve8_msa[] = { ALL_SIZES_64(convolve8_msa) };
 INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest,