Remove MIPS optimizations

These were largely inherited from libvpx and are not covered by regular
testing; the MSA and DSPR2 targets currently fail to build.
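
For reference, MIPS cross-compiles were driven by the toolchain files
removed below, along the lines of (assuming a mips-linux-gnu GCC cross
toolchain on the host):

  cmake path/to/aom \
    -DCMAKE_TOOLCHAIN_FILE=path/to/aom/build/cmake/toolchains/mips32-linux-gcc.cmake
  make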

Fixed: aomedia:3305
Change-Id: I6e40179702557b7b0f65e85692bc2d8480b56fd2
diff --git a/README.md b/README.md
index d70c707..7af05af 100644
--- a/README.md
+++ b/README.md
@@ -165,8 +165,6 @@
  - armv7-linux-gcc.cmake
  - armv7-mingw-gcc.cmake
  - armv7s-ios.cmake
- - mips32-linux-gcc.cmake
- - mips64-linux-gcc.cmake
  - x86-ios-simulator.cmake
  - x86-linux.cmake
  - x86-macos.cmake
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index 29a9d3f..cf677af 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -114,29 +114,6 @@
             "${AOM_ROOT}/aom_dsp/arm/subtract_neon.c"
             "${AOM_ROOT}/aom_dsp/arm/blend_a64_mask_neon.c")
 
-list(APPEND AOM_DSP_COMMON_INTRIN_DSPR2
-            "${AOM_ROOT}/aom_dsp/mips/aom_convolve_copy_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/common_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/common_dspr2.h"
-            "${AOM_ROOT}/aom_dsp/mips/convolve2_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/convolve2_horiz_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/convolve2_vert_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/convolve8_horiz_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/convolve8_vert_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/convolve_common_dspr2.h"
-            "${AOM_ROOT}/aom_dsp/mips/intrapred16_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/intrapred4_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/intrapred8_dspr2.c"
-            "${AOM_ROOT}/aom_dsp/mips/inv_txfm_dspr2.h")
-
-list(APPEND AOM_DSP_COMMON_INTRIN_MSA
-            "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_horiz_msa.c"
-            "${AOM_ROOT}/aom_dsp/mips/aom_convolve8_vert_msa.c"
-            "${AOM_ROOT}/aom_dsp/mips/aom_convolve_copy_msa.c"
-            "${AOM_ROOT}/aom_dsp/mips/aom_convolve_msa.h"
-            "${AOM_ROOT}/aom_dsp/mips/intrapred_msa.c"
-            "${AOM_ROOT}/aom_dsp/mips/macros_msa.h")
-
 if(CONFIG_AV1_HIGHBITDEPTH)
   list(APPEND AOM_DSP_COMMON_INTRIN_SSE2
               "${AOM_ROOT}/aom_dsp/x86/highbd_convolve_sse2.c"
@@ -265,11 +242,6 @@
               "${AOM_ROOT}/aom_dsp/arm/sse_neon.c"
               "${AOM_ROOT}/aom_dsp/arm/sum_squares_neon.c")
 
-  list(APPEND AOM_DSP_ENCODER_INTRIN_MSA "${AOM_ROOT}/aom_dsp/mips/sad_msa.c"
-              "${AOM_ROOT}/aom_dsp/mips/subtract_msa.c"
-              "${AOM_ROOT}/aom_dsp/mips/variance_msa.c"
-              "${AOM_ROOT}/aom_dsp/mips/sub_pixel_variance_msa.c")
-
   if(CONFIG_AV1_HIGHBITDEPTH)
     list(APPEND AOM_DSP_ENCODER_ASM_SSE2
                 "${AOM_ROOT}/aom_dsp/x86/highbd_sad4d_sse2.asm"
@@ -428,20 +400,6 @@
     endif()
   endif()
 
-  if(HAVE_DSPR2)
-    add_intrinsics_object_library("" "dspr2" "aom_dsp_common"
-                                  "AOM_DSP_COMMON_INTRIN_DSPR2")
-  endif()
-
-  if(HAVE_MSA)
-    add_intrinsics_object_library("" "msa" "aom_dsp_common"
-                                  "AOM_DSP_COMMON_INTRIN_MSA")
-    if(CONFIG_AV1_ENCODER)
-      add_intrinsics_object_library("" "msa" "aom_dsp_encoder"
-                                    "AOM_DSP_ENCODER_INTRIN_MSA")
-    endif()
-  endif()
-
   target_sources(aom PRIVATE $<TARGET_OBJECTS:aom_dsp>)
   if(BUILD_SHARED_LIBS)
     target_sources(aom_static PRIVATE $<TARGET_OBJECTS:aom_dsp>)
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 94f15c4..aeaf9f1 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -85,101 +85,101 @@
   }
 }
 
-specialize qw/aom_dc_top_predictor_4x4 msa neon sse2/;
+specialize qw/aom_dc_top_predictor_4x4 neon sse2/;
 specialize qw/aom_dc_top_predictor_4x8 sse2/;
 specialize qw/aom_dc_top_predictor_4x16 sse2/;
 specialize qw/aom_dc_top_predictor_8x4 sse2/;
-specialize qw/aom_dc_top_predictor_8x8 neon msa sse2/;
+specialize qw/aom_dc_top_predictor_8x8 neon sse2/;
 specialize qw/aom_dc_top_predictor_8x16 sse2/;
 specialize qw/aom_dc_top_predictor_8x32 sse2/;
 specialize qw/aom_dc_top_predictor_16x4 sse2/;
 specialize qw/aom_dc_top_predictor_16x8 sse2/;
-specialize qw/aom_dc_top_predictor_16x16 neon msa sse2/;
+specialize qw/aom_dc_top_predictor_16x16 neon sse2/;
 specialize qw/aom_dc_top_predictor_16x32 sse2/;
 specialize qw/aom_dc_top_predictor_16x64 sse2/;
 specialize qw/aom_dc_top_predictor_32x8 sse2/;
 specialize qw/aom_dc_top_predictor_32x16 sse2 avx2/;
-specialize qw/aom_dc_top_predictor_32x32 msa neon sse2 avx2/;
+specialize qw/aom_dc_top_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_dc_top_predictor_32x64 sse2 avx2/;
 specialize qw/aom_dc_top_predictor_64x16 sse2 avx2/;
 specialize qw/aom_dc_top_predictor_64x32 sse2 avx2/;
 specialize qw/aom_dc_top_predictor_64x64 sse2 avx2/;
 
-specialize qw/aom_dc_left_predictor_4x4 msa neon sse2/;
+specialize qw/aom_dc_left_predictor_4x4 neon sse2/;
 specialize qw/aom_dc_left_predictor_4x8 sse2/;
 specialize qw/aom_dc_left_predictor_4x16 sse2/;
 specialize qw/aom_dc_left_predictor_8x4 sse2/;
-specialize qw/aom_dc_left_predictor_8x8 neon msa sse2/;
+specialize qw/aom_dc_left_predictor_8x8 neon sse2/;
 specialize qw/aom_dc_left_predictor_8x16 sse2/;
 specialize qw/aom_dc_left_predictor_8x32 sse2/;
 specialize qw/aom_dc_left_predictor_16x4 sse2/;
 specialize qw/aom_dc_left_predictor_16x8 sse2/;
-specialize qw/aom_dc_left_predictor_16x16 neon msa sse2/;
+specialize qw/aom_dc_left_predictor_16x16 neon sse2/;
 specialize qw/aom_dc_left_predictor_16x32 sse2/;
 specialize qw/aom_dc_left_predictor_16x64 sse2/;
 specialize qw/aom_dc_left_predictor_32x8 sse2/;
 specialize qw/aom_dc_left_predictor_32x16 sse2 avx2/;
-specialize qw/aom_dc_left_predictor_32x32 msa neon sse2 avx2/;
+specialize qw/aom_dc_left_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_dc_left_predictor_32x64 sse2 avx2/;
 specialize qw/aom_dc_left_predictor_64x16 sse2 avx2/;
 specialize qw/aom_dc_left_predictor_64x32 sse2 avx2/;
 specialize qw/aom_dc_left_predictor_64x64 sse2 avx2/;
 
-specialize qw/aom_dc_128_predictor_4x4 msa neon sse2/;
+specialize qw/aom_dc_128_predictor_4x4 neon sse2/;
 specialize qw/aom_dc_128_predictor_4x8 sse2/;
 specialize qw/aom_dc_128_predictor_4x16 sse2/;
 specialize qw/aom_dc_128_predictor_8x4 sse2/;
-specialize qw/aom_dc_128_predictor_8x8 neon msa sse2/;
+specialize qw/aom_dc_128_predictor_8x8 neon sse2/;
 specialize qw/aom_dc_128_predictor_8x16 sse2/;
 specialize qw/aom_dc_128_predictor_8x32 sse2/;
 specialize qw/aom_dc_128_predictor_16x4 sse2/;
 specialize qw/aom_dc_128_predictor_16x8 sse2/;
-specialize qw/aom_dc_128_predictor_16x16 neon msa sse2/;
+specialize qw/aom_dc_128_predictor_16x16 neon sse2/;
 specialize qw/aom_dc_128_predictor_16x32 sse2/;
 specialize qw/aom_dc_128_predictor_16x64 sse2/;
 specialize qw/aom_dc_128_predictor_32x8 sse2/;
 specialize qw/aom_dc_128_predictor_32x16 sse2 avx2/;
-specialize qw/aom_dc_128_predictor_32x32 msa neon sse2 avx2/;
+specialize qw/aom_dc_128_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_dc_128_predictor_32x64 sse2 avx2/;
 specialize qw/aom_dc_128_predictor_64x16 sse2 avx2/;
 specialize qw/aom_dc_128_predictor_64x32 sse2 avx2/;
 specialize qw/aom_dc_128_predictor_64x64 sse2 avx2/;
 
-specialize qw/aom_v_predictor_4x4 neon msa sse2/;
+specialize qw/aom_v_predictor_4x4 neon sse2/;
 specialize qw/aom_v_predictor_4x8 sse2/;
 specialize qw/aom_v_predictor_4x16 sse2/;
 specialize qw/aom_v_predictor_8x4 sse2/;
-specialize qw/aom_v_predictor_8x8 neon msa sse2/;
+specialize qw/aom_v_predictor_8x8 neon sse2/;
 specialize qw/aom_v_predictor_8x16 sse2/;
 specialize qw/aom_v_predictor_8x32 sse2/;
 specialize qw/aom_v_predictor_16x4 sse2/;
 specialize qw/aom_v_predictor_16x8 sse2/;
-specialize qw/aom_v_predictor_16x16 neon msa sse2/;
+specialize qw/aom_v_predictor_16x16 neon sse2/;
 specialize qw/aom_v_predictor_16x32 sse2/;
 specialize qw/aom_v_predictor_16x64 sse2/;
 specialize qw/aom_v_predictor_32x8 sse2/;
 specialize qw/aom_v_predictor_32x16 sse2 avx2/;
-specialize qw/aom_v_predictor_32x32 neon msa sse2 avx2/;
+specialize qw/aom_v_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_v_predictor_32x64 sse2 avx2/;
 specialize qw/aom_v_predictor_64x16 sse2 avx2/;
 specialize qw/aom_v_predictor_64x32 sse2 avx2/;
 specialize qw/aom_v_predictor_64x64 sse2 avx2/;
 
-specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
+specialize qw/aom_h_predictor_4x4 neon sse2/;
 specialize qw/aom_h_predictor_4x8 sse2/;
 specialize qw/aom_h_predictor_4x16 sse2/;
 specialize qw/aom_h_predictor_8x4 sse2/;
-specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
+specialize qw/aom_h_predictor_8x8 neon sse2/;
 specialize qw/aom_h_predictor_8x16 sse2/;
 specialize qw/aom_h_predictor_8x32 sse2/;
 specialize qw/aom_h_predictor_16x4 sse2/;
 specialize qw/aom_h_predictor_16x8 sse2/;
-specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
+specialize qw/aom_h_predictor_16x16 neon sse2/;
 specialize qw/aom_h_predictor_16x32 sse2/;
 specialize qw/aom_h_predictor_16x64 sse2/;
 specialize qw/aom_h_predictor_32x8 sse2/;
 specialize qw/aom_h_predictor_32x16 sse2/;
-specialize qw/aom_h_predictor_32x32 neon msa sse2 avx2/;
+specialize qw/aom_h_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_h_predictor_32x64 sse2/;
 specialize qw/aom_h_predictor_64x16 sse2/;
 specialize qw/aom_h_predictor_64x32 sse2/;
@@ -267,21 +267,21 @@
 
 # TODO(yunqingwang): optimize rectangular DC_PRED to replace division
 # by multiply and shift.
-specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
+specialize qw/aom_dc_predictor_4x4 neon sse2/;
 specialize qw/aom_dc_predictor_4x8 sse2/;
 specialize qw/aom_dc_predictor_4x16 sse2/;
 specialize qw/aom_dc_predictor_8x4 sse2/;
-specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
+specialize qw/aom_dc_predictor_8x8 neon sse2/;
 specialize qw/aom_dc_predictor_8x16 sse2/;
 specialize qw/aom_dc_predictor_8x32 sse2/;
 specialize qw/aom_dc_predictor_16x4 sse2/;
 specialize qw/aom_dc_predictor_16x8 sse2/;
-specialize qw/aom_dc_predictor_16x16 dspr2 neon msa sse2/;
+specialize qw/aom_dc_predictor_16x16 neon sse2/;
 specialize qw/aom_dc_predictor_16x32 sse2/;
 specialize qw/aom_dc_predictor_16x64 sse2/;
 specialize qw/aom_dc_predictor_32x8 sse2/;
 specialize qw/aom_dc_predictor_32x16 sse2 avx2/;
-specialize qw/aom_dc_predictor_32x32 msa neon sse2 avx2/;
+specialize qw/aom_dc_predictor_32x32 neon sse2 avx2/;
 specialize qw/aom_dc_predictor_32x64 sse2 avx2/;
 specialize qw/aom_dc_predictor_64x64 sse2 avx2/;
 specialize qw/aom_dc_predictor_64x32 sse2 avx2/;
@@ -450,7 +450,7 @@
 add_proto qw/void aom_convolve8_horiz/,           "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 add_proto qw/void aom_convolve8_vert/,            "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 
-specialize qw/aom_convolve_copy       neon dspr2 msa sse2 avx2/;
+specialize qw/aom_convolve_copy       neon sse2 avx2/;
 specialize qw/aom_convolve8_horiz     sse2 ssse3/, "$avx2_ssse3";
 specialize qw/aom_convolve8_vert      sse2 ssse3/, "$avx2_ssse3";
 
@@ -719,7 +719,7 @@
   # Block subtraction
   #
   add_proto qw/void aom_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride";
-  specialize qw/aom_subtract_block neon msa sse2 avx2/;
+  specialize qw/aom_subtract_block neon sse2 avx2/;
 
   add_proto qw/int64_t/, "aom_sse", "const uint8_t *a, int a_stride, const uint8_t *b,int b_stride, int width, int height";
   specialize qw/aom_sse  sse4_1 avx2 neon/;
@@ -764,30 +764,30 @@
   }
 
   add_proto qw/uint64_t aom_sum_sse_2d_i16/, "const int16_t *src, int src_stride, int width, int height, int *sum";
-  specialize qw/aom_sum_sse_2d_i16 avx2 neon    sse2/;
-  specialize qw/aom_sad128x128    avx2 neon     sse2/;
-  specialize qw/aom_sad128x64     avx2 neon     sse2/;
-  specialize qw/aom_sad64x128     avx2 neon     sse2/;
-  specialize qw/aom_sad64x64      avx2 neon msa sse2/;
-  specialize qw/aom_sad64x32      avx2 neon msa sse2/;
-  specialize qw/aom_sad32x64      avx2 neon msa sse2/;
-  specialize qw/aom_sad32x32      avx2 neon msa sse2/;
-  specialize qw/aom_sad32x16      avx2 neon msa sse2/;
-  specialize qw/aom_sad16x32           neon msa sse2/;
-  specialize qw/aom_sad16x16           neon msa sse2/;
-  specialize qw/aom_sad16x8            neon msa sse2/;
-  specialize qw/aom_sad8x16            neon msa sse2/;
-  specialize qw/aom_sad8x8             neon msa sse2/;
-  specialize qw/aom_sad8x4             neon msa sse2/;
-  specialize qw/aom_sad4x8             neon msa sse2/;
-  specialize qw/aom_sad4x4             neon msa sse2/;
+  specialize qw/aom_sum_sse_2d_i16 avx2 neon sse2/;
+  specialize qw/aom_sad128x128    avx2 neon sse2/;
+  specialize qw/aom_sad128x64     avx2 neon sse2/;
+  specialize qw/aom_sad64x128     avx2 neon sse2/;
+  specialize qw/aom_sad64x64      avx2 neon sse2/;
+  specialize qw/aom_sad64x32      avx2 neon sse2/;
+  specialize qw/aom_sad32x64      avx2 neon sse2/;
+  specialize qw/aom_sad32x32      avx2 neon sse2/;
+  specialize qw/aom_sad32x16      avx2 neon sse2/;
+  specialize qw/aom_sad16x32           neon sse2/;
+  specialize qw/aom_sad16x16           neon sse2/;
+  specialize qw/aom_sad16x8            neon sse2/;
+  specialize qw/aom_sad8x16            neon sse2/;
+  specialize qw/aom_sad8x8             neon sse2/;
+  specialize qw/aom_sad8x4             neon sse2/;
+  specialize qw/aom_sad4x8             neon sse2/;
+  specialize qw/aom_sad4x4             neon sse2/;
 
-  specialize qw/aom_sad4x16            neon     sse2/;
-  specialize qw/aom_sad16x4            neon     sse2/;
-  specialize qw/aom_sad8x32            neon     sse2/;
-  specialize qw/aom_sad32x8            neon     sse2/;
-  specialize qw/aom_sad16x64           neon     sse2/;
-  specialize qw/aom_sad64x16           neon     sse2/;
+  specialize qw/aom_sad4x16            neon sse2/;
+  specialize qw/aom_sad16x4            neon sse2/;
+  specialize qw/aom_sad8x32            neon sse2/;
+  specialize qw/aom_sad32x8            neon sse2/;
+  specialize qw/aom_sad16x64           neon sse2/;
+  specialize qw/aom_sad64x16           neon sse2/;
 
   specialize qw/aom_sad_skip_128x128    avx2          sse2  neon/;
   specialize qw/aom_sad_skip_128x64     avx2          sse2  neon/;
@@ -810,29 +810,29 @@
   specialize qw/aom_sad_skip_16x64                    sse2  neon/;
   specialize qw/aom_sad_skip_64x16                    sse2  neon/;
 
-  specialize qw/aom_sad128x128_avg avx2     sse2/;
-  specialize qw/aom_sad128x64_avg  avx2     sse2/;
-  specialize qw/aom_sad64x128_avg  avx2     sse2/;
-  specialize qw/aom_sad64x64_avg   avx2 msa sse2/;
-  specialize qw/aom_sad64x32_avg   avx2 msa sse2/;
-  specialize qw/aom_sad32x64_avg   avx2 msa sse2/;
-  specialize qw/aom_sad32x32_avg   avx2 msa sse2/;
-  specialize qw/aom_sad32x16_avg   avx2 msa sse2/;
-  specialize qw/aom_sad16x32_avg        msa sse2/;
-  specialize qw/aom_sad16x16_avg        msa sse2/;
-  specialize qw/aom_sad16x8_avg         msa sse2/;
-  specialize qw/aom_sad8x16_avg         msa sse2/;
-  specialize qw/aom_sad8x8_avg          msa sse2/;
-  specialize qw/aom_sad8x4_avg          msa sse2/;
-  specialize qw/aom_sad4x8_avg          msa sse2/;
-  specialize qw/aom_sad4x4_avg          msa sse2/;
+  specialize qw/aom_sad128x128_avg avx2 sse2/;
+  specialize qw/aom_sad128x64_avg  avx2 sse2/;
+  specialize qw/aom_sad64x128_avg  avx2 sse2/;
+  specialize qw/aom_sad64x64_avg   avx2 sse2/;
+  specialize qw/aom_sad64x32_avg   avx2 sse2/;
+  specialize qw/aom_sad32x64_avg   avx2 sse2/;
+  specialize qw/aom_sad32x32_avg   avx2 sse2/;
+  specialize qw/aom_sad32x16_avg   avx2 sse2/;
+  specialize qw/aom_sad16x32_avg        sse2/;
+  specialize qw/aom_sad16x16_avg        sse2/;
+  specialize qw/aom_sad16x8_avg         sse2/;
+  specialize qw/aom_sad8x16_avg         sse2/;
+  specialize qw/aom_sad8x8_avg          sse2/;
+  specialize qw/aom_sad8x4_avg          sse2/;
+  specialize qw/aom_sad4x8_avg          sse2/;
+  specialize qw/aom_sad4x4_avg          sse2/;
 
-  specialize qw/aom_sad4x16_avg             sse2/;
-  specialize qw/aom_sad16x4_avg             sse2/;
-  specialize qw/aom_sad8x32_avg             sse2/;
-  specialize qw/aom_sad32x8_avg             sse2/;
-  specialize qw/aom_sad16x64_avg            sse2/;
-  specialize qw/aom_sad64x16_avg            sse2/;
+  specialize qw/aom_sad4x16_avg         sse2/;
+  specialize qw/aom_sad16x4_avg         sse2/;
+  specialize qw/aom_sad8x32_avg         sse2/;
+  specialize qw/aom_sad32x8_avg         sse2/;
+  specialize qw/aom_sad16x64_avg        sse2/;
+  specialize qw/aom_sad64x16_avg        sse2/;
 
   specialize qw/aom_dist_wtd_sad128x128_avg ssse3/;
   specialize qw/aom_dist_wtd_sad128x64_avg  ssse3/;
@@ -1002,31 +1002,31 @@
     add_proto qw/void/, "aom_masked_sad${w}x${h}x4d", "const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]";
   }
 
-  specialize qw/aom_sad128x128x4d avx2 neon     sse2/;
-  specialize qw/aom_sad128x64x4d  avx2 neon     sse2/;
-  specialize qw/aom_sad64x128x4d  avx2 neon     sse2/;
-  specialize qw/aom_sad64x64x4d   avx2 neon msa sse2/;
-  specialize qw/aom_sad64x32x4d   avx2 neon msa sse2/;
-  specialize qw/aom_sad32x64x4d   avx2 neon msa sse2/;
-  specialize qw/aom_sad32x32x4d   avx2 neon msa sse2/;
-  specialize qw/aom_sad32x16x4d   avx2 neon msa sse2/;
-  specialize qw/aom_sad16x32x4d        neon msa sse2/;
-  specialize qw/aom_sad16x16x4d        neon msa sse2/;
-  specialize qw/aom_sad16x8x4d         neon msa sse2/;
+  specialize qw/aom_sad128x128x4d avx2 neon sse2/;
+  specialize qw/aom_sad128x64x4d  avx2 neon sse2/;
+  specialize qw/aom_sad64x128x4d  avx2 neon sse2/;
+  specialize qw/aom_sad64x64x4d   avx2 neon sse2/;
+  specialize qw/aom_sad64x32x4d   avx2 neon sse2/;
+  specialize qw/aom_sad32x64x4d   avx2 neon sse2/;
+  specialize qw/aom_sad32x32x4d   avx2 neon sse2/;
+  specialize qw/aom_sad32x16x4d   avx2 neon sse2/;
+  specialize qw/aom_sad16x32x4d        neon sse2/;
+  specialize qw/aom_sad16x16x4d        neon sse2/;
+  specialize qw/aom_sad16x8x4d         neon sse2/;
 
-  specialize qw/aom_sad8x16x4d         neon msa sse2/;
-  specialize qw/aom_sad8x8x4d          neon msa sse2/;
-  specialize qw/aom_sad8x4x4d          neon msa sse2/;
-  specialize qw/aom_sad4x32x4d         neon     sse2/;
-  specialize qw/aom_sad4x8x4d          neon msa sse2/;
-  specialize qw/aom_sad4x4x4d          neon msa sse2/;
+  specialize qw/aom_sad8x16x4d         neon sse2/;
+  specialize qw/aom_sad8x8x4d          neon sse2/;
+  specialize qw/aom_sad8x4x4d          neon sse2/;
+  specialize qw/aom_sad4x32x4d         neon sse2/;
+  specialize qw/aom_sad4x8x4d          neon sse2/;
+  specialize qw/aom_sad4x4x4d          neon sse2/;
 
-  specialize qw/aom_sad64x16x4d   avx2 neon     sse2/;
-  specialize qw/aom_sad32x8x4d    avx2 neon     sse2/;
-  specialize qw/aom_sad16x64x4d        neon     sse2/;
-  specialize qw/aom_sad16x4x4d         neon     sse2/;
-  specialize qw/aom_sad8x32x4d         neon     sse2/;
-  specialize qw/aom_sad4x16x4d         neon msa sse2/;
+  specialize qw/aom_sad64x16x4d   avx2 neon sse2/;
+  specialize qw/aom_sad32x8x4d    avx2 neon sse2/;
+  specialize qw/aom_sad16x64x4d        neon sse2/;
+  specialize qw/aom_sad16x4x4d         neon sse2/;
+  specialize qw/aom_sad8x32x4d         neon sse2/;
+  specialize qw/aom_sad4x16x4d         neon sse2/;
 
   specialize qw/aom_sad_skip_128x128x4d avx2 sse2 neon/;
   specialize qw/aom_sad_skip_128x64x4d  avx2 sse2 neon/;
@@ -1267,8 +1267,8 @@
   add_proto qw/void aom_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
   add_proto qw/void aom_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
 
-  specialize qw/aom_get16x16var                neon msa/;
-  specialize qw/aom_get8x8var             sse2 neon msa/;
+  specialize qw/aom_get16x16var                neon/;
+  specialize qw/aom_get8x8var             sse2 neon/;
 
   add_proto qw/void aom_get_sse_sum_8x8_quad/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
   specialize qw/aom_get_sse_sum_8x8_quad        avx2 sse2 neon/;
@@ -1278,10 +1278,10 @@
   add_proto qw/unsigned int aom_mse8x16/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
   add_proto qw/unsigned int aom_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
 
-  specialize qw/aom_mse16x16          sse2 avx2 neon msa/;
-  specialize qw/aom_mse16x8           sse2      neon msa/;
-  specialize qw/aom_mse8x16           sse2      neon msa/;
-  specialize qw/aom_mse8x8            sse2      neon msa/;
+  specialize qw/aom_mse16x16          sse2 avx2 neon/;
+  specialize qw/aom_mse16x8           sse2      neon/;
+  specialize qw/aom_mse8x16           sse2      neon/;
+  specialize qw/aom_mse8x8            sse2      neon/;
 
   if (aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
     foreach $bd (8, 10, 12) {
@@ -1304,8 +1304,8 @@
   add_proto qw/unsigned int aom_get_mb_ss/, "const int16_t *";
   add_proto qw/unsigned int aom_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
 
-  specialize qw/aom_get_mb_ss sse2 msa/;
-  specialize qw/aom_get4x4sse_cs neon msa/;
+  specialize qw/aom_get_mb_ss sse2/;
+  specialize qw/aom_get4x4sse_cs neon/;
 
   #
   # Variance / Subpixel Variance / Subpixel Avg Variance
@@ -1326,56 +1326,56 @@
     add_proto qw/uint32_t/, "aom_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
     add_proto qw/uint32_t/, "aom_dist_wtd_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param";
   }
-  specialize qw/aom_variance128x128   sse2 avx2 neon    /;
-  specialize qw/aom_variance128x64    sse2 avx2 neon    /;
-  specialize qw/aom_variance64x128    sse2 avx2 neon    /;
-  specialize qw/aom_variance64x64     sse2 avx2 neon msa/;
-  specialize qw/aom_variance64x32     sse2 avx2 neon msa/;
-  specialize qw/aom_variance32x64     sse2 avx2 neon msa/;
-  specialize qw/aom_variance32x32     sse2 avx2 neon msa/;
-  specialize qw/aom_variance32x16     sse2 avx2 neon msa/;
-  specialize qw/aom_variance16x32     sse2 avx2 neon msa/;
-  specialize qw/aom_variance16x16     sse2 avx2 neon msa/;
-  specialize qw/aom_variance16x8      sse2 avx2 neon msa/;
-  specialize qw/aom_variance8x16      sse2      neon msa/;
-  specialize qw/aom_variance8x8       sse2      neon msa/;
-  specialize qw/aom_variance8x4       sse2      neon msa/;
-  specialize qw/aom_variance4x8       sse2      neon msa/;
-  specialize qw/aom_variance4x4       sse2      neon msa/;
+  specialize qw/aom_variance128x128   sse2 avx2 neon/;
+  specialize qw/aom_variance128x64    sse2 avx2 neon/;
+  specialize qw/aom_variance64x128    sse2 avx2 neon/;
+  specialize qw/aom_variance64x64     sse2 avx2 neon/;
+  specialize qw/aom_variance64x32     sse2 avx2 neon/;
+  specialize qw/aom_variance32x64     sse2 avx2 neon/;
+  specialize qw/aom_variance32x32     sse2 avx2 neon/;
+  specialize qw/aom_variance32x16     sse2 avx2 neon/;
+  specialize qw/aom_variance16x32     sse2 avx2 neon/;
+  specialize qw/aom_variance16x16     sse2 avx2 neon/;
+  specialize qw/aom_variance16x8      sse2 avx2 neon/;
+  specialize qw/aom_variance8x16      sse2      neon/;
+  specialize qw/aom_variance8x8       sse2      neon/;
+  specialize qw/aom_variance8x4       sse2      neon/;
+  specialize qw/aom_variance4x8       sse2      neon/;
+  specialize qw/aom_variance4x4       sse2      neon/;
 
-  specialize qw/aom_sub_pixel_variance128x128   avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance128x64    avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance64x128    avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance64x64     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance64x32     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance32x64     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance32x32     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance32x16     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance16x32     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance16x16     avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance16x8      avx2 neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance8x16           neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance8x8            neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance8x4            neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance4x8            neon msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_variance4x4            neon msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance128x128   avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance128x64    avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance64x128    avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance64x64     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance64x32     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance32x64     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance32x32     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance32x16     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance16x32     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance16x16     avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance16x8      avx2 neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance8x16           neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance8x8            neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance8x4            neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance4x8            neon sse2 ssse3/;
+  specialize qw/aom_sub_pixel_variance4x4            neon sse2 ssse3/;
 
-  specialize qw/aom_sub_pixel_avg_variance128x128 avx2     sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance128x64  avx2     sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance64x128  avx2     sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance64x64   avx2 msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance64x32   avx2 msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance32x64   avx2 msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance32x32   avx2 msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance32x16   avx2 msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance16x32        msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance16x16        msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance16x8         msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance8x16         msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance8x8          msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance8x4          msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance4x8          msa sse2 ssse3/;
-  specialize qw/aom_sub_pixel_avg_variance4x4          msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance128x128 avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance128x64  avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance64x128  avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance64x64   avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance64x32   avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x64   avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x32   avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x16   avx2 sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x32        sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x16        sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x8         sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x16         sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x8          sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x4          sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance4x8          sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance4x4          sse2 ssse3/;
 
   if (aom_config("CONFIG_REALTIME_ONLY") ne "yes") {
     specialize qw/aom_variance4x16  neon sse2/;
@@ -1519,43 +1519,43 @@
   }
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance64x64 avx2 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance64x32 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x64 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x32 avx2 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance32x16 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x32 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x16 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance16x8 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x16 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x8 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance8x4 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance4x8 sse2 ssse3/;
 
   add_proto qw/uint32_t aom_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-  specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
+  specialize qw/aom_sub_pixel_avg_variance4x4 sse2 ssse3/;
 
   #
   # Comp Avg
diff --git a/aom_dsp/mips/aom_convolve8_horiz_msa.c b/aom_dsp/mips/aom_convolve8_horiz_msa.c
deleted file mode 100644
index c8ab612..0000000
--- a/aom_dsp/mips/aom_convolve8_horiz_msa.c
+++ /dev/null
@@ -1,693 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/aom_convolve_msa.h"
-
-static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16u8 mask0, mask1, mask2, mask3, out;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v8i16 filt, out0, out1;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[16]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, out0, out1);
-  SRARI_H2_SH(out0, out1, FILTER_BITS);
-  SAT_SH2_SH(out0, out1, 7);
-  out = PCKEV_XORI128_UB(out0, out1);
-  ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-}
-
-static void common_hz_8t_4x8_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16i8 filt0, filt1, filt2, filt3;
-  v16i8 src0, src1, src2, src3;
-  v16u8 mask0, mask1, mask2, mask3, out;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[16]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  src += (4 * src_stride);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, out0, out1);
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, out2, out3);
-  SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-  SAT_SH4_SH(out0, out1, out2, out3, 7);
-  out = PCKEV_XORI128_UB(out0, out1);
-  ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-  dst += (4 * dst_stride);
-  out = PCKEV_XORI128_UB(out2, out3);
-  ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-}
-
-static void common_hz_8t_4w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_hz_8t_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_hz_8t_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_hz_8t_8x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  XORI_B4_128_SB(src0, src1, src2, src3);
-  HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
-                             filt0, filt1, filt2, filt3, out0, out1, out2,
-                             out3);
-  SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-  SAT_SH4_SH(out0, out1, out2, out3, 7);
-  tmp0 = PCKEV_XORI128_UB(out0, out1);
-  tmp1 = PCKEV_XORI128_UB(out2, out3);
-  ST8x4_UB(tmp0, tmp1, dst, dst_stride);
-}
-
-static void common_hz_8t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
-                                     uint8_t *dst, int32_t dst_stride,
-                                     int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    src += (4 * src_stride);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    tmp0 = PCKEV_XORI128_UB(out0, out1);
-    tmp1 = PCKEV_XORI128_UB(out2, out3);
-    ST8x4_UB(tmp0, tmp1, dst, dst_stride);
-    dst += (4 * dst_stride);
-  }
-}
-
-static void common_hz_8t_8w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_hz_8t_8x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else {
-    common_hz_8t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height);
-  }
-}
-
-static void common_hz_8t_16w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, out;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = (height >> 1); loop_cnt--;) {
-    LD_SB2(src, src_stride, src0, src2);
-    LD_SB2(src + 8, src_stride, src1, src3);
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    src += (2 * src_stride);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    out = PCKEV_XORI128_UB(out0, out1);
-    ST_UB(out, dst);
-    dst += dst_stride;
-    out = PCKEV_XORI128_UB(out2, out3);
-    ST_UB(out, dst);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_8t_32w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, out;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = (height >> 1); loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-
-    out = PCKEV_XORI128_UB(out0, out1);
-    ST_UB(out, dst);
-    out = PCKEV_XORI128_UB(out2, out3);
-    ST_UB(out, dst + 16);
-    dst += dst_stride;
-
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    out = PCKEV_XORI128_UB(out0, out1);
-    ST_UB(out, dst);
-    out = PCKEV_XORI128_UB(out2, out3);
-    ST_UB(out, dst + 16);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_8t_64w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  int32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
-  v16u8 mask0, mask1, mask2, mask3, out;
-  v8i16 filt, out0, out1, out2, out3;
-
-  mask0 = LD_UB(&mc_filt_mask_arr[0]);
-  src -= 3;
-
-  /* rearranging filter */
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  mask1 = mask0 + 2;
-  mask2 = mask0 + 4;
-  mask3 = mask0 + 6;
-
-  for (loop_cnt = height; loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    out = PCKEV_XORI128_UB(out0, out1);
-    ST_UB(out, dst);
-    out = PCKEV_XORI128_UB(out2, out3);
-    ST_UB(out, dst + 16);
-
-    src0 = LD_SB(src + 32);
-    src2 = LD_SB(src + 48);
-    src3 = LD_SB(src + 56);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-
-    XORI_B4_128_SB(src0, src1, src2, src3);
-    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
-                               mask3, filt0, filt1, filt2, filt3, out0, out1,
-                               out2, out3);
-    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
-    SAT_SH4_SH(out0, out1, out2, out3, 7);
-    out = PCKEV_XORI128_UB(out0, out1);
-    ST_UB(out, dst + 32);
-    out = PCKEV_XORI128_UB(out2, out3);
-    ST_UB(out, dst + 48);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_4x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16i8 src0, src1, src2, src3, mask;
-  v16u8 filt0, vec0, vec1, res0, res1;
-  v8u16 vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
-  DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
-  SRARI_H2_UH(vec2, vec3, FILTER_BITS);
-  PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16u8 vec0, vec1, vec2, vec3, filt0;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16i8 res0, res1, res2, res3;
-  v8u16 vec4, vec5, vec6, vec7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[16]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
-  VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
-              vec6, vec7);
-  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
-  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
-              res3);
-  ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-  dst += (4 * dst_stride);
-  ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
-}
-
-static void common_hz_2t_4w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_hz_2t_8x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16u8 filt0;
-  v16i8 src0, src1, src2, src3, mask;
-  v8u16 vec0, vec1, vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1);
-  ST8x4_UB(src0, src1, dst, dst_stride);
-}
-
-static void common_hz_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
-                                     uint8_t *dst, int32_t dst_stride,
-                                     int8_t *filter, int32_t height) {
-  v16u8 filt0;
-  v16i8 src0, src1, src2, src3, mask, out0, out1;
-  v8u16 vec0, vec1, vec2, vec3, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  src += (4 * src_stride);
-
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-
-  LD_SB4(src, src_stride, src0, src1, src2, src3);
-  src += (4 * src_stride);
-
-  PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
-  ST8x4_UB(out0, out1, dst, dst_stride);
-  dst += (4 * dst_stride);
-
-  VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-              vec2, vec3);
-  SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-  PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
-  ST8x4_UB(out0, out1, dst, dst_stride);
-  dst += (4 * dst_stride);
-
-  if (16 == height) {
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-    src += (4 * src_stride);
-
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    LD_SB4(src, src_stride, src0, src1, src2, src3);
-
-    PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
-    ST8x4_UB(out0, out1, dst, dst_stride);
-
-    VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
-                vec2, vec3);
-    SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
-    ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride);
-  }
-}
-
-static void common_hz_2t_8w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else {
-    common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height);
-  }
-}
-
-static void common_hz_2t_16w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  loop_cnt = (height >> 2) - 1;
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  LD_SB4(src, src_stride, src0, src2, src4, src6);
-  LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-  src += (4 * src_stride);
-
-  VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-  VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
-              out2, out3);
-  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
-              out6, out7);
-  SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-  SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-  PCKEV_ST_SB(out0, out1, dst);
-  dst += dst_stride;
-  PCKEV_ST_SB(out2, out3, dst);
-  dst += dst_stride;
-  PCKEV_ST_SB(out4, out5, dst);
-  dst += dst_stride;
-  PCKEV_ST_SB(out6, out7, dst);
-  dst += dst_stride;
-
-  for (; loop_cnt--;) {
-    LD_SB4(src, src_stride, src0, src2, src4, src6);
-    LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
-    src += (4 * src_stride);
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
-                out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
-                out6, out7);
-    SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-    SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    PCKEV_ST_SB(out0, out1, dst);
-    dst += dst_stride;
-    PCKEV_ST_SB(out2, out3, dst);
-    dst += dst_stride;
-    PCKEV_ST_SB(out4, out5, dst);
-    dst += dst_stride;
-    PCKEV_ST_SB(out6, out7, dst);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_32w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  for (loop_cnt = height >> 1; loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src3 = LD_SB(src + 24);
-    src1 = __msa_sldi_b(src2, src0, 8);
-    src += src_stride;
-    src4 = LD_SB(src);
-    src6 = LD_SB(src + 16);
-    src7 = LD_SB(src + 24);
-    src5 = __msa_sldi_b(src6, src4, 8);
-    src += src_stride;
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
-                out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
-                out6, out7);
-    SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-    SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    PCKEV_ST_SB(out0, out1, dst);
-    PCKEV_ST_SB(out2, out3, dst + 16);
-    dst += dst_stride;
-    PCKEV_ST_SB(out4, out5, dst);
-    PCKEV_ST_SB(out6, out7, dst + 16);
-    dst += dst_stride;
-  }
-}
-
-static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
-  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
-
-  mask = LD_SB(&mc_filt_mask_arr[0]);
-
-  /* rearranging filter */
-  filt = LD_UH(filter);
-  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
-  for (loop_cnt = height; loop_cnt--;) {
-    src0 = LD_SB(src);
-    src2 = LD_SB(src + 16);
-    src4 = LD_SB(src + 32);
-    src6 = LD_SB(src + 48);
-    src7 = LD_SB(src + 56);
-    SLDI_B3_SB(src2, src4, src6, src0, src2, src4, src1, src3, src5, 8);
-    src += src_stride;
-
-    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
-    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
-    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
-                out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
-                out6, out7);
-    SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
-    SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    PCKEV_ST_SB(out0, out1, dst);
-    PCKEV_ST_SB(out2, out3, dst + 16);
-    PCKEV_ST_SB(out4, out5, dst + 32);
-    PCKEV_ST_SB(out6, out7, dst + 48);
-    dst += dst_stride;
-  }
-}
-
-void aom_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
-                             uint8_t *dst, ptrdiff_t dst_stride,
-                             const int16_t *filter_x, int x_step_q4,
-                             const int16_t *filter_y, int y_step_q4, int w,
-                             int h) {
-  int8_t cnt, filt_hor[8];
-
-  assert(x_step_q4 == 16);
-  assert(((const int32_t *)filter_x)[1] != 0x800000);
-
-  for (cnt = 0; cnt < 8; ++cnt) {
-    filt_hor[cnt] = filter_x[cnt];
-  }
-
-  if (((const int32_t *)filter_x)[0] == 0) {
-    switch (w) {
-      case 4:
-        common_hz_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            &filt_hor[3], h);
-        break;
-      case 8:
-        common_hz_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            &filt_hor[3], h);
-        break;
-      case 16:
-        common_hz_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_hor[3], h);
-        break;
-      case 32:
-        common_hz_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_hor[3], h);
-        break;
-      case 64:
-        common_hz_2t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_hor[3], h);
-        break;
-      default:
-        aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                              x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  } else {
-    switch (w) {
-      case 4:
-        common_hz_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            filt_hor, h);
-        break;
-      case 8:
-        common_hz_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            filt_hor, h);
-        break;
-      case 16:
-        common_hz_8t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_hor, h);
-        break;
-      case 32:
-        common_hz_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_hor, h);
-        break;
-      case 64:
-        common_hz_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_hor, h);
-        break;
-      default:
-        aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                              x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
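
For readers not fluent in the MSA macros, the 2-tap branch selected above (taken when taps 0-2 and 5-7 of the 8-tap filter are zero) reduces per pixel to the scalar sketch below. This is illustrative only, not library code. Note that the MSA path reads the two taps as unsigned bytes (DOTP_UB), which is why the int8_t truncation of the bilinear tap value 128 in filt_hor[] is harmless; and since libaom's bilinear taps are non-negative and sum to 1 << FILTER_BITS (FILTER_BITS is 7 in aom_dsp/aom_filter.h), the rounded result never exceeds 255 and no clamp is needed.

#include <stdint.h>

/* Scalar sketch of the common_hz_2t_*_msa paths; `filter` points at the two
   non-zero taps, read as unsigned (the caller passes &filt_hor[3]). */
static void bilinear_horiz_sketch(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int dst_stride,
                                  const uint8_t *filter, int w, int h) {
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      /* unsigned dot product of two adjacent pixels (DOTP_UB), then
         round-shift by FILTER_BITS (SRARI_H) */
      int sum = src[x] * filter[0] + src[x + 1] * filter[1];
      dst[x] = (uint8_t)((sum + 64) >> 7);
    }
    src += src_stride;
    dst += dst_stride;
  }
}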
diff --git a/aom_dsp/mips/aom_convolve8_vert_msa.c b/aom_dsp/mips/aom_convolve8_vert_msa.c
deleted file mode 100644
index 2c3bc08..0000000
--- a/aom_dsp/mips/aom_convolve8_vert_msa.c
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/aom_convolve_msa.h"
-
-static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
-  v16i8 src10998, filt0, filt1, filt2, filt3;
-  v16u8 out;
-  v8i16 filt, out10, out32;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-
-  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-             src54_r, src21_r);
-  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-  ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110,
-             src4332, src6554);
-  XORI_B3_128_SB(src2110, src4332, src6554);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-               src87_r, src98_r, src109_r);
-    ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998);
-    XORI_B2_128_SB(src8776, src10998);
-    out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0,
-                                filt1, filt2, filt3);
-    out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0,
-                                filt1, filt2, filt3);
-    SRARI_H2_SH(out10, out32, FILTER_BITS);
-    SAT_SH2_SH(out10, out32, 7);
-    out = PCKEV_XORI128_UB(out10, out32);
-    ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    src2110 = src6554;
-    src4332 = src8776;
-    src6554 = src10998;
-    src6 = src10;
-  }
-}
-
-static void common_vt_8t_8w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
-  v16u8 tmp0, tmp1;
-  v8i16 filt, out0_r, out1_r, out2_r, out3_r;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-             src54_r, src21_r);
-  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    XORI_B4_128_SB(src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-               src87_r, src98_r, src109_r);
-    out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
-                                 filt1, filt2, filt3);
-    out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
-                                 filt1, filt2, filt3);
-    out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
-                                 filt1, filt2, filt3);
-    out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
-                                 filt1, filt2, filt3);
-    SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
-    SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
-    tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
-    tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
-    ST8x4_UB(tmp0, tmp1, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    src10_r = src54_r;
-    src32_r = src76_r;
-    src54_r = src98_r;
-    src21_r = src65_r;
-    src43_r = src87_r;
-    src65_r = src109_r;
-    src6 = src10;
-  }
-}
-
-static void common_vt_8t_16w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 filt0, filt1, filt2, filt3;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
-  v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
-  v16u8 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
-  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-  src += (7 * src_stride);
-  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-             src54_r, src21_r);
-  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-  ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
-             src54_l, src21_l);
-  ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_SB4(src, src_stride, src7, src8, src9, src10);
-    XORI_B4_128_SB(src7, src8, src9, src10);
-    src += (4 * src_stride);
-
-    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-               src87_r, src98_r, src109_r);
-    ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
-               src87_l, src98_l, src109_l);
-    out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
-                                 filt1, filt2, filt3);
-    out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
-                                 filt1, filt2, filt3);
-    out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
-                                 filt1, filt2, filt3);
-    out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
-                                 filt1, filt2, filt3);
-    out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
-                                 filt1, filt2, filt3);
-    out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
-                                 filt1, filt2, filt3);
-    out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
-                                 filt1, filt2, filt3);
-    out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
-                                 filt1, filt2, filt3);
-    SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
-    SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
-    SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
-    SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
-    PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r,
-                tmp0, tmp1, tmp2, tmp3);
-    XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
-    ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    src10_r = src54_r;
-    src32_r = src76_r;
-    src54_r = src98_r;
-    src21_r = src65_r;
-    src43_r = src87_r;
-    src65_r = src109_r;
-    src10_l = src54_l;
-    src32_l = src76_l;
-    src54_l = src98_l;
-    src21_l = src65_l;
-    src43_l = src87_l;
-    src65_l = src109_l;
-    src6 = src10;
-  }
-}
-
-static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride,
-                                      uint8_t *dst, int32_t dst_stride,
-                                      int8_t *filter, int32_t height,
-                                      int32_t width) {
-  const uint8_t *src_tmp;
-  uint8_t *dst_tmp;
-  uint32_t loop_cnt, cnt;
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16i8 filt0, filt1, filt2, filt3;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
-  v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
-  v16u8 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
-
-  src -= (3 * src_stride);
-
-  filt = LD_SH(filter);
-  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
-
-  for (cnt = (width >> 4); cnt--;) {
-    src_tmp = src;
-    dst_tmp = dst;
-
-    LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
-    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
-    src_tmp += (7 * src_stride);
-    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
-               src54_r, src21_r);
-    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
-               src54_l, src21_l);
-    ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
-
-    for (loop_cnt = (height >> 2); loop_cnt--;) {
-      LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
-      XORI_B4_128_SB(src7, src8, src9, src10);
-      src_tmp += (4 * src_stride);
-      ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
-                 src87_r, src98_r, src109_r);
-      ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
-                 src87_l, src98_l, src109_l);
-      out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
-                                   filt1, filt2, filt3);
-      out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
-                                   filt1, filt2, filt3);
-      out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
-                                   filt1, filt2, filt3);
-      out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
-                                   filt1, filt2, filt3);
-      out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
-                                   filt1, filt2, filt3);
-      out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
-                                   filt1, filt2, filt3);
-      out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
-                                   filt1, filt2, filt3);
-      out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
-                                   filt1, filt2, filt3);
-      SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
-      SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
-      SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
-      SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
-      PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
-                  out3_r, tmp0, tmp1, tmp2, tmp3);
-      XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
-      ST_UB4(tmp0, tmp1, tmp2, tmp3, dst_tmp, dst_stride);
-      dst_tmp += (4 * dst_stride);
-
-      src10_r = src54_r;
-      src32_r = src76_r;
-      src54_r = src98_r;
-      src21_r = src65_r;
-      src43_r = src87_r;
-      src65_r = src109_r;
-      src10_l = src54_l;
-      src32_l = src76_l;
-      src54_l = src98_l;
-      src21_l = src65_l;
-      src43_l = src87_l;
-      src65_l = src109_l;
-      src6 = src10;
-    }
-
-    src += 16;
-    dst += 16;
-  }
-}
-
-static void common_vt_8t_32w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height,
-                            32);
-}
-
-static void common_vt_8t_64w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height,
-                            64);
-}
-
-static void common_vt_2t_4x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16i8 src0, src1, src2, src3, src4;
-  v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
-  v16u8 filt0;
-  v8i16 filt;
-  v8u16 tmp0, tmp1;
-
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
-
-  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
-             src32_r, src43_r);
-  ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
-  DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
-  SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-  src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
-  ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
-}
-
-static void common_vt_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
-  v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r;
-  v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v16u8 filt0;
-  v8i16 filt;
-
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  src += (8 * src_stride);
-
-  src8 = LD_SB(src);
-
-  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
-             src32_r, src43_r);
-  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
-             src76_r, src87_r);
-  ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
-             src76_r, src2110, src4332, src6554, src8776);
-  DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
-              tmp0, tmp1, tmp2, tmp3);
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
-  ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
-  ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride);
-}
-
-static void common_vt_2t_4w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else if (8 == height) {
-    common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
-  }
-}
-
-static void common_vt_2t_8x4_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter) {
-  v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0;
-  v16i8 out0, out1;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
-  ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
-  ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
-              tmp2, tmp3);
-  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
-  ST8x4_UB(out0, out1, dst, dst_stride);
-}
-
-static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
-                                     uint8_t *dst, int32_t dst_stride,
-                                     int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v16i8 out0, out1;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 3); loop_cnt--;) {
-    LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
-    src += (8 * src_stride);
-
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
-               vec3);
-    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
-               vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
-                tmp2, tmp3);
-    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
-    ST8x4_UB(out0, out1, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
-                tmp2, tmp3);
-    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
-    ST8x4_UB(out0, out1, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    src0 = src8;
-  }
-}
-
-static void common_vt_2t_8w_msa(const uint8_t *src, int32_t src_stride,
-                                uint8_t *dst, int32_t dst_stride,
-                                int8_t *filter, int32_t height) {
-  if (4 == height) {
-    common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
-  } else {
-    common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height);
-  }
-}
-
-static void common_vt_2t_16w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  src0 = LD_UB(src);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    src += (4 * src_stride);
-
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst);
-    dst += dst_stride;
-
-    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
-    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst);
-    dst += dst_stride;
-
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst);
-    dst += dst_stride;
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst);
-    dst += dst_stride;
-
-    src0 = src4;
-  }
-}
-
-static void common_vt_2t_32w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  src0 = LD_UB(src);
-  src5 = LD_UB(src + 16);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 2); loop_cnt--;) {
-    LD_UB4(src, src_stride, src1, src2, src3, src4);
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-
-    LD_UB4(src + 16, src_stride, src6, src7, src8, src9);
-    src += (4 * src_stride);
-
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst);
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride);
-
-    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
-    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst + 2 * dst_stride);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + 3 * dst_stride);
-
-    ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2);
-    ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst + 16);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + 16 + dst_stride);
-
-    ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6);
-    ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst + 16 + 2 * dst_stride);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + 16 + 3 * dst_stride);
-    dst += (4 * dst_stride);
-
-    src0 = src4;
-    src5 = src9;
-  }
-}
-
-static void common_vt_2t_64w_msa(const uint8_t *src, int32_t src_stride,
-                                 uint8_t *dst, int32_t dst_stride,
-                                 int8_t *filter, int32_t height) {
-  uint32_t loop_cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
-  v16u8 src11, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
-  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
-  v8i16 filt;
-
-  /* rearranging filter_y */
-  filt = LD_SH(filter);
-  filt0 = (v16u8)__msa_splati_h(filt, 0);
-
-  LD_UB4(src, 16, src0, src3, src6, src9);
-  src += src_stride;
-
-  for (loop_cnt = (height >> 1); loop_cnt--;) {
-    LD_UB2(src, src_stride, src1, src2);
-    LD_UB2(src + 16, src_stride, src4, src5);
-    LD_UB2(src + 32, src_stride, src7, src8);
-    LD_UB2(src + 48, src_stride, src10, src11);
-    src += (2 * src_stride);
-
-    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
-    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride);
-
-    ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6);
-    ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
-    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    PCKEV_ST_SB(tmp4, tmp5, dst + 16);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
-    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    PCKEV_ST_SB(tmp6, tmp7, dst + 16 + dst_stride);
-
-    ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2);
-    ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3);
-    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
-    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    PCKEV_ST_SB(tmp0, tmp1, dst + 32);
-
-    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
-    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    PCKEV_ST_SB(tmp2, tmp3, dst + 32 + dst_stride);
-
-    ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6);
-    ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7);
-    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
-    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    PCKEV_ST_SB(tmp4, tmp5, dst + 48);
-
-    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
-    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    PCKEV_ST_SB(tmp6, tmp7, dst + 48 + dst_stride);
-    dst += (2 * dst_stride);
-
-    src0 = src2;
-    src3 = src5;
-    src6 = src8;
-    src9 = src11;
-  }
-}
-
-void aom_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
-                            uint8_t *dst, ptrdiff_t dst_stride,
-                            const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4, int w,
-                            int h) {
-  int8_t cnt, filt_ver[8];
-
-  assert(y_step_q4 == 16);
-  assert(((const int32_t *)filter_y)[1] != 0x800000);
-
-  for (cnt = 8; cnt--;) {
-    filt_ver[cnt] = filter_y[cnt];
-  }
-
-  if (((const int32_t *)filter_y)[0] == 0) {
-    switch (w) {
-      case 4:
-        common_vt_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            &filt_ver[3], h);
-        break;
-      case 8:
-        common_vt_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            &filt_ver[3], h);
-        break;
-      case 16:
-        common_vt_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_ver[3], h);
-        break;
-      case 32:
-        common_vt_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_ver[3], h);
-        break;
-      case 64:
-        common_vt_2t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             &filt_ver[3], h);
-        break;
-      default:
-        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                             x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  } else {
-    switch (w) {
-      case 4:
-        common_vt_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            filt_ver, h);
-        break;
-      case 8:
-        common_vt_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                            filt_ver, h);
-        break;
-      case 16:
-        common_vt_8t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_ver, h);
-        break;
-      case 32:
-        common_vt_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_ver, h);
-        break;
-      case 64:
-        common_vt_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
-                             filt_ver, h);
-        break;
-      default:
-        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                             x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
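
A structural note on the vertical filters above: each 8-tap path keeps a seven-row sliding window (hence the src -= 3 * src_stride backup and the block of register copies at the bottom of every loop) and loads only four new rows per iteration. The XORI_*_128 steps remap pixels to signed bytes for the signed dot product and are undone when packing, so per output pixel the net arithmetic is the illustrative sketch below (not library code):

#include <stdint.h>

/* One output pixel of the common_vt_8t_*_msa paths; output row y reads
   source rows y-3 .. y+4. */
static uint8_t eighttap_vert_pixel_sketch(const uint8_t *src, int src_stride,
                                          const int8_t *filter /* 8 taps */) {
  int sum = 0;
  for (int k = 0; k < 8; ++k) sum += src[(k - 3) * src_stride] * filter[k];
  sum = (sum + 64) >> 7;  /* SRARI_H: round-shift by FILTER_BITS */
  if (sum < 0) sum = 0;   /* SAT_SH + pack: clamp to [0, 255] */
  if (sum > 255) sum = 255;
  return (uint8_t)sum;
}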
diff --git a/aom_dsp/mips/aom_convolve_copy_dspr2.c b/aom_dsp/mips/aom_convolve_copy_dspr2.c
deleted file mode 100644
index 12a213e..0000000
--- a/aom_dsp/mips/aom_convolve_copy_dspr2.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/aom_filter.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-void aom_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                             uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {
-  int x, y;
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4: {
-      uint32_t tp1;
-
-      /* 1 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         (%[src])      \n\t"
-            "sw               %[tp1],         (%[dst])      \n\t" /* store */
-
-            : [tp1] "=&r"(tp1)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-    } break;
-    case 8: {
-      uint32_t tp1, tp2;
-
-      /* 2 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-    } break;
-    case 16: {
-      uint32_t tp1, tp2, tp3, tp4;
-
-      /* 4 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-    } break;
-    case 32: {
-      uint32_t tp1, tp2, tp3, tp4;
-      uint32_t tp5, tp6, tp7, tp8;
-
-      /* 8 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-            "ulw              %[tp5],         16(%[src])     \n\t"
-            "ulw              %[tp6],         20(%[src])     \n\t"
-            "ulw              %[tp7],         24(%[src])     \n\t"
-            "ulw              %[tp8],         28(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t" /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t" /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t" /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
-              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-    } break;
-    case 64: {
-      uint32_t tp1, tp2, tp3, tp4;
-      uint32_t tp5, tp6, tp7, tp8;
-
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      /* 16 word storage */
-      for (y = h; y--;) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_load(src + src_stride + 64);
-        prefetch_store(dst + dst_stride);
-        prefetch_store(dst + dst_stride + 32);
-
-        __asm__ __volatile__(
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-            "ulw              %[tp5],         16(%[src])     \n\t"
-            "ulw              %[tp6],         20(%[src])     \n\t"
-            "ulw              %[tp7],         24(%[src])     \n\t"
-            "ulw              %[tp8],         28(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t" /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t" /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t" /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t" /* store */
-
-            "ulw              %[tp1],         32(%[src])     \n\t"
-            "ulw              %[tp2],         36(%[src])     \n\t"
-            "ulw              %[tp3],         40(%[src])     \n\t"
-            "ulw              %[tp4],         44(%[src])     \n\t"
-            "ulw              %[tp5],         48(%[src])     \n\t"
-            "ulw              %[tp6],         52(%[src])     \n\t"
-            "ulw              %[tp7],         56(%[src])     \n\t"
-            "ulw              %[tp8],         60(%[src])     \n\t"
-
-            "sw               %[tp1],         32(%[dst])     \n\t" /* store */
-            "sw               %[tp2],         36(%[dst])     \n\t" /* store */
-            "sw               %[tp3],         40(%[dst])     \n\t" /* store */
-            "sw               %[tp4],         44(%[dst])     \n\t" /* store */
-            "sw               %[tp5],         48(%[dst])     \n\t" /* store */
-            "sw               %[tp6],         52(%[dst])     \n\t" /* store */
-            "sw               %[tp7],         56(%[dst])     \n\t" /* store */
-            "sw               %[tp8],         60(%[dst])     \n\t" /* store */
-
-            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
-              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
-            : [src] "r"(src), [dst] "r"(dst));
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-    } break;
-    default:
-      for (y = h; y--;) {
-        for (x = 0; x < w; ++x) {
-          dst[x] = src[x];
-        }
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-  }
-}
-#endif
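
The inline assembly above is only a hand-unrolled copy: `ulw` does an unaligned 32-bit load, `sw` stores it, and the `pref` calls warm the caches one row ahead. With the DSPR2 path gone, callers resolve to the generic C kernel, which amounts to a per-row copy along the lines of this sketch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void convolve_copy_sketch(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 int w, int h) {
  for (int y = 0; y < h; ++y) {
    memcpy(dst, src, (size_t)w);  /* copy one row */
    src += src_stride;
    dst += dst_stride;
  }
}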
diff --git a/aom_dsp/mips/aom_convolve_copy_msa.c b/aom_dsp/mips/aom_convolve_copy_msa.c
deleted file mode 100644
index 12e7d95..0000000
--- a/aom_dsp/mips/aom_convolve_copy_msa.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <string.h>
-#include "aom_dsp/mips/macros_msa.h"
-
-static void copy_width8_msa(const uint8_t *src, int32_t src_stride,
-                            uint8_t *dst, int32_t dst_stride, int32_t height) {
-  int32_t cnt;
-  uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-
-  if (0 == height % 12) {
-    for (cnt = (height / 12); cnt--;) {
-      LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-      src += (8 * src_stride);
-
-      out0 = __msa_copy_u_d((v2i64)src0, 0);
-      out1 = __msa_copy_u_d((v2i64)src1, 0);
-      out2 = __msa_copy_u_d((v2i64)src2, 0);
-      out3 = __msa_copy_u_d((v2i64)src3, 0);
-      out4 = __msa_copy_u_d((v2i64)src4, 0);
-      out5 = __msa_copy_u_d((v2i64)src5, 0);
-      out6 = __msa_copy_u_d((v2i64)src6, 0);
-      out7 = __msa_copy_u_d((v2i64)src7, 0);
-
-      SD4(out0, out1, out2, out3, dst, dst_stride);
-      dst += (4 * dst_stride);
-      SD4(out4, out5, out6, out7, dst, dst_stride);
-      dst += (4 * dst_stride);
-
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      src += (4 * src_stride);
-
-      out0 = __msa_copy_u_d((v2i64)src0, 0);
-      out1 = __msa_copy_u_d((v2i64)src1, 0);
-      out2 = __msa_copy_u_d((v2i64)src2, 0);
-      out3 = __msa_copy_u_d((v2i64)src3, 0);
-      SD4(out0, out1, out2, out3, dst, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  } else if (0 == height % 8) {
-    for (cnt = height >> 3; cnt--;) {
-      LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-      src += (8 * src_stride);
-
-      out0 = __msa_copy_u_d((v2i64)src0, 0);
-      out1 = __msa_copy_u_d((v2i64)src1, 0);
-      out2 = __msa_copy_u_d((v2i64)src2, 0);
-      out3 = __msa_copy_u_d((v2i64)src3, 0);
-      out4 = __msa_copy_u_d((v2i64)src4, 0);
-      out5 = __msa_copy_u_d((v2i64)src5, 0);
-      out6 = __msa_copy_u_d((v2i64)src6, 0);
-      out7 = __msa_copy_u_d((v2i64)src7, 0);
-
-      SD4(out0, out1, out2, out3, dst, dst_stride);
-      dst += (4 * dst_stride);
-      SD4(out4, out5, out6, out7, dst, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  } else if (0 == height % 4) {
-    for (cnt = (height / 4); cnt--;) {
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      src += (4 * src_stride);
-      out0 = __msa_copy_u_d((v2i64)src0, 0);
-      out1 = __msa_copy_u_d((v2i64)src1, 0);
-      out2 = __msa_copy_u_d((v2i64)src2, 0);
-      out3 = __msa_copy_u_d((v2i64)src3, 0);
-
-      SD4(out0, out1, out2, out3, dst, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  } else if (0 == height % 2) {
-    for (cnt = (height / 2); cnt--;) {
-      LD_UB2(src, src_stride, src0, src1);
-      src += (2 * src_stride);
-      out0 = __msa_copy_u_d((v2i64)src0, 0);
-      out1 = __msa_copy_u_d((v2i64)src1, 0);
-
-      SD(out0, dst);
-      dst += dst_stride;
-      SD(out1, dst);
-      dst += dst_stride;
-    }
-  }
-}
-
-static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
-                                  uint8_t *dst, int32_t dst_stride,
-                                  int32_t height, int32_t width) {
-  int32_t cnt, loop_cnt;
-  const uint8_t *src_tmp;
-  uint8_t *dst_tmp;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-
-  for (cnt = (width >> 4); cnt--;) {
-    src_tmp = src;
-    dst_tmp = dst;
-
-    for (loop_cnt = (height >> 3); loop_cnt--;) {
-      LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6,
-             src7);
-      src_tmp += (8 * src_stride);
-
-      ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst_tmp,
-             dst_stride);
-      dst_tmp += (8 * dst_stride);
-    }
-
-    src += 16;
-    dst += 16;
-  }
-}
-
-static void copy_width16_msa(const uint8_t *src, int32_t src_stride,
-                             uint8_t *dst, int32_t dst_stride, int32_t height) {
-  int32_t cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-
-  if (0 == height % 12) {
-    for (cnt = (height / 12); cnt--;) {
-      LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-      src += (8 * src_stride);
-      ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride);
-      dst += (8 * dst_stride);
-
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      src += (4 * src_stride);
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  } else if (0 == height % 8) {
-    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
-  } else if (0 == height % 4) {
-    for (cnt = (height >> 2); cnt--;) {
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      src += (4 * src_stride);
-
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  }
-}
-
-static void copy_width32_msa(const uint8_t *src, int32_t src_stride,
-                             uint8_t *dst, int32_t dst_stride, int32_t height) {
-  int32_t cnt;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-
-  if (0 == height % 12) {
-    for (cnt = (height / 12); cnt--;) {
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
-      src += (4 * src_stride);
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
-      dst += (4 * dst_stride);
-
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
-      src += (4 * src_stride);
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
-      dst += (4 * dst_stride);
-
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
-      src += (4 * src_stride);
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  } else if (0 == height % 8) {
-    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
-  } else if (0 == height % 4) {
-    for (cnt = (height >> 2); cnt--;) {
-      LD_UB4(src, src_stride, src0, src1, src2, src3);
-      LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
-      src += (4 * src_stride);
-      ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-      ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
-      dst += (4 * dst_stride);
-    }
-  }
-}
-
-static void copy_width64_msa(const uint8_t *src, int32_t src_stride,
-                             uint8_t *dst, int32_t dst_stride, int32_t height) {
-  copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
-}
-
-void aom_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
-                           uint8_t *dst, ptrdiff_t dst_stride, int32_t w,
-                           int32_t h) {
-  switch (w) {
-    case 4: {
-      uint32_t cnt, tmp;
-      /* 1 word storage */
-      for (cnt = h; cnt--;) {
-        tmp = LW(src);
-        SW(tmp, dst);
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    }
-    case 8: {
-      copy_width8_msa(src, src_stride, dst, dst_stride, h);
-      break;
-    }
-    case 16: {
-      copy_width16_msa(src, src_stride, dst, dst_stride, h);
-      break;
-    }
-    case 32: {
-      copy_width32_msa(src, src_stride, dst, dst_stride, h);
-      break;
-    }
-    case 64: {
-      copy_width64_msa(src, src_stride, dst, dst_stride, h);
-      break;
-    }
-    default: {
-      uint32_t cnt;
-      for (cnt = h; cnt--;) {
-        memmove(dst, src, w);
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    }
-  }
-}
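
A note on the shape of the file above: the height % 12 / % 8 / % 4 / % 2 branches differ only in unroll factor, chosen to amortize loop overhead for the block heights AV1 actually uses. For the 8-byte-wide case the per-row work is a single 64-bit move, extracted from the vector register with __msa_copy_u_d; a portable sketch of the same row operation (assuming the compiler lowers the fixed-size memcpy to one load/store pair):

#include <stdint.h>
#include <string.h>

static void copy_width8_sketch(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride, int height) {
  for (int y = 0; y < height; ++y) {
    uint64_t v;
    memcpy(&v, src, sizeof(v));  /* unaligned-safe 64-bit load */
    memcpy(dst, &v, sizeof(v));
    src += src_stride;
    dst += dst_stride;
  }
}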
diff --git a/aom_dsp/mips/aom_convolve_msa.h b/aom_dsp/mips/aom_convolve_msa.h
deleted file mode 100644
index 852415c..0000000
--- a/aom_dsp/mips/aom_convolve_msa.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
-#define AOM_AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
-
-#include "aom_dsp/mips/macros_msa.h"
-#include "aom_dsp/aom_filter.h"
-
-extern const uint8_t mc_filt_mask_arr[16 * 3];
-
-#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt0, filt1, filt2,   \
-                            filt3)                                         \
-  ({                                                                       \
-    v8i16 tmp_dpadd_0, tmp_dpadd_1;                                        \
-                                                                           \
-    tmp_dpadd_0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0);               \
-    tmp_dpadd_0 = __msa_dpadd_s_h(tmp_dpadd_0, (v16i8)vec1, (v16i8)filt1); \
-    tmp_dpadd_1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2);               \
-    tmp_dpadd_1 = __msa_dpadd_s_h(tmp_dpadd_1, (v16i8)vec3, (v16i8)filt3); \
-    tmp_dpadd_0 = __msa_adds_s_h(tmp_dpadd_0, tmp_dpadd_1);                \
-                                                                           \
-    tmp_dpadd_0;                                                           \
-  })
-
-#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,     \
-                                   mask2, mask3, filt0, filt1, filt2, filt3, \
-                                   out0, out1)                               \
-  {                                                                          \
-    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
-    v8i16 res0_m, res1_m, res2_m, res3_m;                                    \
-                                                                             \
-    VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m);        \
-    DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m);               \
-    VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m);        \
-    DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m);              \
-    VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m);        \
-    DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m);               \
-    VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m);        \
-    DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m);              \
-    ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1);                 \
-  }
-
-#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,     \
-                                   mask2, mask3, filt0, filt1, filt2, filt3, \
-                                   out0, out1, out2, out3)                   \
-  {                                                                          \
-    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
-    v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;    \
-                                                                             \
-    VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m);        \
-    VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m);        \
-    DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0,  \
-                res0_m, res1_m, res2_m, res3_m);                             \
-    VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m);        \
-    VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m);        \
-    DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2,  \
-                res4_m, res5_m, res6_m, res7_m);                             \
-    VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m);        \
-    VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m);        \
-    DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1, \
-                 res0_m, res1_m, res2_m, res3_m);                            \
-    VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m);        \
-    VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m);        \
-    DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3, \
-                 res4_m, res5_m, res6_m, res7_m);                            \
-    ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m,      \
-                res7_m, out0, out1, out2, out3);                             \
-  }
-
-#endif  // AOM_AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
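
Per output lane, FILT_8TAP_DPADD_S_H above assembles an 8-tap signed dot product from four 2-tap ones: each vecN argument holds two interleaved source bytes per lane, dotp/dpadd accumulate taps 0-3 and 4-7 into two 16-bit halves, and __msa_adds_s_h combines the halves with saturation. A scalar sketch of the per-lane arithmetic (it assumes the intermediate 16-bit accumulations do not wrap, which holds for libaom's filter coefficients):

#include <stdint.h>

static int16_t adds_s16_sketch(int a, int b) { /* like __msa_adds_s_h */
  int s = a + b;
  return (int16_t)(s > 32767 ? 32767 : (s < -32768 ? -32768 : s));
}

static int16_t filt_8tap_dpadd_sketch(const int8_t s[8], const int8_t f[8]) {
  int lo = s[0] * f[0] + s[1] * f[1] + s[2] * f[2] + s[3] * f[3];
  int hi = s[4] * f[4] + s[5] * f[5] + s[6] * f[6] + s[7] * f[7];
  return adds_s16_sketch(lo, hi);
}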
diff --git a/aom_dsp/mips/common_dspr2.c b/aom_dsp/mips/common_dspr2.c
deleted file mode 100644
index 00ab75d..0000000
--- a/aom_dsp/mips/common_dspr2.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/common_dspr2.h"
-
-#if HAVE_DSPR2
-uint8_t aom_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
-uint8_t *aom_ff_cropTbl;
-
-void aom_dsputil_static_init(void) {
-  int i;
-
-  for (i = 0; i < 256; i++) aom_ff_cropTbl_a[i + CROP_WIDTH] = i;
-
-  for (i = 0; i < CROP_WIDTH; i++) {
-    aom_ff_cropTbl_a[i] = 0;
-    aom_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
-  }
-
-  aom_ff_cropTbl = &aom_ff_cropTbl_a[CROP_WIDTH];
-}
-
-#endif
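
The table built above is a branch-free clamp: once initialized, aom_ff_cropTbl[v] equals clamp(v, 0, 255) for any v in [-CROP_WIDTH, 255 + CROP_WIDTH). The DSPR2 filter kernels index it with `lbux` (see the `%[Temp1](%[cm])` operands further down) to clip filter output to pixel range, i.e. each lookup is equivalent to:

#include <stdint.h>

static uint8_t crop_tbl_equiv_sketch(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}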
diff --git a/aom_dsp/mips/common_dspr2.h b/aom_dsp/mips/common_dspr2.h
deleted file mode 100644
index c42188d..0000000
--- a/aom_dsp/mips/common_dspr2.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_COMMON_DSPR2_H_
-#define AOM_AOM_DSP_MIPS_COMMON_DSPR2_H_
-
-#include <assert.h>
-
-#include "config/aom_config.h"
-
-#include "aom/aom_integer.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-#if HAVE_DSPR2
-#define CROP_WIDTH 512
-
-extern uint8_t *aom_ff_cropTbl;  // From "aom_dsp/mips/intrapred4_dspr2.c"
-
-static INLINE void prefetch_load(const unsigned char *src) {
-  __asm__ __volatile__("pref   0,  0(%[src])   \n\t" : : [src] "r"(src));
-}
-
-/* prefetch data for store */
-static INLINE void prefetch_store(unsigned char *dst) {
-  __asm__ __volatile__("pref   1,  0(%[dst])   \n\t" : : [dst] "r"(dst));
-}
-
-static INLINE void prefetch_load_streamed(const unsigned char *src) {
-  __asm__ __volatile__("pref   4,  0(%[src])   \n\t" : : [src] "r"(src));
-}
-
-/* prefetch data for store */
-static INLINE void prefetch_store_streamed(unsigned char *dst) {
-  __asm__ __volatile__("pref   5,  0(%[dst])   \n\t" : : [dst] "r"(dst));
-}
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // AOM_AOM_DSP_MIPS_COMMON_DSPR2_H_
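
The helpers above wrap the MIPS `pref` instruction: hint 0 prefetches for load, hint 1 for store, and hints 4/5 are the streamed (use-once) variants. For comparison, the closest portable analogue on GCC/Clang is __builtin_prefetch; the sketch below maps the streamed hints to locality 0 (non-temporal), which is an assumption rather than an exact equivalence:

static inline void prefetch_load_sketch(const unsigned char *src) {
  __builtin_prefetch(src, /*rw=*/0, /*locality=*/3);
}
static inline void prefetch_store_sketch(unsigned char *dst) {
  __builtin_prefetch(dst, /*rw=*/1, /*locality=*/3);
}
static inline void prefetch_load_streamed_sketch(const unsigned char *src) {
  __builtin_prefetch(src, /*rw=*/0, /*locality=*/0);
}
static inline void prefetch_store_streamed_sketch(unsigned char *dst) {
  __builtin_prefetch(dst, /*rw=*/1, /*locality=*/0);
}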
diff --git a/aom_dsp/mips/convolve2_dspr2.c b/aom_dsp/mips/convolve2_dspr2.c
deleted file mode 100644
index 08bf1ab..0000000
--- a/aom_dsp/mips/convolve2_dspr2.c
+++ /dev/null
@@ -1,1031 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/aom_filter.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_horiz_4_transposed_dspr2(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint8_t *dst_ptr;
-  int32_t Temp1, Temp2;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    dst_ptr = dst;
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp2](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[p1],          0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[p2],          0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2),
-          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [dst_ptr] "+r"(dst_ptr)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [src] "r"(src), [dst_stride] "r"(dst_stride));
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
-
-static void convolve_bi_horiz_8_transposed_dspr2(
-    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
-    const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint8_t *dst_ptr;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3;
-  uint32_t p1, p2, p3, p4;
-  uint8_t *odd_dst;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    dst_ptr = dst;
-    odd_dst = (dst_ptr + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],         0(%[src])                       \n\t"
-        "ulw              %[tp2],         4(%[src])                       \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                          \n\t"
-        "ulw              %[tp3],         8(%[src])                       \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
-        "extp             %[Temp1],       $ac3,           31              \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "balign           %[tp3],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[filter45]     \n\t"
-        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
-        "extp             %[p3],          $ac1,           31              \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        "lbux             %[Temp1],         %[p3](%[cm])                    "
-        "\n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"
-        "sb               %[Temp1],       0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[filter45]     \n\t"
-        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "extp             %[Temp3],       $ac1,           31              \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[filter45]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp3],         0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
-        "extp             %[Temp1],       $ac2,           31              \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"
-        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"
-        "lbux             %[p1],          %[Temp1](%[cm])                 \n\t"
-
-        /* store bytes */
-        "sb               %[p4],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[p2],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[p1],          0(%[odd_dst])                   \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1),
-          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1),
-          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [dst_ptr] "+r"(dst_ptr),
-          [odd_dst] "+r"(odd_dst)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
-
-static void convolve_bi_horiz_16_transposed_dspr2(
-    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
-    int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) {
-  int32_t c, y;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t *odd_dst;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],        0(%[src])                       "
-          "\n\t"
-          "ulw              %[qload2],        4(%[src])                       "
-          "\n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       "
-          "\n\t"
-          "ulw              %[qload1],        8(%[src])                       "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
-          "\n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       "
-          "\n\t"
-          "ulw              %[qload2],        12(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     "
-          "\n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
-          "          \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     "
-          "\n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     "
-          "\n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "sb               %[st3],           0(%[dst])                       "
-          "\n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     "
-          "\n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload1],        20(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     "
-          "\n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
-          "\n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
-          "\n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       "
-          "\n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       "
-          "\n\t"
-          "ulw              %[qload2],        5(%[src])                       "
-          "\n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload2],        9(%[src])                       "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     "
-          "\n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
-          "\n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
-          "\n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     "
-          "\n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "sb               %[st2],           0(%[odd_dst])                   "
-          "\n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
-          "\n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload1],        21(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     "
-          "\n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     "
-          "\n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     "
-          "\n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   "
-          "\n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
-            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
-            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
-            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
-            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
-
-      src += 16;
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += 1;
-  }
-}
-
-static void convolve_bi_horiz_64_transposed_dspr2(
-    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
-    int32_t dst_stride, const int16_t *filter_x0, int32_t h) {
-  int32_t c, y;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t *odd_dst;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],        0(%[src])                       "
-          "\n\t"
-          "ulw              %[qload2],        4(%[src])                       "
-          "\n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       "
-          "\n\t"
-          "ulw              %[qload1],        8(%[src])                       "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
-          "\n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       "
-          "\n\t"
-          "ulw              %[qload2],        12(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     "
-          "\n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
-          "          \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     "
-          "\n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     "
-          "\n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "sb               %[st3],           0(%[dst])                       "
-          "\n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     "
-          "\n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload1],        20(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     "
-          "\n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
-          "\n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
-          "\n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       "
-          "\n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       "
-          "\n\t"
-          "ulw              %[qload2],        5(%[src])                       "
-          "\n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       "
-          "\n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[dst])                       "
-          "\n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload2],        9(%[src])                       "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     "
-          "\n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       "
-          "\n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       "
-          "\n\t"
-          "sb               %[st2],           0(%[dst])                       "
-          "\n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
-          "\n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
-          "\n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     "
-          "\n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            "
-          "\n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            "
-          "\n\t"
-          "sb               %[st2],           0(%[odd_dst])                   "
-          "\n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
-          "\n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            "
-          "\n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            "
-          "\n\t"
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "ulw              %[qload1],        21(%[src])                      "
-          "\n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     "
-          "\n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              "
-          "\n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            "
-          "\n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            "
-          "\n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       "
-          "\n\t"
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     "
-          "\n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              "
-          "\n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     "
-          "\n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              "
-          "\n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 "
-          "\n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 "
-          "\n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 "
-          "\n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   "
-          "\n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   "
-          "\n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
-          "\n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   "
-          "\n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
-            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
-            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
-            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
-            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
-
-      src += 16;
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += 1;
-  }
-}
-
-void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter, int w, int h) {
-  int x, y;
-
-  for (y = 0; y < h; ++y) {
-    for (x = 0; x < w; ++x) {
-      int sum = 0;
-
-      sum += src[x] * filter[3];
-      sum += src[x + 1] * filter[4];
-
-      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
-    }
-
-    src += src_stride;
-    dst += 1;
-  }
-}
-
-void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
-                         ptrdiff_t dst_stride, const int16_t *filter, int w,
-                         int h) {
-  uint32_t pos = 38;
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                       :
-                       : [pos] "r"(pos));
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-
-  switch (w) {
-    case 4:
-      convolve_bi_horiz_4_transposed_dspr2(src, src_stride, dst, dst_stride,
-                                           filter, h);
-      break;
-    case 8:
-      convolve_bi_horiz_8_transposed_dspr2(src, src_stride, dst, dst_stride,
-                                           filter, h);
-      break;
-    case 16:
-    case 32:
-      convolve_bi_horiz_16_transposed_dspr2(src, src_stride, dst, dst_stride,
-                                            filter, h, (w / 16));
-      break;
-    case 64:
-      prefetch_load(src + 32);
-      convolve_bi_horiz_64_transposed_dspr2(src, src_stride, dst, dst_stride,
-                                            filter, h);
-      break;
-    default:
-      convolve_bi_horiz_transposed(src, src_stride, dst, dst_stride, filter, w,
-                                   h);
-      break;
-  }
-}
-#endif
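The C fallback convolve_bi_horiz_transposed above already spells out the per-pixel math; the asm paths compute the same thing with both taps packed into one word (filter45 = ((const int32_t *)filter)[0]) so dpa.w.ph can multiply-accumulate the pair in one instruction, with the 64 preloaded via mtlo acting as the rounding term. A condensed sketch of one output pixel, assuming the usual FILTER_BITS == 7 convention:

#include <stdint.h>

/* One 2-tap output: dot product, round, shift, clamp. The asm performs
 * the final clamp as a table lookup through aom_ff_cropTbl. */
static uint8_t bi_tap_scalar(const uint8_t *src, const int16_t *filter) {
  int32_t sum = src[0] * filter[3] + src[1] * filter[4];
  sum = (sum + 64) >> 7; /* ROUND_POWER_OF_TWO(sum, FILTER_BITS) */
  if (sum < 0) sum = 0;
  if (sum > 255) sum = 255;
  return (uint8_t)sum;
}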
diff --git a/aom_dsp/mips/convolve2_horiz_dspr2.c b/aom_dsp/mips/convolve2_horiz_dspr2.c
deleted file mode 100644
index 097da73..0000000
--- a/aom_dsp/mips/convolve2_horiz_dspr2.c
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                      uint8_t *dst, int32_t dst_stride,
-                                      const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp4],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p1],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp4](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],      0(%[dst])                      \n\t"
-        "sb               %[p1],       1(%[dst])                      \n\t"
-        "sb               %[tp2],      2(%[dst])                      \n\t"
-        "sb               %[p2],       3(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2),
-          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
-          [Temp4] "=&r"(Temp4)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [dst] "r"(dst), [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
-                                      uint8_t *dst, int32_t dst_stride,
-                                      const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3;
-  uint32_t p1, p2, p3, p4;
-  uint32_t st0, st1;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "ulw              %[tp3],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac1,           31             \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "sb               %[st0],      0(%[dst])                      \n\t"
-        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
-
-        "balign           %[tp3],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-
-        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "sb               %[st1],      2(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp3]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp3]                         \n\t"
-        "sb               %[st0],      4(%[dst])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac1,           31             \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[st1],      1(%[dst])                      \n\t"
-        "sb               %[st0],      6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[p1],       %[Temp1](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[p4],       3(%[dst])                      \n\t"
-        "sb               %[p2],       5(%[dst])                      \n\t"
-        "sb               %[p1],       7(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
-          [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2),
-          [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1),
-          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
-          [dst] "r"(dst), [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride, uint8_t *dst_ptr,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0, int32_t h,
-                                       int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
-            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
-            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [dst] "r"(dst), [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride, uint8_t *dst_ptr,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0, int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
-            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
-            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
-            [dst] "r"(dst), [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4, int w,
-                               int h) {
-  uint32_t pos = 38;
-
-  assert(x_step_q4 == 16);
-
-  prefetch_load((const uint8_t *)filter_x);
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                       :
-                       : [pos] "r"(pos));
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-      convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride, dst,
-                                (int32_t)dst_stride, filter_x, (int32_t)h);
-      break;
-    case 8:
-      convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride, dst,
-                                (int32_t)dst_stride, filter_x, (int32_t)h);
-      break;
-    case 16:
-      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst,
-                                 (int32_t)dst_stride, filter_x, (int32_t)h, 1);
-      break;
-    case 32:
-      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst,
-                                 (int32_t)dst_stride, filter_x, (int32_t)h, 2);
-      break;
-    case 64:
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride, dst,
-                                 (int32_t)dst_stride, filter_x, (int32_t)h);
-      break;
-    default:
-      aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
-                            x_step_q4, filter_y, y_step_q4, w, h);
-      break;
-  }
-}
-#endif
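
For reference, the removed aom_convolve2_horiz_dspr2 path produces the same
output as the generic aom_convolve8_horiz_c it falls back to for unsupported
widths: a 2-tap ("bilinear") horizontal FIR, rounded and shifted as
(sum + 64) >> FILTER_BITS (the mtlo 64 / wrdsp / extp sequence, FILTER_BITS
being 7), then clamped through aom_ff_cropTbl. Below is a minimal scalar
sketch of that behavior, assuming the kernel's only nonzero taps are
filter_x[3] and filter_x[4] (matching the packed filter45 word in the asm);
names suffixed _sketch are hypothetical helpers, not aom API:

  #include <stdint.h>

  static uint8_t clip_pixel_sketch(int v) {
    return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }

  static void convolve_bi_horiz_sketch(const uint8_t *src, int src_stride,
                                       uint8_t *dst, int dst_stride,
                                       const int16_t *filter_x, int w, int h) {
    for (int y = 0; y < h; ++y) {
      for (int x = 0; x < w; ++x) {
        /* Two horizontally adjacent samples per output; the removed asm
         * unrolls this 4/8/16 outputs wide and pairs the samples inside
         * dpa.w.ph dual multiply-accumulates. */
        int sum = src[x] * filter_x[3] + src[x + 1] * filter_x[4];
        dst[x] = clip_pixel_sketch((sum + 64) >> 7); /* round, shift, clamp */
      }
      src += src_stride;
      dst += dst_stride;
    }
  }

Since the dispatch already routed every unsupported width to the C path,
removing the DSPR2 variants changes performance on MIPS only, not output.
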
diff --git a/aom_dsp/mips/convolve2_vert_dspr2.c b/aom_dsp/mips/convolve2_vert_dspr2.c
deleted file mode 100644
index 40abfd8..0000000
--- a/aom_dsp/mips/convolve2_vert_dspr2.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                     uint8_t *dst, int32_t dst_stride,
-                                     const int16_t *filter_y, int32_t w,
-                                     int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2;
-  uint32_t p1, p2;
-  uint32_t scratch1;
-  uint32_t store1, store2;
-  int32_t Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
-            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
-            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
-                                      uint8_t *dst, int32_t dst_stride,
-                                      const int16_t *filter_y, int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2;
-  uint32_t p1, p2;
-  uint32_t scratch1;
-  uint32_t store1, store2;
-  int32_t Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
-            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
-            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4, int w,
-                              int h) {
-  uint32_t pos = 38;
-
-  assert(y_step_q4 == 16);
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                       :
-                       : [pos] "r"(pos));
-
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-    case 8:
-    case 16:
-    case 32:
-      convolve_bi_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w,
-                               h);
-      break;
-    case 64:
-      prefetch_store(dst + 32);
-      convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
-      break;
-    default:
-      aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                           x_step_q4, filter_y, y_step_q4, w, h);
-      break;
-  }
-}
-#endif
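
The removed vertical counterpart is the same 2-tap kernel applied down a
column: each output blends src[x] with src[x + src_stride] using filter_y[3]
and filter_y[4], with identical rounding and clamping. A matching scalar
sketch, reusing the hypothetical clip_pixel_sketch helper above:

  static void convolve_bi_vert_sketch(const uint8_t *src, int src_stride,
                                      uint8_t *dst, int dst_stride,
                                      const int16_t *filter_y, int w, int h) {
    for (int y = 0; y < h; ++y) {
      for (int x = 0; x < w; ++x) {
        /* One vertical sample pair per output; the removed asm computed four
         * adjacent outputs per block on the $ac0..$ac3 accumulators. */
        int sum = src[x] * filter_y[3] + src[x + src_stride] * filter_y[4];
        dst[x] = clip_pixel_sketch((sum + 64) >> 7);
      }
      src += src_stride;
      dst += dst_stride;
    }
  }
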
diff --git a/aom_dsp/mips/convolve8_horiz_dspr2.c b/aom_dsp/mips/convolve8_horiz_dspr2.c
deleted file mode 100644
index f9c6879..0000000
--- a/aom_dsp/mips/convolve8_horiz_dspr2.c
+++ /dev/null
@@ -1,879 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/aom_filter.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                   uint8_t *dst, int32_t dst_stride,
-                                   const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4;
-  uint32_t n1, n2, n3, n4;
-  uint32_t tn1, tn2;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "ulw              %[tn2],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
-        "balign           %[tn1],      %[tn2],         3              \n\t"
-        "balign           %[tn2],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n4],       %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[n1],       %[tn1]                         \n\t"
-        "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp4],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
-        "lbux             %[n2],       %[Temp4](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],      0(%[dst])                      \n\t"
-        "sb               %[tn1],      1(%[dst])                      \n\t"
-        "sb               %[tp2],      2(%[dst])                      \n\t"
-        "sb               %[n2],       3(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
-          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
-          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
-          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
-        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
-          [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
-                                   uint8_t *dst, int32_t dst_stride,
-                                   const int16_t *filter_x0, int32_t h) {
-  int32_t y;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t tn1, tn2, tn3;
-  uint32_t st0, st1;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__(
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "ulw              %[tn2],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
-        "ulw              %[tn1],      12(%[src])                     \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "preceu.ph.qbr    %[p2],       %[tn1]                         \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac1,           31             \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "sb               %[st0],      0(%[dst])                      \n\t"
-        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
-
-        "balign           %[tn3],      %[tn1],         3              \n\t"
-        "balign           %[tn1],      %[tn2],         3              \n\t"
-        "balign           %[tn2],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-
-        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "sb               %[st1],      2(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tn2]                         \n\t"
-        "sb               %[st0],      4(%[dst])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tn1]                         \n\t"
-        "preceu.ph.qbl    %[n1],       %[tn1]                         \n\t"
-        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac1,           31             \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
-        "preceu.ph.qbr    %[p2],       %[tn3]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[st1],      1(%[dst])                      \n\t"
-        "sb               %[st0],      6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[n1],       %[Temp1](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[p4],       3(%[dst])                      \n\t"
-        "sb               %[p2],       5(%[dst])                      \n\t"
-        "sb               %[n1],       7(%[dst])                      \n\t"
-
-        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
-          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
-          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
-          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
-          [src] "r"(src));
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, int32_t src_stride,
-                                    uint8_t *dst_ptr, int32_t dst_stride,
-                                    const int16_t *filter_x0, int32_t h,
-                                    int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
-            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
-            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-          : [filter12] "r"(filter12), [filter34] "r"(filter34),
-            [filter56] "r"(filter56), [filter78] "r"(filter78),
-            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
-            [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
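The 16-wide kernel above rotates three accumulators ($ac1..$ac3) so the
extract and store of one pixel overlap the multiply-accumulate of the next,
and it splits each 16-pixel block into eight even outputs (windows starting
at src[0], stored to dst[0], dst[2], ..., dst[14]) followed by eight odd
outputs (windows starting at src[1], stored to dst[1], dst[3], ...,
dst[15]), so neighbouring windows share most of their loads. A scalar
sketch of that schedule, reusing the hypothetical convolve8_pixel_ref
helper sketched earlier:

static void convolve8_16_ref(const uint8_t *src, uint8_t *dst,
                             const int16_t *filter) {
  for (int i = 0; i < 8; ++i) /* even outputs */
    dst[2 * i] = convolve8_pixel_ref(src + 2 * i, filter);
  for (int i = 0; i < 8; ++i) /* odd outputs */
    dst[2 * i + 1] = convolve8_pixel_ref(src + 2 * i + 1, filter);
}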
-static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride,
-                                    uint8_t *dst_ptr, int32_t dst_stride,
-                                    const int16_t *filter_x0, int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__(
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
-            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
-            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
-            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
-          : [filter12] "r"(filter12), [filter34] "r"(filter34),
-            [filter56] "r"(filter56), [filter78] "r"(filter78),
-            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
-            [src] "r"(src));
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void aom_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4, int w,
-                               int h) {
-  assert(x_step_q4 == 16);
-  assert(((const int32_t *)filter_x)[1] != 0x800000);
-
-  if (((const int32_t *)filter_x)[0] == 0) {
-    aom_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                              x_step_q4, filter_y, y_step_q4, w, h);
-  } else {
-    uint32_t pos = 38;
-
-    prefetch_load((const uint8_t *)filter_x);
-    src -= 3;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                         :
-                         : [pos] "r"(pos));
-
-    /* prefetch data to cache memory */
-    prefetch_load(src);
-    prefetch_load(src + 32);
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-        convolve_horiz_4_dspr2(src, (int32_t)src_stride, dst,
-                               (int32_t)dst_stride, filter_x, (int32_t)h);
-        break;
-      case 8:
-        convolve_horiz_8_dspr2(src, (int32_t)src_stride, dst,
-                               (int32_t)dst_stride, filter_x, (int32_t)h);
-        break;
-      case 16:
-        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
-                                (int32_t)dst_stride, filter_x, (int32_t)h, 1);
-        break;
-      case 32:
-        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
-                                (int32_t)dst_stride, filter_x, (int32_t)h, 2);
-        break;
-      case 64:
-        prefetch_load(src + 64);
-        prefetch_store(dst + 32);
-
-        convolve_horiz_64_dspr2(src, (int32_t)src_stride, dst,
-                                (int32_t)dst_stride, filter_x, (int32_t)h);
-        break;
-      default:
-        aom_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
-                              x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
-#endif
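For reference, the dispatcher above reads the eight int16 taps as four
packed int32 pairs. Pair [0] being zero (taps 0 and 1 both zero, which
holds for the short bilinear-class filters) routes to the 2-tap kernel,
and the assert on pair [1] appears to reject tap 2 == 0 with tap 3 == 128
(unity gain in Q7, i.e. a pure copy filter handled elsewhere), assuming
the little-endian layout this code ran on. A hedged sketch of those two
tests, with hypothetical helper names:

#include <stdint.h>

static int taps01_zero(const int16_t *filter) {
  return ((const int32_t *)filter)[0] == 0; /* take the 2-tap path */
}

static int is_copy_filter(const int16_t *filter) {
  /* 0x800000: tap 2 == 0x0000, tap 3 == 0x0080 == 128 */
  return ((const int32_t *)filter)[1] == 0x800000;
}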
diff --git a/aom_dsp/mips/convolve8_vert_dspr2.c b/aom_dsp/mips/convolve8_vert_dspr2.c
deleted file mode 100644
index 201e664..0000000
--- a/aom_dsp/mips/convolve8_vert_dspr2.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/aom_filter.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
-                                  uint8_t *dst, int32_t dst_stride,
-                                  const int16_t *filter_y, int32_t w,
-                                  int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
-            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
-            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
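A scalar model of convolve_vert_4_dspr2's data flow may be easier to follow
than the pipelined assembly: the source is backed up three rows to center
the 8-tap window, each iteration filters down one column, and the result
goes through the same round-shift-clamp as the horizontal path. A minimal
sketch, assuming FILTER_BITS == 7:

#include <stdint.h>

static void convolve8_vert_ref(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int16_t *filter, int w, int h) {
  src -= 3 * src_stride; /* center the 8-tap window, as above */
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      int32_t sum = 64; /* rounding constant */
      for (int k = 0; k < 8; ++k) sum += src[k * src_stride + x] * filter[k];
      sum >>= 7;
      dst[x] = (sum < 0) ? 0 : (sum > 255) ? 255 : (uint8_t)sum;
    }
    src += src_stride;
    dst += dst_stride;
  }
}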
-static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
-                                   uint8_t *dst, int32_t dst_stride,
-                                   const int16_t *filter_y, int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = aom_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__(
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
-            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
-            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
-            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
-            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
-            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
-          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
-            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
-            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
-            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void aom_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4, int w,
-                              int h) {
-  assert(y_step_q4 == 16);
-  assert(((const int32_t *)filter_y)[1] != 0x800000);
-
-  if (((const int32_t *)filter_y)[0] == 0) {
-    aom_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
-                             x_step_q4, filter_y, y_step_q4, w, h);
-  } else {
-    uint32_t pos = 38;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
-                         :
-                         : [pos] "r"(pos));
-
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-      case 8:
-      case 16:
-      case 32:
-        convolve_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, h);
-        break;
-      case 64:
-        prefetch_store(dst + 32);
-        convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
-        break;
-      default:
-        aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
-                             x_step_q4, filter_y, y_step_q4, w, h);
-        break;
-    }
-  }
-}
-
-#endif
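Both convolve files lean on the prefetch_load()/prefetch_store() helpers
from aom_dsp/mips/common_dspr2.h, which wrapped the MIPS pref instruction
to pull the next row into cache ahead of use. A portable stand-in with the
same intent (hint-only, using the GCC/Clang builtin; the _ref names are
placeholders):

static inline void prefetch_load_ref(const void *p) {
  __builtin_prefetch(p, 0, 1); /* prepare-for-read hint */
}

static inline void prefetch_store_ref(void *p) {
  __builtin_prefetch(p, 1, 1); /* prepare-for-write hint */
}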
diff --git a/aom_dsp/mips/convolve_common_dspr2.h b/aom_dsp/mips/convolve_common_dspr2.h
deleted file mode 100644
index e5d48a8..0000000
--- a/aom_dsp/mips/convolve_common_dspr2.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
-#define AOM_AOM_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
-
-#include <assert.h>
-
-#include "config/aom_config.h"
-
-#include "aom/aom_integer.h"
-#include "aom_dsp/mips/common_dspr2.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4, int w,
-                               int h);
-
-void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
-                         ptrdiff_t dst_stride, const int16_t *filter, int w,
-                         int h);
-
-void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4, int w,
-                              int h);
-
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // AOM_AOM_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
diff --git a/aom_dsp/mips/intrapred16_dspr2.c b/aom_dsp/mips/intrapred16_dspr2.c
deleted file mode 100644
index 7c221ae..0000000
--- a/aom_dsp/mips/intrapred16_dspr2.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/common_dspr2.h"
-
-#if HAVE_DSPR2
-void aom_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
-                                 const uint8_t *above, const uint8_t *left) {
-  int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
-  int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
-
-  (void)above;
-
-  __asm__ __volatile__(
-      "lb         %[tmp1],      (%[left])                    \n\t"
-      "lb         %[tmp2],      1(%[left])                   \n\t"
-      "lb         %[tmp3],      2(%[left])                   \n\t"
-      "lb         %[tmp4],      3(%[left])                   \n\t"
-      "lb         %[tmp5],      4(%[left])                   \n\t"
-      "lb         %[tmp6],      5(%[left])                   \n\t"
-      "lb         %[tmp7],      6(%[left])                   \n\t"
-      "lb         %[tmp8],      7(%[left])                   \n\t"
-      "lb         %[tmp9],      8(%[left])                   \n\t"
-      "lb         %[tmp10],     9(%[left])                   \n\t"
-      "lb         %[tmp11],     10(%[left])                  \n\t"
-      "lb         %[tmp12],     11(%[left])                  \n\t"
-      "lb         %[tmp13],     12(%[left])                  \n\t"
-      "lb         %[tmp14],     13(%[left])                  \n\t"
-      "lb         %[tmp15],     14(%[left])                  \n\t"
-      "lb         %[tmp16],     15(%[left])                  \n\t"
-
-      "replv.qb   %[tmp1],      %[tmp1]                      \n\t"
-      "replv.qb   %[tmp2],      %[tmp2]                      \n\t"
-      "replv.qb   %[tmp3],      %[tmp3]                      \n\t"
-      "replv.qb   %[tmp4],      %[tmp4]                      \n\t"
-      "replv.qb   %[tmp5],      %[tmp5]                      \n\t"
-      "replv.qb   %[tmp6],      %[tmp6]                      \n\t"
-      "replv.qb   %[tmp7],      %[tmp7]                      \n\t"
-      "replv.qb   %[tmp8],      %[tmp8]                      \n\t"
-      "replv.qb   %[tmp9],      %[tmp9]                      \n\t"
-      "replv.qb   %[tmp10],     %[tmp10]                     \n\t"
-      "replv.qb   %[tmp11],     %[tmp11]                     \n\t"
-      "replv.qb   %[tmp12],     %[tmp12]                     \n\t"
-      "replv.qb   %[tmp13],     %[tmp13]                     \n\t"
-      "replv.qb   %[tmp14],     %[tmp14]                     \n\t"
-      "replv.qb   %[tmp15],     %[tmp15]                     \n\t"
-      "replv.qb   %[tmp16],     %[tmp16]                     \n\t"
-
-      "sw         %[tmp1],      (%[dst])                     \n\t"
-      "sw         %[tmp1],      4(%[dst])                    \n\t"
-      "sw         %[tmp1],      8(%[dst])                    \n\t"
-      "sw         %[tmp1],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp2],      (%[dst])                     \n\t"
-      "sw         %[tmp2],      4(%[dst])                    \n\t"
-      "sw         %[tmp2],      8(%[dst])                    \n\t"
-      "sw         %[tmp2],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp3],      (%[dst])                     \n\t"
-      "sw         %[tmp3],      4(%[dst])                    \n\t"
-      "sw         %[tmp3],      8(%[dst])                    \n\t"
-      "sw         %[tmp3],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp4],      (%[dst])                     \n\t"
-      "sw         %[tmp4],      4(%[dst])                    \n\t"
-      "sw         %[tmp4],      8(%[dst])                    \n\t"
-      "sw         %[tmp4],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp5],      (%[dst])                     \n\t"
-      "sw         %[tmp5],      4(%[dst])                    \n\t"
-      "sw         %[tmp5],      8(%[dst])                    \n\t"
-      "sw         %[tmp5],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp6],      (%[dst])                     \n\t"
-      "sw         %[tmp6],      4(%[dst])                    \n\t"
-      "sw         %[tmp6],      8(%[dst])                    \n\t"
-      "sw         %[tmp6],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp7],      (%[dst])                     \n\t"
-      "sw         %[tmp7],      4(%[dst])                    \n\t"
-      "sw         %[tmp7],      8(%[dst])                    \n\t"
-      "sw         %[tmp7],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp8],      (%[dst])                     \n\t"
-      "sw         %[tmp8],      4(%[dst])                    \n\t"
-      "sw         %[tmp8],      8(%[dst])                    \n\t"
-      "sw         %[tmp8],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp9],      (%[dst])                     \n\t"
-      "sw         %[tmp9],      4(%[dst])                    \n\t"
-      "sw         %[tmp9],      8(%[dst])                    \n\t"
-      "sw         %[tmp9],      12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp10],     (%[dst])                     \n\t"
-      "sw         %[tmp10],     4(%[dst])                    \n\t"
-      "sw         %[tmp10],     8(%[dst])                    \n\t"
-      "sw         %[tmp10],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp11],     (%[dst])                     \n\t"
-      "sw         %[tmp11],     4(%[dst])                    \n\t"
-      "sw         %[tmp11],     8(%[dst])                    \n\t"
-      "sw         %[tmp11],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp12],     (%[dst])                     \n\t"
-      "sw         %[tmp12],     4(%[dst])                    \n\t"
-      "sw         %[tmp12],     8(%[dst])                    \n\t"
-      "sw         %[tmp12],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp13],     (%[dst])                     \n\t"
-      "sw         %[tmp13],     4(%[dst])                    \n\t"
-      "sw         %[tmp13],     8(%[dst])                    \n\t"
-      "sw         %[tmp13],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp14],     (%[dst])                     \n\t"
-      "sw         %[tmp14],     4(%[dst])                    \n\t"
-      "sw         %[tmp14],     8(%[dst])                    \n\t"
-      "sw         %[tmp14],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp15],     (%[dst])                     \n\t"
-      "sw         %[tmp15],     4(%[dst])                    \n\t"
-      "sw         %[tmp15],     8(%[dst])                    \n\t"
-      "sw         %[tmp15],     12(%[dst])                   \n\t"
-
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp16],     (%[dst])                     \n\t"
-      "sw         %[tmp16],     4(%[dst])                    \n\t"
-      "sw         %[tmp16],     8(%[dst])                    \n\t"
-      "sw         %[tmp16],     12(%[dst])                   \n\t"
-
-      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
-        [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7),
-        [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8), [tmp9] "=&r"(tmp9),
-        [tmp10] "=&r"(tmp10), [tmp11] "=&r"(tmp11), [tmp12] "=&r"(tmp12),
-        [tmp13] "=&r"(tmp13), [tmp14] "=&r"(tmp14), [tmp15] "=&r"(tmp15),
-        [tmp16] "=&r"(tmp16)
-      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
-}
-
-void aom_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
-                                  const uint8_t *above, const uint8_t *left) {
-  int32_t expected_dc;
-  int32_t average;
-  int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
-  int32_t above2, left2;
-
-  __asm__ __volatile__(
-      "lw              %[above1],           (%[above])                    \n\t"
-      "lw              %[above2],           4(%[above])                   \n\t"
-      "lw              %[left1],            (%[left])                     \n\t"
-      "lw              %[left2],            4(%[left])                    \n\t"
-
-      "preceu.ph.qbl   %[above_l1],         %[above1]                     \n\t"
-      "preceu.ph.qbr   %[above_r1],         %[above1]                     \n\t"
-      "preceu.ph.qbl   %[left_l1],          %[left1]                      \n\t"
-      "preceu.ph.qbr   %[left_r1],          %[left1]                      \n\t"
-
-      "addu.ph         %[average],          %[above_r1],     %[above_l1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[left_l1]   \n\t"
-      "addu.ph         %[average],          %[average],      %[left_r1]   \n\t"
-
-      "preceu.ph.qbl   %[above_l1],         %[above2]                     \n\t"
-      "preceu.ph.qbr   %[above_r1],         %[above2]                     \n\t"
-      "preceu.ph.qbl   %[left_l1],          %[left2]                      \n\t"
-      "preceu.ph.qbr   %[left_r1],          %[left2]                      \n\t"
-
-      "addu.ph         %[average],          %[average],      %[above_l1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[above_r1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[left_l1]   \n\t"
-      "addu.ph         %[average],          %[average],      %[left_r1]   \n\t"
-
-      "lw              %[above1],           8(%[above])                   \n\t"
-      "lw              %[above2],           12(%[above])                  \n\t"
-      "lw              %[left1],            8(%[left])                    \n\t"
-      "lw              %[left2],            12(%[left])                   \n\t"
-
-      "preceu.ph.qbl   %[above_l1],         %[above1]                     \n\t"
-      "preceu.ph.qbr   %[above_r1],         %[above1]                     \n\t"
-      "preceu.ph.qbl   %[left_l1],          %[left1]                      \n\t"
-      "preceu.ph.qbr   %[left_r1],          %[left1]                      \n\t"
-
-      "addu.ph         %[average],          %[average],      %[above_l1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[above_r1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[left_l1]   \n\t"
-      "addu.ph         %[average],          %[average],      %[left_r1]   \n\t"
-
-      "preceu.ph.qbl   %[above_l1],         %[above2]                     \n\t"
-      "preceu.ph.qbr   %[above_r1],         %[above2]                     \n\t"
-      "preceu.ph.qbl   %[left_l1],          %[left2]                      \n\t"
-      "preceu.ph.qbr   %[left_r1],          %[left2]                      \n\t"
-
-      "addu.ph         %[average],          %[average],      %[above_l1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[above_r1]  \n\t"
-      "addu.ph         %[average],          %[average],      %[left_l1]   \n\t"
-      "addu.ph         %[average],          %[average],      %[left_r1]   \n\t"
-
-      "addiu           %[average],          %[average],      16           \n\t"
-      "srl             %[tmp],              %[average],      16           \n\t"
-      "addu.ph         %[average],          %[tmp],          %[average]   \n\t"
-      "srl             %[expected_dc],      %[average],      5            \n\t"
-      "replv.qb        %[expected_dc],      %[expected_dc]                \n\t"
-
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      "add             %[dst],              %[dst],          %[stride]    \n\t"
-      "sw              %[expected_dc],      (%[dst])                      \n\t"
-      "sw              %[expected_dc],      4(%[dst])                     \n\t"
-      "sw              %[expected_dc],      8(%[dst])                     \n\t"
-      "sw              %[expected_dc],      12(%[dst])                    \n\t"
-
-      : [left1] "=&r"(left1), [above1] "=&r"(above1), [left_l1] "=&r"(left_l1),
-        [above_l1] "=&r"(above_l1), [left_r1] "=&r"(left_r1),
-        [above_r1] "=&r"(above_r1), [above2] "=&r"(above2),
-        [left2] "=&r"(left2), [average] "=&r"(average), [tmp] "=&r"(tmp),
-        [expected_dc] "=&r"(expected_dc)
-      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
-        [stride] "r"(stride));
-}
-#endif  // #if HAVE_DSPR2
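
The deleted DC predictor keeps two 16-bit partial sums in the halfword lanes of
one register (built with the preceu.ph.qbl/qbr pairs) and folds them together
with the final srl/addu.ph before shifting. In scalar form the whole routine
reduces to a rounded average of the 32 neighbouring pixels; the sketch below is
an illustrative equivalent, not code from the tree.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scalar DC prediction: average the 16 above and 16 left neighbours with
   round-to-nearest, then fill the 16x16 block with that value. Adding 16
   before the shift by 5 rounds the sum of 32 samples. */
static void dc_predictor_16x16_scalar(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  int sum = 0;
  for (int i = 0; i < 16; ++i) sum += above[i] + left[i];
  const uint8_t dc = (uint8_t)((sum + 16) >> 5);
  for (int r = 0; r < 16; ++r, dst += stride) memset(dst, dc, 16);
}

The 8x8 and 4x4 variants deleted below follow the same pattern with rounding
constants 8 and 4 (half the sample count) and shifts of 4 and 3.
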
diff --git a/aom_dsp/mips/intrapred4_dspr2.c b/aom_dsp/mips/intrapred4_dspr2.c
deleted file mode 100644
index 0a21979..0000000
--- a/aom_dsp/mips/intrapred4_dspr2.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/common_dspr2.h"
-
-#if HAVE_DSPR2
-void aom_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
-                               const uint8_t *above, const uint8_t *left) {
-  int32_t tmp1, tmp2, tmp3, tmp4;
-  (void)above;
-
-  __asm__ __volatile__(
-      "lb         %[tmp1],      (%[left])                    \n\t"
-      "lb         %[tmp2],      1(%[left])                   \n\t"
-      "lb         %[tmp3],      2(%[left])                   \n\t"
-      "lb         %[tmp4],      3(%[left])                   \n\t"
-      "replv.qb   %[tmp1],      %[tmp1]                      \n\t"
-      "replv.qb   %[tmp2],      %[tmp2]                      \n\t"
-      "replv.qb   %[tmp3],      %[tmp3]                      \n\t"
-      "replv.qb   %[tmp4],      %[tmp4]                      \n\t"
-      "sw         %[tmp1],      (%[dst])                     \n\t"
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp2],      (%[dst])                     \n\t"
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp3],      (%[dst])                     \n\t"
-      "add        %[dst],       %[dst],         %[stride]    \n\t"
-      "sw         %[tmp4],      (%[dst])                     \n\t"
-
-      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
-        [tmp4] "=&r"(tmp4)
-      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
-}
-
-void aom_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
-                                const uint8_t *above, const uint8_t *left) {
-  int32_t expected_dc;
-  int32_t average;
-  int32_t tmp, above_c, above_l, above_r, left_c, left_r, left_l;
-
-  __asm__ __volatile__(
-      "lw              %[above_c],         (%[above])                    \n\t"
-      "lw              %[left_c],          (%[left])                     \n\t"
-
-      "preceu.ph.qbl   %[above_l],         %[above_c]                    \n\t"
-      "preceu.ph.qbr   %[above_r],         %[above_c]                    \n\t"
-      "preceu.ph.qbl   %[left_l],          %[left_c]                     \n\t"
-      "preceu.ph.qbr   %[left_r],          %[left_c]                     \n\t"
-
-      "addu.ph         %[average],         %[above_r],       %[above_l]  \n\t"
-      "addu.ph         %[average],         %[average],       %[left_l]   \n\t"
-      "addu.ph         %[average],         %[average],       %[left_r]   \n\t"
-      "addiu           %[average],         %[average],       4           \n\t"
-      "srl             %[tmp],             %[average],       16          \n\t"
-      "addu.ph         %[average],         %[tmp],           %[average]  \n\t"
-      "srl             %[expected_dc],     %[average],       3           \n\t"
-      "replv.qb        %[expected_dc],     %[expected_dc]                \n\t"
-
-      "sw              %[expected_dc],     (%[dst])                      \n\t"
-      "add             %[dst],              %[dst],          %[stride]   \n\t"
-      "sw              %[expected_dc],     (%[dst])                      \n\t"
-      "add             %[dst],              %[dst],          %[stride]   \n\t"
-      "sw              %[expected_dc],     (%[dst])                      \n\t"
-      "add             %[dst],              %[dst],          %[stride]   \n\t"
-      "sw              %[expected_dc],     (%[dst])                      \n\t"
-
-      : [above_c] "=&r"(above_c), [above_l] "=&r"(above_l),
-        [above_r] "=&r"(above_r), [left_c] "=&r"(left_c),
-        [left_l] "=&r"(left_l), [left_r] "=&r"(left_r),
-        [average] "=&r"(average), [tmp] "=&r"(tmp),
-        [expected_dc] "=&r"(expected_dc)
-      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
-        [stride] "r"(stride));
-}
-#endif  // #if HAVE_DSPR2
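
The horizontal predictors are pure byte replication: replv.qb copies the low
byte of a register into all four byte lanes, so each row is written with a
single sw store. A scalar equivalent, illustrative only, is one memset per row:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scalar horizontal prediction: every output row is a solid run of the
   corresponding left-neighbour pixel. */
static void h_predictor_4x4_scalar(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *left) {
  for (int r = 0; r < 4; ++r, dst += stride) memset(dst, left[r], 4);
}
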
diff --git a/aom_dsp/mips/intrapred8_dspr2.c b/aom_dsp/mips/intrapred8_dspr2.c
deleted file mode 100644
index d42a77c..0000000
--- a/aom_dsp/mips/intrapred8_dspr2.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/common_dspr2.h"
-
-#if HAVE_DSPR2
-void aom_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
-                               const uint8_t *above, const uint8_t *left) {
-  int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
-  (void)above;
-
-  __asm__ __volatile__(
-      "lb         %[tmp1],      (%[left])                   \n\t"
-      "lb         %[tmp2],      1(%[left])                  \n\t"
-      "lb         %[tmp3],      2(%[left])                  \n\t"
-      "lb         %[tmp4],      3(%[left])                  \n\t"
-      "lb         %[tmp5],      4(%[left])                  \n\t"
-      "lb         %[tmp6],      5(%[left])                  \n\t"
-      "lb         %[tmp7],      6(%[left])                  \n\t"
-      "lb         %[tmp8],      7(%[left])                  \n\t"
-
-      "replv.qb   %[tmp1],      %[tmp1]                     \n\t"
-      "replv.qb   %[tmp2],      %[tmp2]                     \n\t"
-      "replv.qb   %[tmp3],      %[tmp3]                     \n\t"
-      "replv.qb   %[tmp4],      %[tmp4]                     \n\t"
-      "replv.qb   %[tmp5],      %[tmp5]                     \n\t"
-      "replv.qb   %[tmp6],      %[tmp6]                     \n\t"
-      "replv.qb   %[tmp7],      %[tmp7]                     \n\t"
-      "replv.qb   %[tmp8],      %[tmp8]                     \n\t"
-
-      "sw         %[tmp1],      (%[dst])                    \n\t"
-      "sw         %[tmp1],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp2],      (%[dst])                    \n\t"
-      "sw         %[tmp2],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp3],      (%[dst])                    \n\t"
-      "sw         %[tmp3],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp4],      (%[dst])                    \n\t"
-      "sw         %[tmp4],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp5],      (%[dst])                    \n\t"
-      "sw         %[tmp5],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp6],      (%[dst])                    \n\t"
-      "sw         %[tmp6],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp7],      (%[dst])                    \n\t"
-      "sw         %[tmp7],      4(%[dst])                   \n\t"
-      "add        %[dst],       %[dst],         %[stride]   \n\t"
-      "sw         %[tmp8],      (%[dst])                    \n\t"
-      "sw         %[tmp8],      4(%[dst])                   \n\t"
-
-      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
-        [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7),
-        [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8)
-      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
-}
-
-void aom_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
-                                const uint8_t *above, const uint8_t *left) {
-  int32_t expected_dc;
-  int32_t average;
-  int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
-  int32_t above2, above_l2, above_r2, left2, left_r2, left_l2;
-
-  __asm__ __volatile__(
-      "lw              %[above1],         (%[above])                      \n\t"
-      "lw              %[above2],         4(%[above])                     \n\t"
-      "lw              %[left1],          (%[left])                       \n\t"
-      "lw              %[left2],          4(%[left])                      \n\t"
-
-      "preceu.ph.qbl   %[above_l1],       %[above1]                       \n\t"
-      "preceu.ph.qbr   %[above_r1],       %[above1]                       \n\t"
-      "preceu.ph.qbl   %[left_l1],        %[left1]                        \n\t"
-      "preceu.ph.qbr   %[left_r1],        %[left1]                        \n\t"
-
-      "preceu.ph.qbl   %[above_l2],       %[above2]                       \n\t"
-      "preceu.ph.qbr   %[above_r2],       %[above2]                       \n\t"
-      "preceu.ph.qbl   %[left_l2],        %[left2]                        \n\t"
-      "preceu.ph.qbr   %[left_r2],        %[left2]                        \n\t"
-
-      "addu.ph         %[average],        %[above_r1],      %[above_l1]   \n\t"
-      "addu.ph         %[average],        %[average],       %[left_l1]    \n\t"
-      "addu.ph         %[average],        %[average],       %[left_r1]    \n\t"
-
-      "addu.ph         %[average],        %[average],       %[above_l2]   \n\t"
-      "addu.ph         %[average],        %[average],       %[above_r2]   \n\t"
-      "addu.ph         %[average],        %[average],       %[left_l2]    \n\t"
-      "addu.ph         %[average],        %[average],       %[left_r2]    \n\t"
-
-      "addiu           %[average],        %[average],       8             \n\t"
-
-      "srl             %[tmp],            %[average],       16            \n\t"
-      "addu.ph         %[average],        %[tmp],           %[average]    \n\t"
-      "srl             %[expected_dc],    %[average],       4             \n\t"
-      "replv.qb        %[expected_dc],    %[expected_dc]                  \n\t"
-
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      "add             %[dst],             %[dst],          %[stride]     \n\t"
-      "sw              %[expected_dc],    (%[dst])                        \n\t"
-      "sw              %[expected_dc],    4(%[dst])                       \n\t"
-
-      : [above1] "=&r"(above1), [above_l1] "=&r"(above_l1),
-        [above_r1] "=&r"(above_r1), [left1] "=&r"(left1),
-        [left_l1] "=&r"(left_l1), [left_r1] "=&r"(left_r1),
-        [above2] "=&r"(above2), [above_l2] "=&r"(above_l2),
-        [above_r2] "=&r"(above_r2), [left2] "=&r"(left2),
-        [left_l2] "=&r"(left_l2), [left_r2] "=&r"(left_r2),
-        [average] "=&r"(average), [tmp] "=&r"(tmp),
-        [expected_dc] "=&r"(expected_dc)
-      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
-        [stride] "r"(stride));
-}
-#endif  // #if HAVE_DSPR2
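
The preceu.ph.qbl/qbr pairs used throughout these predictors split a word of
four packed bytes into two zero-extended halfword lanes each, so addu.ph can
run two 16-bit accumulators per register without overflow. A C emulation of
the two instructions, assuming the standard MIPS DSP ASE semantics:

#include <stdint.h>

/* preceu.ph.qbr: zero-extend bytes 0 and 1 of the word into the two
   halfword lanes of the result. */
static uint32_t preceu_ph_qbr(uint32_t w) {
  return (w & 0xff) | ((w & 0xff00) << 8);
}

/* preceu.ph.qbl: the same for bytes 2 and 3. Each 16-bit lane then
   accumulates at most 16 pixels plus the rounding constant, well under
   the 65535 lane limit. */
static uint32_t preceu_ph_qbl(uint32_t w) {
  return ((w >> 16) & 0xff) | ((w >> 8) & 0xff0000);
}
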
diff --git a/aom_dsp/mips/intrapred_msa.c b/aom_dsp/mips/intrapred_msa.c
deleted file mode 100644
index 9f25cc1..0000000
--- a/aom_dsp/mips/intrapred_msa.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom_dsp/mips/macros_msa.h"
-
-#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \
-  {                                             \
-    out0 = __msa_subs_u_h(out0, in0);           \
-    out1 = __msa_subs_u_h(out1, in1);           \
-  }
-
-static void intra_predict_vert_4x4_msa(const uint8_t *src, uint8_t *dst,
-                                       int32_t dst_stride) {
-  uint32_t src_data;
-
-  src_data = LW(src);
-
-  SW4(src_data, src_data, src_data, src_data, dst, dst_stride);
-}
-
-static void intra_predict_vert_8x8_msa(const uint8_t *src, uint8_t *dst,
-                                       int32_t dst_stride) {
-  uint32_t row;
-  uint32_t src_data1, src_data2;
-
-  src_data1 = LW(src);
-  src_data2 = LW(src + 4);
-
-  for (row = 8; row--;) {
-    SW(src_data1, dst);
-    SW(src_data2, (dst + 4));
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_vert_16x16_msa(const uint8_t *src, uint8_t *dst,
-                                         int32_t dst_stride) {
-  uint32_t row;
-  v16u8 src0;
-
-  src0 = LD_UB(src);
-
-  for (row = 16; row--;) {
-    ST_UB(src0, dst);
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_vert_32x32_msa(const uint8_t *src, uint8_t *dst,
-                                         int32_t dst_stride) {
-  uint32_t row;
-  v16u8 src1, src2;
-
-  src1 = LD_UB(src);
-  src2 = LD_UB(src + 16);
-
-  for (row = 32; row--;) {
-    ST_UB2(src1, src2, dst, 16);
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_horiz_4x4_msa(const uint8_t *src, uint8_t *dst,
-                                        int32_t dst_stride) {
-  uint32_t out0, out1, out2, out3;
-
-  out0 = src[0] * 0x01010101;
-  out1 = src[1] * 0x01010101;
-  out2 = src[2] * 0x01010101;
-  out3 = src[3] * 0x01010101;
-
-  SW4(out0, out1, out2, out3, dst, dst_stride);
-}
-
-static void intra_predict_horiz_8x8_msa(const uint8_t *src, uint8_t *dst,
-                                        int32_t dst_stride) {
-  uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
-
-  out0 = src[0] * 0x0101010101010101ull;
-  out1 = src[1] * 0x0101010101010101ull;
-  out2 = src[2] * 0x0101010101010101ull;
-  out3 = src[3] * 0x0101010101010101ull;
-  out4 = src[4] * 0x0101010101010101ull;
-  out5 = src[5] * 0x0101010101010101ull;
-  out6 = src[6] * 0x0101010101010101ull;
-  out7 = src[7] * 0x0101010101010101ull;
-
-  SD4(out0, out1, out2, out3, dst, dst_stride);
-  dst += (4 * dst_stride);
-  SD4(out4, out5, out6, out7, dst, dst_stride);
-}
-
-static void intra_predict_horiz_16x16_msa(const uint8_t *src, uint8_t *dst,
-                                          int32_t dst_stride) {
-  uint32_t row;
-  uint8_t inp0, inp1, inp2, inp3;
-  v16u8 src0, src1, src2, src3;
-
-  for (row = 4; row--;) {
-    inp0 = src[0];
-    inp1 = src[1];
-    inp2 = src[2];
-    inp3 = src[3];
-    src += 4;
-
-    src0 = (v16u8)__msa_fill_b(inp0);
-    src1 = (v16u8)__msa_fill_b(inp1);
-    src2 = (v16u8)__msa_fill_b(inp2);
-    src3 = (v16u8)__msa_fill_b(inp3);
-
-    ST_UB4(src0, src1, src2, src3, dst, dst_stride);
-    dst += (4 * dst_stride);
-  }
-}
-
-static void intra_predict_horiz_32x32_msa(const uint8_t *src, uint8_t *dst,
-                                          int32_t dst_stride) {
-  uint32_t row;
-  uint8_t inp0, inp1, inp2, inp3;
-  v16u8 src0, src1, src2, src3;
-
-  for (row = 8; row--;) {
-    inp0 = src[0];
-    inp1 = src[1];
-    inp2 = src[2];
-    inp3 = src[3];
-    src += 4;
-
-    src0 = (v16u8)__msa_fill_b(inp0);
-    src1 = (v16u8)__msa_fill_b(inp1);
-    src2 = (v16u8)__msa_fill_b(inp2);
-    src3 = (v16u8)__msa_fill_b(inp3);
-
-    ST_UB2(src0, src0, dst, 16);
-    dst += dst_stride;
-    ST_UB2(src1, src1, dst, 16);
-    dst += dst_stride;
-    ST_UB2(src2, src2, dst, 16);
-    dst += dst_stride;
-    ST_UB2(src3, src3, dst, 16);
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_dc_4x4_msa(const uint8_t *src_top,
-                                     const uint8_t *src_left, uint8_t *dst,
-                                     int32_t dst_stride) {
-  uint32_t val0, val1;
-  v16i8 store, src = { 0 };
-  v8u16 sum_h;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  val0 = LW(src_top);
-  val1 = LW(src_left);
-  INSERT_W2_SB(val0, val1, src);
-  sum_h = __msa_hadd_u_h((v16u8)src, (v16u8)src);
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 3);
-  store = __msa_splati_b((v16i8)sum_w, 0);
-  val0 = __msa_copy_u_w((v4i32)store, 0);
-
-  SW4(val0, val0, val0, val0, dst, dst_stride);
-}
-
-static void intra_predict_dc_tl_4x4_msa(const uint8_t *src, uint8_t *dst,
-                                        int32_t dst_stride) {
-  uint32_t val0;
-  v16i8 store, data = { 0 };
-  v8u16 sum_h;
-  v4u32 sum_w;
-
-  val0 = LW(src);
-  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);
-  sum_h = __msa_hadd_u_h((v16u8)data, (v16u8)data);
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_w, 2);
-  store = __msa_splati_b((v16i8)sum_w, 0);
-  val0 = __msa_copy_u_w((v4i32)store, 0);
-
-  SW4(val0, val0, val0, val0, dst, dst_stride);
-}
-
-static void intra_predict_128dc_4x4_msa(uint8_t *dst, int32_t dst_stride) {
-  uint32_t out;
-  const v16i8 store = __msa_ldi_b(128);
-
-  out = __msa_copy_u_w((v4i32)store, 0);
-
-  SW4(out, out, out, out, dst, dst_stride);
-}
-
-static void intra_predict_dc_8x8_msa(const uint8_t *src_top,
-                                     const uint8_t *src_left, uint8_t *dst,
-                                     int32_t dst_stride) {
-  uint64_t val0, val1;
-  v16i8 store;
-  v16u8 src = { 0 };
-  v8u16 sum_h;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  val0 = LD(src_top);
-  val1 = LD(src_left);
-  INSERT_D2_UB(val0, val1, src);
-  sum_h = __msa_hadd_u_h(src, src);
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 4);
-  store = __msa_splati_b((v16i8)sum_w, 0);
-  val0 = __msa_copy_u_d((v2i64)store, 0);
-
-  SD4(val0, val0, val0, val0, dst, dst_stride);
-  dst += (4 * dst_stride);
-  SD4(val0, val0, val0, val0, dst, dst_stride);
-}
-
-static void intra_predict_dc_tl_8x8_msa(const uint8_t *src, uint8_t *dst,
-                                        int32_t dst_stride) {
-  uint64_t val0;
-  v16i8 store;
-  v16u8 data = { 0 };
-  v8u16 sum_h;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  val0 = LD(src);
-  data = (v16u8)__msa_insert_d((v2i64)data, 0, val0);
-  sum_h = __msa_hadd_u_h(data, data);
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 3);
-  store = __msa_splati_b((v16i8)sum_w, 0);
-  val0 = __msa_copy_u_d((v2i64)store, 0);
-
-  SD4(val0, val0, val0, val0, dst, dst_stride);
-  dst += (4 * dst_stride);
-  SD4(val0, val0, val0, val0, dst, dst_stride);
-}
-
-static void intra_predict_128dc_8x8_msa(uint8_t *dst, int32_t dst_stride) {
-  uint64_t out;
-  const v16i8 store = __msa_ldi_b(128);
-
-  out = __msa_copy_u_d((v2i64)store, 0);
-
-  SD4(out, out, out, out, dst, dst_stride);
-  dst += (4 * dst_stride);
-  SD4(out, out, out, out, dst, dst_stride);
-}
-
-static void intra_predict_dc_16x16_msa(const uint8_t *src_top,
-                                       const uint8_t *src_left, uint8_t *dst,
-                                       int32_t dst_stride) {
-  v16u8 top, left, out;
-  v8u16 sum_h, sum_top, sum_left;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  top = LD_UB(src_top);
-  left = LD_UB(src_left);
-  HADD_UB2_UH(top, left, sum_top, sum_left);
-  sum_h = sum_top + sum_left;
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
-  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);
-
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-  dst += (8 * dst_stride);
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-}
-
-static void intra_predict_dc_tl_16x16_msa(const uint8_t *src, uint8_t *dst,
-                                          int32_t dst_stride) {
-  v16u8 data, out;
-  v8u16 sum_h;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  data = LD_UB(src);
-  sum_h = __msa_hadd_u_h(data, data);
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 4);
-  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);
-
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-  dst += (8 * dst_stride);
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-}
-
-static void intra_predict_128dc_16x16_msa(uint8_t *dst, int32_t dst_stride) {
-  const v16u8 out = (v16u8)__msa_ldi_b(128);
-
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-  dst += (8 * dst_stride);
-  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
-}
-
-static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
-                                       const uint8_t *src_left, uint8_t *dst,
-                                       int32_t dst_stride) {
-  uint32_t row;
-  v16u8 top0, top1, left0, left1, out;
-  v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  LD_UB2(src_top, 16, top0, top1);
-  LD_UB2(src_left, 16, left0, left1);
-  HADD_UB2_UH(top0, top1, sum_top0, sum_top1);
-  HADD_UB2_UH(left0, left1, sum_left0, sum_left1);
-  sum_h = sum_top0 + sum_top1;
-  sum_h += sum_left0 + sum_left1;
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 6);
-  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);
-
-  for (row = 16; row--;) {
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_dc_tl_32x32_msa(const uint8_t *src, uint8_t *dst,
-                                          int32_t dst_stride) {
-  uint32_t row;
-  v16u8 data0, data1, out;
-  v8u16 sum_h, sum_data0, sum_data1;
-  v4u32 sum_w;
-  v2u64 sum_d;
-
-  LD_UB2(src, 16, data0, data1);
-  HADD_UB2_UH(data0, data1, sum_data0, sum_data1);
-  sum_h = sum_data0 + sum_data1;
-  sum_w = __msa_hadd_u_w(sum_h, sum_h);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
-  sum_d = __msa_hadd_u_d(sum_w, sum_w);
-  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
-  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);
-
-  for (row = 16; row--;) {
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-  }
-}
-
-static void intra_predict_128dc_32x32_msa(uint8_t *dst, int32_t dst_stride) {
-  uint32_t row;
-  const v16u8 out = (v16u8)__msa_ldi_b(128);
-
-  for (row = 16; row--;) {
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-    ST_UB2(out, out, dst, 16);
-    dst += dst_stride;
-  }
-}
-
-void aom_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                             const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_vert_4x4_msa(above, dst, y_stride);
-}
-
-void aom_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                             const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_vert_8x8_msa(above, dst, y_stride);
-}
-
-void aom_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                               const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_vert_16x16_msa(above, dst, y_stride);
-}
-
-void aom_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                               const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_vert_32x32_msa(above, dst, y_stride);
-}
-
-void aom_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                             const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_horiz_4x4_msa(left, dst, y_stride);
-}
-
-void aom_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                             const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_horiz_8x8_msa(left, dst, y_stride);
-}
-
-void aom_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                               const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_horiz_16x16_msa(left, dst, y_stride);
-}
-
-void aom_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                               const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_horiz_32x32_msa(left, dst, y_stride);
-}
-
-void aom_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                              const uint8_t *above, const uint8_t *left) {
-  intra_predict_dc_4x4_msa(above, left, dst, y_stride);
-}
-
-void aom_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                              const uint8_t *above, const uint8_t *left) {
-  intra_predict_dc_8x8_msa(above, left, dst, y_stride);
-}
-
-void aom_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                const uint8_t *above, const uint8_t *left) {
-  intra_predict_dc_16x16_msa(above, left, dst, y_stride);
-}
-
-void aom_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                const uint8_t *above, const uint8_t *left) {
-  intra_predict_dc_32x32_msa(above, left, dst, y_stride);
-}
-
-void aom_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                  const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_dc_tl_4x4_msa(above, dst, y_stride);
-}
-
-void aom_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                  const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_dc_tl_8x8_msa(above, dst, y_stride);
-}
-
-void aom_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                    const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_dc_tl_16x16_msa(above, dst, y_stride);
-}
-
-void aom_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                    const uint8_t *above, const uint8_t *left) {
-  (void)left;
-
-  intra_predict_dc_tl_32x32_msa(above, dst, y_stride);
-}
-
-void aom_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                   const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_dc_tl_4x4_msa(left, dst, y_stride);
-}
-
-void aom_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                   const uint8_t *above, const uint8_t *left) {
-  (void)above;
-
-  intra_predict_dc_tl_8x8_msa(left, dst, y_stride);
-}
-
-void aom_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
-  (void)above;
-
-  intra_predict_dc_tl_16x16_msa(left, dst, y_stride);
-}
-
-void aom_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                     const uint8_t *above,
-                                     const uint8_t *left) {
-  (void)above;
-
-  intra_predict_dc_tl_32x32_msa(left, dst, y_stride);
-}
-
-void aom_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                  const uint8_t *above, const uint8_t *left) {
-  (void)above;
-  (void)left;
-
-  intra_predict_128dc_4x4_msa(dst, y_stride);
-}
-
-void aom_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                  const uint8_t *above, const uint8_t *left) {
-  (void)above;
-  (void)left;
-
-  intra_predict_128dc_8x8_msa(dst, y_stride);
-}
-
-void aom_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                    const uint8_t *above, const uint8_t *left) {
-  (void)above;
-  (void)left;
-
-  intra_predict_128dc_16x16_msa(dst, y_stride);
-}
-
-void aom_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
-                                    const uint8_t *above, const uint8_t *left) {
-  (void)above;
-  (void)left;
-
-  intra_predict_128dc_32x32_msa(dst, y_stride);
-}
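
The MSA horizontal predictors above rely on a classic byte-broadcast trick:
multiplying an 8-bit value by 0x01010101 (or by the 64-bit constant
0x0101010101010101) replicates that byte across the word, since v <= 255 means
none of the shifted-and-added copies carries into a neighbouring byte. A small
standalone demonstration, illustrative rather than from the tree:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint8_t v = 0x7e;
  /* v * 0x01010101 == (v << 24) | (v << 16) | (v << 8) | v for v <= 255. */
  const uint32_t word = v * 0x01010101u;
  const uint64_t dword = v * 0x0101010101010101ull;
  printf("%08x %016llx\n", (unsigned)word, (unsigned long long)dword);
  return 0; /* prints 7e7e7e7e 7e7e7e7e7e7e7e7e */
}
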
diff --git a/aom_dsp/mips/loopfilter_16_msa.c b/aom_dsp/mips/loopfilter_16_msa.c
deleted file mode 100644
index 38a10e9..0000000
--- a/aom_dsp/mips/loopfilter_16_msa.c
+++ /dev/null
@@ -1,1488 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_ports/mem.h"
-#include "aom_dsp/mips/loopfilter_msa.h"
-
-int32_t aom_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
-                                 const uint8_t *b_limit_ptr,
-                                 const uint8_t *limit_ptr,
-                                 const uint8_t *thresh_ptr) {
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-  v16u8 flat, mask, hev, thresh, b_limit, limit;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
-  v16u8 zero = { 0 };
-
-  /* load vector elements */
-  LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  if (__msa_test_bz_v(flat)) {
-    ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
-
-    return 1;
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
-    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
-                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
-
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
-                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
-                p0_filt8_r, q0_filt8_r);
-    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
-                q2_filt8_r);
-
-    /* store pixel values */
-    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
-    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
-    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
-    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
-    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
-    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);
-
-    ST_UB4(p2_out, p1_out, p0_out, q0_out, filter48, 16);
-    filter48 += (4 * 16);
-    ST_UB2(q1_out, q2_out, filter48, 16);
-    filter48 += (2 * 16);
-    ST_UB(flat, filter48);
-
-    return 0;
-  }
-}
-
-void aom_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
-  v16u8 flat, flat2, filter8;
-  v16i8 zero = { 0 };
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  v8u16 p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, p2_r_in, p1_r_in, p0_r_in;
-  v8u16 q7_r_in, q6_r_in, q5_r_in, q4_r_in, q3_r_in, q2_r_in, q1_r_in, q0_r_in;
-  v8u16 p7_l_in, p6_l_in, p5_l_in, p4_l_in, p3_l_in, p2_l_in, p1_l_in, p0_l_in;
-  v8u16 q7_l_in, q6_l_in, q5_l_in, q4_l_in, q3_l_in, q2_l_in, q1_l_in, q0_l_in;
-  v8u16 tmp0_r, tmp1_r, tmp0_l, tmp1_l;
-  v8i16 l_out, r_out;
-
-  flat = LD_UB(filter48 + 96);
-
-  LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0);
-  LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
-  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
-
-  if (__msa_test_bz_v(flat2)) {
-    LD_UB4(filter48, 16, p2, p1, p0, q0);
-    LD_UB2(filter48 + 4 * 16, 16, q1, q2);
-
-    src -= 3 * pitch;
-    ST_UB4(p2, p1, p0, q0, src, pitch);
-    src += (4 * pitch);
-    ST_UB2(q1, q2, src, pitch);
-  } else {
-    src -= 7 * pitch;
-
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
-               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
-               p2_r_in, p1_r_in, p0_r_in);
-
-    q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
-
-    tmp0_r = p7_r_in << 3;
-    tmp0_r -= p7_r_in;
-    tmp0_r += p6_r_in;
-    tmp0_r += q0_r_in;
-    tmp1_r = p6_r_in + p5_r_in;
-    tmp1_r += p4_r_in;
-    tmp1_r += p3_r_in;
-    tmp1_r += p2_r_in;
-    tmp1_r += p1_r_in;
-    tmp1_r += p0_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    ILVL_B4_UH(zero, p7, zero, p6, zero, p5, zero, p4, p7_l_in, p6_l_in,
-               p5_l_in, p4_l_in);
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l_in, p2_l_in,
-               p1_l_in, p0_l_in);
-    q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0);
-
-    tmp0_l = p7_l_in << 3;
-    tmp0_l -= p7_l_in;
-    tmp0_l += p6_l_in;
-    tmp0_l += q0_l_in;
-    tmp1_l = p6_l_in + p5_l_in;
-    tmp1_l += p4_l_in;
-    tmp1_l += p3_l_in;
-    tmp1_l += p2_l_in;
-    tmp1_l += p1_l_in;
-    tmp1_l += p0_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p6 = __msa_bmnz_v(p6, (v16u8)r_out, flat2);
-    ST_UB(p6, src);
-    src += pitch;
-
-    /* p5 */
-    q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1);
-    tmp0_r = p5_r_in - p6_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1);
-    tmp0_l = p5_l_in - p6_l_in;
-    tmp0_l += q1_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p5 = __msa_bmnz_v(p5, (v16u8)r_out, flat2);
-    ST_UB(p5, src);
-    src += pitch;
-
-    /* p4 */
-    q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2);
-    tmp0_r = p4_r_in - p5_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q2_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q2);
-    tmp0_l = p4_l_in - p5_l_in;
-    tmp0_l += q2_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p4 = __msa_bmnz_v(p4, (v16u8)r_out, flat2);
-    ST_UB(p4, src);
-    src += pitch;
-
-    /* p3 */
-    q3_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q3);
-    tmp0_r = p3_r_in - p4_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q3_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q3);
-    tmp0_l = p3_l_in - p4_l_in;
-    tmp0_l += q3_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p3 = __msa_bmnz_v(p3, (v16u8)r_out, flat2);
-    ST_UB(p3, src);
-    src += pitch;
-
-    /* p2 */
-    q4_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q4);
-    filter8 = LD_UB(filter48);
-    tmp0_r = p2_r_in - p3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q4_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q4);
-    tmp0_l = p2_l_in - p3_l_in;
-    tmp0_l += q4_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* p1 */
-    q5_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q5);
-    filter8 = LD_UB(filter48 + 16);
-    tmp0_r = p1_r_in - p2_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q5_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q5);
-    tmp0_l = p1_l_in - p2_l_in;
-    tmp0_l += q5_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* p0 */
-    q6_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q6);
-    filter8 = LD_UB(filter48 + 32);
-    tmp0_r = p0_r_in - p1_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q6_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q6);
-    tmp0_l = p0_l_in - p1_l_in;
-    tmp0_l += q6_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* q0 */
-    q7_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q7);
-    filter8 = LD_UB(filter48 + 48);
-    tmp0_r = q7_r_in - p0_r_in;
-    tmp0_r += q0_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    q7_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q7);
-    tmp0_l = q7_l_in - p0_l_in;
-    tmp0_l += q0_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* q1 */
-    filter8 = LD_UB(filter48 + 64);
-    tmp0_r = q7_r_in - q0_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p6_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q0_l_in;
-    tmp0_l += q1_l_in;
-    tmp0_l -= p6_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* q2 */
-    filter8 = LD_UB(filter48 + 80);
-    tmp0_r = q7_r_in - q1_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p5_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q1_l_in;
-    tmp0_l += q2_l_in;
-    tmp0_l -= p5_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += pitch;
-
-    /* q3 */
-    tmp0_r = q7_r_in - q2_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p4_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q2_l_in;
-    tmp0_l += q3_l_in;
-    tmp0_l -= p4_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q3 = __msa_bmnz_v(q3, (v16u8)r_out, flat2);
-    ST_UB(q3, src);
-    src += pitch;
-
-    /* q4 */
-    tmp0_r = q7_r_in - q3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p3_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q3_l_in;
-    tmp0_l += q4_l_in;
-    tmp0_l -= p3_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q4 = __msa_bmnz_v(q4, (v16u8)r_out, flat2);
-    ST_UB(q4, src);
-    src += pitch;
-
-    /* q5 */
-    tmp0_r = q7_r_in - q4_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p2_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q4_l_in;
-    tmp0_l += q5_l_in;
-    tmp0_l -= p2_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q5 = __msa_bmnz_v(q5, (v16u8)r_out, flat2);
-    ST_UB(q5, src);
-    src += pitch;
-
-    /* q6 */
-    tmp0_r = q7_r_in - q5_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p1_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    tmp0_l = q7_l_in - q5_l_in;
-    tmp0_l += q6_l_in;
-    tmp0_l -= p1_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q6 = __msa_bmnz_v(q6, (v16u8)r_out, flat2);
-    ST_UB(q6, src);
-  }
-}
-
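
aom_hz_lpf_t16_16w evaluates the 15-tap wide filter with a running sum: tmp1_r/tmp1_l hold the current window total for the low and high eight lanes, and each subsequent output costs one add of incoming-minus-outgoing taps plus a rounding shift by 4. A scalar sketch of the recurrence, under the tap layout used above:

    #include <stdint.h>

    /* Scalar sketch of the wide-filter running sum. t[0..15] holds
     * p7..p0,q0..q7; out[0..13] receives p6'..q6'. The edge taps p7
     * and q7 repeat while the 15-tap window slides past them. */
    static void filter16_sketch(const uint8_t t[16], uint8_t out[14]) {
      int i, sum = 7 * t[0] + 2 * t[1];      /* window for p6' */
      for (i = 2; i <= 8; ++i) sum += t[i];
      out[0] = (uint8_t)((sum + 8) >> 4);
      for (i = 1; i < 14; ++i) {
        sum += t[i + 8 > 15 ? 15 : i + 8];   /* incoming right tap */
        sum -= t[i - 7 < 0 ? 0 : i - 7];     /* outgoing left tap */
        sum += t[i + 1] - t[i];              /* move the doubled center tap */
        out[i] = (uint8_t)((sum + 8) >> 4);
      }
    }

For example, the p5 step above (tmp0 = p5 - p6 + q1 - p7) is exactly the i = 1 iteration of this loop.
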
-static void mb_lpf_horizontal_edge_dual(uint8_t *src, int32_t pitch,
-                                        const uint8_t *b_limit_ptr,
-                                        const uint8_t *limit_ptr,
-                                        const uint8_t *thresh_ptr,
-                                        int32_t count) {
-  DECLARE_ALIGNED(32, uint8_t, filter48[16 * 8]);
-  uint8_t early_exit = 0;
-
-  (void)count;
-
-  early_exit = aom_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr,
-                                        limit_ptr, thresh_ptr);
-
-  if (0 == early_exit) {
-    aom_hz_lpf_t16_16w(src, pitch, filter48);
-  }
-}
-
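
The two stages communicate through the filter48 scratch buffer: the t4/t8 pass stores its six filtered rows plus the flat mask, and the t16 pass reloads them when the wide filter is not taken. The layout implied by the stores above (offsets in bytes):

    /* filter48 layout, 16 bytes per row:
     *   [  0.. 15] p2_out    [ 16.. 31] p1_out    [ 32.. 47] p0_out
     *   [ 48.. 63] q0_out    [ 64.. 79] q1_out    [ 80.. 95] q2_out
     *   [ 96..111] flat mask, reloaded as LD_UB(filter48 + 96)
     * The eighth row of the allocation is spare. */
    DECLARE_ALIGNED(32, uint8_t, filter48[16 * 8]);
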
-static void mb_lpf_horizontal_edge(uint8_t *src, int32_t pitch,
-                                   const uint8_t *b_limit_ptr,
-                                   const uint8_t *limit_ptr,
-                                   const uint8_t *thresh_ptr, int32_t count) {
-  if (1 == count) {
-    uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
-    uint64_t dword0, dword1;
-    v16u8 flat2, mask, hev, flat, thresh, b_limit, limit;
-    v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
-    v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-    v16u8 p0_filter16, p1_filter16;
-    v8i16 p2_filter8, p1_filter8, p0_filter8;
-    v8i16 q0_filter8, q1_filter8, q2_filter8;
-    v8u16 p7_r, p6_r, p5_r, p4_r, q7_r, q6_r, q5_r, q4_r;
-    v8u16 p3_r, p2_r, p1_r, p0_r, q3_r, q2_r, q1_r, q0_r;
-    v16i8 zero = { 0 };
-    v8u16 tmp0, tmp1, tmp2;
-
-    /* load vector elements */
-    LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-    thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-    b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-    limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-                 mask, flat);
-    AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-    AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
-                       q1_out);
-
-    flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
-
-    if (__msa_test_bz_v(flat)) {
-      p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
-      p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
-      q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
-      q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
-      SD4(p1_d, p0_d, q0_d, q1_d, src - 2 * pitch, pitch);
-    } else {
-      /* convert 8 bit input data into 16 bit */
-      ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-                 zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
-                 q3_r);
-      AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
-                  p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
-
-      /* convert 16 bit output data into 8 bit */
-      PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero,
-                  q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8);
-      PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8);
-
-      /* store pixel values */
-      p2_out = __msa_bmnz_v(p2, (v16u8)p2_filter8, flat);
-      p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filter8, flat);
-      p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filter8, flat);
-      q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filter8, flat);
-      q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filter8, flat);
-      q2_out = __msa_bmnz_v(q2, (v16u8)q2_filter8, flat);
-
-      /* load 16 vector elements */
-      LD_UB4((src - 8 * pitch), pitch, p7, p6, p5, p4);
-      LD_UB4(src + (4 * pitch), pitch, q4, q5, q6, q7);
-
-      AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
-
-      if (__msa_test_bz_v(flat2)) {
-        p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
-        p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
-        p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
-        q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
-        q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
-        q2_d = __msa_copy_u_d((v2i64)q2_out, 0);
-
-        SD4(p2_d, p1_d, p0_d, q0_d, src - 3 * pitch, pitch);
-        SD(q1_d, src + pitch);
-        SD(q2_d, src + 2 * pitch);
-      } else {
-        /* operate on the right (LSB) 8 pixels only */
-        ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, q4, zero, q5,
-                   zero, q6, zero, q7, p7_r, p6_r, p5_r, p4_r, q4_r, q5_r, q6_r,
-                   q7_r);
-
-        tmp0 = p7_r << 3;
-        tmp0 -= p7_r;
-        tmp0 += p6_r;
-        tmp0 += q0_r;
-
-        src -= 7 * pitch;
-
-        /* calculation of p6 and p5 */
-        tmp1 = p6_r + p5_r + p4_r + p3_r;
-        tmp1 += (p2_r + p1_r + p0_r);
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp0 = p5_r - p6_r + q1_r - p7_r;
-        tmp1 += tmp0;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(p6, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(p5, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of p4 and p3 */
-        tmp0 = p4_r - p5_r + q2_r - p7_r;
-        tmp2 = p3_r - p4_r + q3_r - p7_r;
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(p4, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(p3, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of p2 and p1 */
-        tmp0 = p2_r - p3_r + q4_r - p7_r;
-        tmp2 = p1_r - p2_r + q5_r - p7_r;
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(p2_out, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(p1_out, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of p0 and q0 */
-        tmp0 = (p0_r - p1_r) + (q6_r - p7_r);
-        tmp2 = (q7_r - p0_r) + (q0_r - p7_r);
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(p0_out, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(q0_out, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of q1 and q2 */
-        tmp0 = q7_r - q0_r + q1_r - p6_r;
-        tmp2 = q7_r - q1_r + q2_r - p5_r;
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(q1_out, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(q2_out, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of q3 and q4 */
-        tmp0 = (q7_r - q2_r) + (q3_r - p4_r);
-        tmp2 = (q7_r - q3_r) + (q4_r - p3_r);
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(q3, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(q4, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-        src += pitch;
-
-        /* calculation of q5 and q6 */
-        tmp0 = (q7_r - q4_r) + (q5_r - p2_r);
-        tmp2 = (q7_r - q5_r) + (q6_r - p1_r);
-        tmp1 += tmp0;
-        p0_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        tmp1 += tmp2;
-        p1_filter16 = (v16u8)__msa_srari_h((v8i16)tmp1, 4);
-        PCKEV_B2_UB(zero, p0_filter16, zero, p1_filter16, p0_filter16,
-                    p1_filter16);
-        p0_filter16 = __msa_bmnz_v(q5, p0_filter16, flat2);
-        p1_filter16 = __msa_bmnz_v(q6, p1_filter16, flat2);
-        dword0 = __msa_copy_u_d((v2i64)p0_filter16, 0);
-        dword1 = __msa_copy_u_d((v2i64)p1_filter16, 0);
-        SD(dword0, src);
-        src += pitch;
-        SD(dword1, src);
-      }
-    }
-  } else {
-    mb_lpf_horizontal_edge_dual(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr,
-                                count);
-  }
-}
-
-void aom_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch,
-                               const uint8_t *b_limit_ptr,
-                               const uint8_t *limit_ptr,
-                               const uint8_t *thresh_ptr) {
-  mb_lpf_horizontal_edge(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, 1);
-}
-
-void aom_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
-                                    const uint8_t *b_limit_ptr,
-                                    const uint8_t *limit_ptr,
-                                    const uint8_t *thresh_ptr) {
-  mb_lpf_horizontal_edge(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, 2);
-}
-
-static void transpose_16x8_to_8x16(uint8_t *input, int32_t in_pitch,
-                                   uint8_t *output, int32_t out_pitch) {
-  v16u8 p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org;
-  v16i8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-
-  LD_UB8(input, in_pitch, p7_org, p6_org, p5_org, p4_org, p3_org, p2_org,
-         p1_org, p0_org);
-  /* 8x8 transpose */
-  TRANSPOSE8x8_UB_UB(p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org,
-                     p0_org, p7, p6, p5, p4, p3, p2, p1, p0);
-  /* second 8x8 transpose, done with interleaves and shifts */
-  ILVL_B4_SB(p5_org, p7_org, p4_org, p6_org, p1_org, p3_org, p0_org, p2_org,
-             tmp0, tmp1, tmp2, tmp3);
-  ILVR_B2_SB(tmp1, tmp0, tmp3, tmp2, tmp4, tmp6);
-  ILVL_B2_SB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp7);
-  ILVR_W2_UB(tmp6, tmp4, tmp7, tmp5, q0, q4);
-  ILVL_W2_UB(tmp6, tmp4, tmp7, tmp5, q2, q6);
-  SLDI_B4_0_UB(q0, q2, q4, q6, q1, q3, q5, q7, 8);
-
-  ST_UB8(p7, p6, p5, p4, p3, p2, p1, p0, output, out_pitch);
-  output += (8 * out_pitch);
-  ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
-}
-
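
Conceptually, transpose_16x8_to_8x16 turns an 8-row by 16-column block on its side so the vertical filter can run along vector lanes; the interleave/shift sequence above is equivalent to this scalar reference (a sketch):

    #include <stdint.h>

    /* Scalar reference: rows of the 8x16 input become columns of the
     * 16x8 output. */
    static void transpose_8x16_sketch(const uint8_t *in, int in_pitch,
                                      uint8_t *out, int out_pitch) {
      for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 16; ++c)
          out[c * out_pitch + r] = in[r * in_pitch + c];
    }
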
-static void transpose_8x16_to_16x8(uint8_t *input, int32_t in_pitch,
-                                   uint8_t *output, int32_t out_pitch) {
-  v16u8 p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o;
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-
-  LD_UB8(input, in_pitch, p7, p6, p5, p4, p3, p2, p1, p0);
-  LD_UB8(input + (8 * in_pitch), in_pitch, q0, q1, q2, q3, q4, q5, q6, q7);
-  TRANSPOSE16x8_UB_UB(p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5,
-                      q6, q7, p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o);
-  ST_UB8(p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o, output, out_pitch);
-}
-
-static void transpose_16x16(uint8_t *input, int32_t in_pitch, uint8_t *output,
-                            int32_t out_pitch) {
-  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
-  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  v8i16 tmp0, tmp1, tmp4, tmp5, tmp6, tmp7;
-  v4i32 tmp2, tmp3;
-
-  LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7);
-  input += (8 * in_pitch);
-  LD_UB8(input, in_pitch, row8, row9, row10, row11, row12, row13, row14, row15);
-
-  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
-                      row9, row10, row11, row12, row13, row14, row15, p7, p6,
-                      p5, p4, p3, p2, p1, p0);
-
-  /* transpose 16x8 matrix into 8x16 */
-  /* total 8 intermediate registers and 32 instructions */
-  q7 = (v16u8)__msa_ilvod_d((v2i64)row8, (v2i64)row0);
-  q6 = (v16u8)__msa_ilvod_d((v2i64)row9, (v2i64)row1);
-  q5 = (v16u8)__msa_ilvod_d((v2i64)row10, (v2i64)row2);
-  q4 = (v16u8)__msa_ilvod_d((v2i64)row11, (v2i64)row3);
-  q3 = (v16u8)__msa_ilvod_d((v2i64)row12, (v2i64)row4);
-  q2 = (v16u8)__msa_ilvod_d((v2i64)row13, (v2i64)row5);
-  q1 = (v16u8)__msa_ilvod_d((v2i64)row14, (v2i64)row6);
-  q0 = (v16u8)__msa_ilvod_d((v2i64)row15, (v2i64)row7);
-
-  ILVEV_B2_SH(q7, q6, q5, q4, tmp0, tmp1);
-  tmp4 = (v8i16)__msa_ilvod_b((v16i8)q6, (v16i8)q7);
-  tmp5 = (v8i16)__msa_ilvod_b((v16i8)q4, (v16i8)q5);
-
-  ILVEV_B2_UB(q3, q2, q1, q0, q5, q7);
-  tmp6 = (v8i16)__msa_ilvod_b((v16i8)q2, (v16i8)q3);
-  tmp7 = (v8i16)__msa_ilvod_b((v16i8)q0, (v16i8)q1);
-
-  ILVEV_H2_SW(tmp0, tmp1, q5, q7, tmp2, tmp3);
-  q0 = (v16u8)__msa_ilvev_w(tmp3, tmp2);
-  q4 = (v16u8)__msa_ilvod_w(tmp3, tmp2);
-
-  tmp2 = (v4i32)__msa_ilvod_h(tmp1, tmp0);
-  tmp3 = (v4i32)__msa_ilvod_h((v8i16)q7, (v8i16)q5);
-  q2 = (v16u8)__msa_ilvev_w(tmp3, tmp2);
-  q6 = (v16u8)__msa_ilvod_w(tmp3, tmp2);
-
-  ILVEV_H2_SW(tmp4, tmp5, tmp6, tmp7, tmp2, tmp3);
-  q1 = (v16u8)__msa_ilvev_w(tmp3, tmp2);
-  q5 = (v16u8)__msa_ilvod_w(tmp3, tmp2);
-
-  tmp2 = (v4i32)__msa_ilvod_h(tmp5, tmp4);
-  tmp3 = (v4i32)__msa_ilvod_h(tmp7, tmp6);
-  q3 = (v16u8)__msa_ilvev_w(tmp3, tmp2);
-  q7 = (v16u8)__msa_ilvod_w(tmp3, tmp2);
-
-  ST_UB8(p7, p6, p5, p4, p3, p2, p1, p0, output, out_pitch);
-  output += (8 * out_pitch);
-  ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
-}
-
-int32_t aom_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
-                                uint8_t *src_org, int32_t pitch_org,
-                                const uint8_t *b_limit_ptr,
-                                const uint8_t *limit_ptr,
-                                const uint8_t *thresh_ptr) {
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-  v16u8 flat, mask, hev, thresh, b_limit, limit;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v16i8 zero = { 0 };
-  v8i16 vec0, vec1, vec2, vec3;
-
-  /* load vector elements */
-  LD_UB8(src - (4 * 16), 16, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  /* flat4 */
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  /* filter4 */
-  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
-
-  if (__msa_test_bz_v(flat)) {
-    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-    ST4x8_UB(vec2, vec3, (src_org - 2), pitch_org);
-    return 1;
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-
-    /* convert 16 bit output data into 8 bit */
-    p2_r = (v8u16)__msa_pckev_b((v16i8)p2_filt8_r, (v16i8)p2_filt8_r);
-    p1_r = (v8u16)__msa_pckev_b((v16i8)p1_filt8_r, (v16i8)p1_filt8_r);
-    p0_r = (v8u16)__msa_pckev_b((v16i8)p0_filt8_r, (v16i8)p0_filt8_r);
-    q0_r = (v8u16)__msa_pckev_b((v16i8)q0_filt8_r, (v16i8)q0_filt8_r);
-    q1_r = (v8u16)__msa_pckev_b((v16i8)q1_filt8_r, (v16i8)q1_filt8_r);
-    q2_r = (v8u16)__msa_pckev_b((v16i8)q2_filt8_r, (v16i8)q2_filt8_r);
-
-    /* store pixel values */
-    p2_out = __msa_bmnz_v(p2, (v16u8)p2_r, flat);
-    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_r, flat);
-    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_r, flat);
-    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_r, flat);
-    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_r, flat);
-    q2_out = __msa_bmnz_v(q2, (v16u8)q2_r, flat);
-
-    ST_UB4(p2_out, p1_out, p0_out, q0_out, filter48, 16);
-    filter48 += (4 * 16);
-    ST_UB2(q1_out, q2_out, filter48, 16);
-    filter48 += (2 * 16);
-    ST_UB(flat, filter48);
-
-    return 0;
-  }
-}
-
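
The 8-wide path only needs the low half of each widened result, so a single __msa_pckev_b with the same vector as both operands packs the even bytes, which for these halfword lanes is a plain truncating narrowing. Scalar view (a sketch):

    #include <stdint.h>

    /* Keep the low byte of each 16-bit lane; after AOM_FILTER8 the
     * results already fit in 8 bits, so nothing is lost. */
    static void narrow_sketch(const uint16_t in[8], uint8_t out[8]) {
      for (int i = 0; i < 8; ++i) out[i] = (uint8_t)in[i];
    }
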
-int32_t aom_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
-                          uint8_t *filter48) {
-  v16i8 zero = { 0 };
-  v16u8 filter8, flat, flat2;
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  v8u16 p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, p2_r_in, p1_r_in, p0_r_in;
-  v8u16 q7_r_in, q6_r_in, q5_r_in, q4_r_in, q3_r_in, q2_r_in, q1_r_in, q0_r_in;
-  v8u16 tmp0_r, tmp1_r;
-  v8i16 r_out;
-
-  flat = LD_UB(filter48 + 6 * 16);
-
-  LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
-  LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
-
-  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
-
-  if (__msa_test_bz_v(flat2)) {
-    v8i16 vec0, vec1, vec2, vec3, vec4;
-
-    LD_UB4(filter48, 16, p2, p1, p0, q0);
-    LD_UB2(filter48 + 4 * 16, 16, q1, q2);
-
-    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec3, vec4);
-    vec2 = (v8i16)__msa_ilvr_b((v16i8)q2, (v16i8)q1);
-
-    src_org -= 3;
-    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec2, 0, (src_org + 4), pitch);
-    src_org += (4 * pitch);
-    ST4x4_UB(vec4, vec4, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec2, 4, (src_org + 4), pitch);
-
-    return 1;
-  } else {
-    src -= 7 * 16;
-
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
-               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
-               p2_r_in, p1_r_in, p0_r_in);
-    q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
-
-    tmp0_r = p7_r_in << 3;
-    tmp0_r -= p7_r_in;
-    tmp0_r += p6_r_in;
-    tmp0_r += q0_r_in;
-    tmp1_r = p6_r_in + p5_r_in;
-    tmp1_r += p4_r_in;
-    tmp1_r += p3_r_in;
-    tmp1_r += p2_r_in;
-    tmp1_r += p1_r_in;
-    tmp1_r += p0_r_in;
-    tmp1_r += tmp0_r;
-
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    p6 = __msa_bmnz_v(p6, (v16u8)r_out, flat2);
-    ST8x1_UB(p6, src);
-    src += 16;
-
-    /* p5 */
-    q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1);
-    tmp0_r = p5_r_in - p6_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    p5 = __msa_bmnz_v(p5, (v16u8)r_out, flat2);
-    ST8x1_UB(p5, src);
-    src += 16;
-
-    /* p4 */
-    q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2);
-    tmp0_r = p4_r_in - p5_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    p4 = __msa_bmnz_v(p4, (v16u8)r_out, flat2);
-    ST8x1_UB(p4, src);
-    src += 16;
-
-    /* p3 */
-    q3_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q3);
-    tmp0_r = p3_r_in - p4_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    p3 = __msa_bmnz_v(p3, (v16u8)r_out, flat2);
-    ST8x1_UB(p3, src);
-    src += 16;
-
-    /* p2 */
-    q4_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q4);
-    filter8 = LD_UB(filter48);
-    tmp0_r = p2_r_in - p3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* p1 */
-    q5_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q5);
-    filter8 = LD_UB(filter48 + 16);
-    tmp0_r = p1_r_in - p2_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* p0 */
-    q6_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q6);
-    filter8 = LD_UB(filter48 + 32);
-    tmp0_r = p0_r_in - p1_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* q0 */
-    q7_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q7);
-    filter8 = LD_UB(filter48 + 48);
-    tmp0_r = q7_r_in - p0_r_in;
-    tmp0_r += q0_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* q1 */
-    filter8 = LD_UB(filter48 + 64);
-    tmp0_r = q7_r_in - q0_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p6_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* q2 */
-    filter8 = LD_UB(filter48 + 80);
-    tmp0_r = q7_r_in - q1_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p5_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST8x1_UB(filter8, src);
-    src += 16;
-
-    /* q3 */
-    tmp0_r = q7_r_in - q2_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p4_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    q3 = __msa_bmnz_v(q3, (v16u8)r_out, flat2);
-    ST8x1_UB(q3, src);
-    src += 16;
-
-    /* q4 */
-    tmp0_r = q7_r_in - q3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p3_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    q4 = __msa_bmnz_v(q4, (v16u8)r_out, flat2);
-    ST8x1_UB(q4, src);
-    src += 16;
-
-    /* q5 */
-    tmp0_r = q7_r_in - q4_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p2_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    q5 = __msa_bmnz_v(q5, (v16u8)r_out, flat2);
-    ST8x1_UB(q5, src);
-    src += 16;
-
-    /* q6 */
-    tmp0_r = q7_r_in - q5_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p1_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)r_out, (v16i8)r_out);
-    q6 = __msa_bmnz_v(q6, (v16u8)r_out, flat2);
-    ST8x1_UB(q6, src);
-
-    return 0;
-  }
-}
-
-void aom_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
-                             const uint8_t *b_limit_ptr,
-                             const uint8_t *limit_ptr,
-                             const uint8_t *thresh_ptr) {
-  uint8_t early_exit = 0;
-  DECLARE_ALIGNED(32, uint8_t, transposed_input[16 * 24]);
-  uint8_t *filter48 = &transposed_input[16 * 16];
-
-  transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16);
-
-  early_exit =
-      aom_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
-                              pitch, b_limit_ptr, limit_ptr, thresh_ptr);
-
-  if (0 == early_exit) {
-    early_exit = aom_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
-                                   &filter48[0]);
-
-    if (0 == early_exit) {
-      transpose_8x16_to_16x8(transposed_input, 16, src - 8, pitch);
-    }
-  }
-}
-
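
aom_lpf_vertical_16_msa handles a vertical edge by transposing the neighborhood into a pitch-16 scratch buffer, running the horizontal t4/t8/t16 chain there, and transposing back only when no stage took its early exit. A sketch of the control flow; a trivial square transpose stands in for the rectangular helpers above:

    #include <stdint.h>

    static void tr16_sketch(const uint8_t *in, int ip, uint8_t *out, int op) {
      for (int r = 0; r < 16; ++r)
        for (int c = 0; c < 16; ++c) out[c * op + r] = in[r * ip + c];
    }

    void vertical_16_sketch(uint8_t *src, int pitch) {
      uint8_t scratch[16 * 16];
      tr16_sketch(src - 8, pitch, scratch, 16);
      /* ... run the horizontal filter stages on scratch (pitch 16);
       * each stage stores its own narrower result on early exit ... */
      tr16_sketch(scratch, 16, src - 8, pitch);  /* skipped on early exit */
    }
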
-int32_t aom_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
-                                 uint8_t *src_org, int32_t pitch,
-                                 const uint8_t *b_limit_ptr,
-                                 const uint8_t *limit_ptr,
-                                 const uint8_t *thresh_ptr) {
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-  v16u8 flat, mask, hev, thresh, b_limit, limit;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
-  v16i8 zero = { 0 };
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5;
-
-  /* load vector elements */
-  LD_UB8(src - (4 * 16), 16, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  /* flat4 */
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  /* filter4 */
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  if (__msa_test_bz_v(flat)) {
-    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-    ILVL_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec4, vec5);
-
-    src_org -= 2;
-    ST4x8_UB(vec2, vec3, src_org, pitch);
-    src_org += 8 * pitch;
-    ST4x8_UB(vec4, vec5, src_org, pitch);
-
-    return 1;
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
-    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
-                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
-
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
-                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
-                p0_filt8_r, q0_filt8_r);
-    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
-                q2_filt8_r);
-
-    /* store pixel values */
-    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
-    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
-    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
-    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
-    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
-    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);
-
-    ST_UB4(p2_out, p1_out, p0_out, q0_out, filter48, 16);
-    filter48 += (4 * 16);
-    ST_UB2(q1_out, q2_out, filter48, 16);
-    filter48 += (2 * 16);
-    ST_UB(flat, filter48);
-
-    return 0;
-  }
-}
-
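
The 16-wide variants widen each byte vector into two halfword vectors before filtering: ILVR_B* interleaves with zero to produce the right (low) eight lanes, ILVL_B* the left (high) eight, and PCKEV_B* packs the halves back afterwards. In scalar terms (a sketch):

    #include <stdint.h>

    /* Scalar view of the zero-interleave widening used on the 16-wide
     * paths above. */
    static void widen_sketch(const uint8_t in[16], uint16_t right[8],
                             uint16_t left[8]) {
      for (int i = 0; i < 8; ++i) {
        right[i] = in[i];     /* ILVR_B* with zero */
        left[i] = in[i + 8];  /* ILVL_B* with zero */
      }
    }
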
-int32_t aom_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
-                           uint8_t *filter48) {
-  v16u8 flat, flat2, filter8;
-  v16i8 zero = { 0 };
-  v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  v8u16 p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, p2_r_in, p1_r_in, p0_r_in;
-  v8u16 q7_r_in, q6_r_in, q5_r_in, q4_r_in, q3_r_in, q2_r_in, q1_r_in, q0_r_in;
-  v8u16 p7_l_in, p6_l_in, p5_l_in, p4_l_in, p3_l_in, p2_l_in, p1_l_in, p0_l_in;
-  v8u16 q7_l_in, q6_l_in, q5_l_in, q4_l_in, q3_l_in, q2_l_in, q1_l_in, q0_l_in;
-  v8u16 tmp0_r, tmp1_r, tmp0_l, tmp1_l;
-  v8i16 l_out, r_out;
-
-  flat = LD_UB(filter48 + 6 * 16);
-
-  LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
-  LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
-
-  AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
-
-  if (__msa_test_bz_v(flat2)) {
-    v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-
-    LD_UB4(filter48, 16, p2, p1, p0, q0);
-    LD_UB2(filter48 + 4 * 16, 16, q1, q2);
-
-    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec3, vec4);
-    ILVL_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec6, vec7);
-    ILVRL_B2_SH(q2, q1, vec2, vec5);
-
-    src_org -= 3;
-    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec2, 0, (src_org + 4), pitch);
-    src_org += (4 * pitch);
-    ST4x4_UB(vec4, vec4, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec2, 4, (src_org + 4), pitch);
-    src_org += (4 * pitch);
-    ST4x4_UB(vec6, vec6, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec5, 0, (src_org + 4), pitch);
-    src_org += (4 * pitch);
-    ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src_org, pitch);
-    ST2x4_UB(vec5, 4, (src_org + 4), pitch);
-
-    return 1;
-  } else {
-    src -= 7 * 16;
-
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
-               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
-               p2_r_in, p1_r_in, p0_r_in);
-    q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
-
-    tmp0_r = p7_r_in << 3;
-    tmp0_r -= p7_r_in;
-    tmp0_r += p6_r_in;
-    tmp0_r += q0_r_in;
-    tmp1_r = p6_r_in + p5_r_in;
-    tmp1_r += p4_r_in;
-    tmp1_r += p3_r_in;
-    tmp1_r += p2_r_in;
-    tmp1_r += p1_r_in;
-    tmp1_r += p0_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-
-    ILVL_B4_UH(zero, p7, zero, p6, zero, p5, zero, p4, p7_l_in, p6_l_in,
-               p5_l_in, p4_l_in);
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l_in, p2_l_in,
-               p1_l_in, p0_l_in);
-    q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0);
-
-    tmp0_l = p7_l_in << 3;
-    tmp0_l -= p7_l_in;
-    tmp0_l += p6_l_in;
-    tmp0_l += q0_l_in;
-    tmp1_l = p6_l_in + p5_l_in;
-    tmp1_l += p4_l_in;
-    tmp1_l += p3_l_in;
-    tmp1_l += p2_l_in;
-    tmp1_l += p1_l_in;
-    tmp1_l += p0_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p6 = __msa_bmnz_v(p6, (v16u8)r_out, flat2);
-    ST_UB(p6, src);
-    src += 16;
-
-    /* p5 */
-    q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1);
-    tmp0_r = p5_r_in - p6_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1);
-    tmp0_l = p5_l_in - p6_l_in;
-    tmp0_l += q1_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p5 = __msa_bmnz_v(p5, (v16u8)r_out, flat2);
-    ST_UB(p5, src);
-    src += 16;
-
-    /* p4 */
-    q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2);
-    tmp0_r = p4_r_in - p5_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q2_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q2);
-    tmp0_l = p4_l_in - p5_l_in;
-    tmp0_l += q2_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p4 = __msa_bmnz_v(p4, (v16u8)r_out, flat2);
-    ST_UB(p4, src);
-    src += 16;
-
-    /* p3 */
-    q3_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q3);
-    tmp0_r = p3_r_in - p4_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q3_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q3);
-    tmp0_l = p3_l_in - p4_l_in;
-    tmp0_l += q3_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    p3 = __msa_bmnz_v(p3, (v16u8)r_out, flat2);
-    ST_UB(p3, src);
-    src += 16;
-
-    /* p2 */
-    q4_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q4);
-    filter8 = LD_UB(filter48);
-    tmp0_r = p2_r_in - p3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q4_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q4);
-    tmp0_l = p2_l_in - p3_l_in;
-    tmp0_l += q4_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* p1 */
-    q5_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q5);
-    filter8 = LD_UB(filter48 + 16);
-    tmp0_r = p1_r_in - p2_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q5_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q5);
-    tmp0_l = p1_l_in - p2_l_in;
-    tmp0_l += q5_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* p0 */
-    q6_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q6);
-    filter8 = LD_UB(filter48 + 32);
-    tmp0_r = p0_r_in - p1_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q6_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q6);
-    tmp0_l = p0_l_in - p1_l_in;
-    tmp0_l += q6_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* q0 */
-    q7_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q7);
-    filter8 = LD_UB(filter48 + 48);
-    tmp0_r = q7_r_in - p0_r_in;
-    tmp0_r += q0_r_in;
-    tmp0_r -= p7_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    q7_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q7);
-    tmp0_l = q7_l_in - p0_l_in;
-    tmp0_l += q0_l_in;
-    tmp0_l -= p7_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* q1 */
-    filter8 = LD_UB(filter48 + 64);
-    tmp0_r = q7_r_in - q0_r_in;
-    tmp0_r += q1_r_in;
-    tmp0_r -= p6_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q0_l_in;
-    tmp0_l += q1_l_in;
-    tmp0_l -= p6_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* q2 */
-    filter8 = LD_UB(filter48 + 80);
-    tmp0_r = q7_r_in - q1_r_in;
-    tmp0_r += q2_r_in;
-    tmp0_r -= p5_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q1_l_in;
-    tmp0_l += q2_l_in;
-    tmp0_l -= p5_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    filter8 = __msa_bmnz_v(filter8, (v16u8)r_out, flat2);
-    ST_UB(filter8, src);
-    src += 16;
-
-    /* q3 */
-    tmp0_r = q7_r_in - q2_r_in;
-    tmp0_r += q3_r_in;
-    tmp0_r -= p4_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q2_l_in;
-    tmp0_l += q3_l_in;
-    tmp0_l -= p4_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q3 = __msa_bmnz_v(q3, (v16u8)r_out, flat2);
-    ST_UB(q3, src);
-    src += 16;
-
-    /* q4 */
-    tmp0_r = q7_r_in - q3_r_in;
-    tmp0_r += q4_r_in;
-    tmp0_r -= p3_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q3_l_in;
-    tmp0_l += q4_l_in;
-    tmp0_l -= p3_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q4 = __msa_bmnz_v(q4, (v16u8)r_out, flat2);
-    ST_UB(q4, src);
-    src += 16;
-
-    /* q5 */
-    tmp0_r = q7_r_in - q4_r_in;
-    tmp0_r += q5_r_in;
-    tmp0_r -= p2_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q4_l_in;
-    tmp0_l += q5_l_in;
-    tmp0_l -= p2_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q5 = __msa_bmnz_v(q5, (v16u8)r_out, flat2);
-    ST_UB(q5, src);
-    src += 16;
-
-    /* q6 */
-    tmp0_r = q7_r_in - q5_r_in;
-    tmp0_r += q6_r_in;
-    tmp0_r -= p1_r_in;
-    tmp1_r += tmp0_r;
-    r_out = __msa_srari_h((v8i16)tmp1_r, 4);
-    tmp0_l = q7_l_in - q5_l_in;
-    tmp0_l += q6_l_in;
-    tmp0_l -= p1_l_in;
-    tmp1_l += tmp0_l;
-    l_out = __msa_srari_h((v8i16)tmp1_l, 4);
-    r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
-    q6 = __msa_bmnz_v(q6, (v16u8)r_out, flat2);
-    ST_UB(q6, src);
-
-    return 0;
-  }
-}
-
-void aom_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
-                                  const uint8_t *b_limit_ptr,
-                                  const uint8_t *limit_ptr,
-                                  const uint8_t *thresh_ptr) {
-  uint8_t early_exit = 0;
-  DECLARE_ALIGNED(32, uint8_t, transposed_input[16 * 24]);
-  uint8_t *filter48 = &transposed_input[16 * 16];
-
-  transpose_16x16((src - 8), pitch, &transposed_input[0], 16);
-
-  early_exit =
-      aom_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
-                               pitch, b_limit_ptr, limit_ptr, thresh_ptr);
-
-  if (0 == early_exit) {
-    early_exit = aom_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
-                                    &filter48[0]);
-
-    if (0 == early_exit) {
-      transpose_16x16(transposed_input, 16, (src - 8), pitch);
-    }
-  }
-}
diff --git a/aom_dsp/mips/loopfilter_4_msa.c b/aom_dsp/mips/loopfilter_4_msa.c
deleted file mode 100644
index dc0a977..0000000
--- a/aom_dsp/mips/loopfilter_4_msa.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/loopfilter_msa.h"
-
-void aom_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
-                              const uint8_t *b_limit_ptr,
-                              const uint8_t *limit_ptr,
-                              const uint8_t *thresh_ptr) {
-  uint64_t p1_d, p0_d, q0_d, q1_d;
-  v16u8 mask, hev, flat, thresh, b_limit, limit;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;
-
-  /* load vector elements */
-  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
-  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
-  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
-  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
-  SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
-}
-
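
AOM_LPF_FILTER4_8W applies the classic 4-tap in-loop filter on signed, bias-shifted pixels; hev gates the sharp inner correction and mask gates the filter as a whole. A scalar sketch of the per-pixel arithmetic (mask and hev are 0 or -1 here, matching the vector masks):

    #include <stdint.h>

    static int8_t clamp8(int t) {
      return (int8_t)(t < -128 ? -128 : t > 127 ? 127 : t);
    }

    /* Scalar sketch of filter4 on signed pixels (value - 128). */
    static void filter4_sketch(int8_t mask, int8_t hev, int8_t *ps1,
                               int8_t *ps0, int8_t *qs0, int8_t *qs1) {
      int8_t filt = clamp8(*ps1 - *qs1) & hev;
      filt = clamp8(filt + 3 * (*qs0 - *ps0)) & mask;
      const int8_t filt1 = clamp8(filt + 4) >> 3;
      const int8_t filt2 = clamp8(filt + 3) >> 3;
      *qs0 = clamp8(*qs0 - filt1);
      *ps0 = clamp8(*ps0 + filt2);
      filt = (int8_t)(((filt1 + 1) >> 1) & ~hev);  /* outer taps skip hev rows */
      *qs1 = clamp8(*qs1 - filt);
      *ps1 = clamp8(*ps1 + filt);
    }
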
-void aom_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
-                                   const uint8_t *b_limit0_ptr,
-                                   const uint8_t *limit0_ptr,
-                                   const uint8_t *thresh0_ptr,
-                                   const uint8_t *b_limit1_ptr,
-                                   const uint8_t *limit1_ptr,
-                                   const uint8_t *thresh1_ptr) {
-  v16u8 mask, hev, flat, thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-
-  /* load vector elements */
-  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
-  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
-  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
-
-  b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
-  b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
-  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
-
-  limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
-  limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
-  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
-
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
-               mask, flat);
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
-
-  ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
-}
-
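
LPF_MASK_HEV derives both per-pixel gates in one pass: hev marks high edge variance (inner-tap deltas above thresh), and mask requires every neighboring delta to stay within limit and the combined edge delta within b_limit. A scalar sketch of the same tests:

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar sketch of the hev/mask tests; returns 0 or -1 like the
     * byte lanes of the vector masks. */
    static int8_t hev_sketch(uint8_t thresh, uint8_t p1, uint8_t p0,
                             uint8_t q0, uint8_t q1) {
      return (abs(p1 - p0) > thresh || abs(q1 - q0) > thresh) ? -1 : 0;
    }

    static int8_t mask_sketch(uint8_t limit, uint8_t b_limit, uint8_t p3,
                              uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0,
                              uint8_t q1, uint8_t q2, uint8_t q3) {
      int ok = abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
               abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
               abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
               abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= b_limit;
      return ok ? -1 : 0;
    }
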
-void aom_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
-                            const uint8_t *b_limit_ptr,
-                            const uint8_t *limit_ptr,
-                            const uint8_t *thresh_ptr) {
-  v16u8 mask, hev, flat, limit, thresh, b_limit;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v8i16 vec0, vec1, vec2, vec3;
-
-  LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
-                     q3);
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
-  ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
-  ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-
-  src -= 2;
-  ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
-  src += 4 * pitch;
-  ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
-}
-
-void aom_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
-                                 const uint8_t *b_limit0_ptr,
-                                 const uint8_t *limit0_ptr,
-                                 const uint8_t *thresh0_ptr,
-                                 const uint8_t *b_limit1_ptr,
-                                 const uint8_t *limit1_ptr,
-                                 const uint8_t *thresh1_ptr) {
-  v16u8 mask, hev, flat;
-  v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
-  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
-  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
-
-  LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
-  LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13,
-         row14, row15);
-
-  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
-                      row9, row10, row11, row12, row13, row14, row15, p3, p2,
-                      p1, p0, q0, q1, q2, q3);
-
-  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
-  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
-  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
-
-  b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
-  b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
-  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
-
-  limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
-  limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
-  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
-
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
-               mask, flat);
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
-  ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
-  ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
-  ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
-  ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
-
-  src -= 2;
-
-  ST4x8_UB(tmp2, tmp3, src, pitch);
-  src += (8 * pitch);
-  ST4x8_UB(tmp4, tmp5, src, pitch);
-}
diff --git a/aom_dsp/mips/loopfilter_8_msa.c b/aom_dsp/mips/loopfilter_8_msa.c
deleted file mode 100644
index dc203e7..0000000
--- a/aom_dsp/mips/loopfilter_8_msa.c
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/mips/loopfilter_msa.h"
-
-void aom_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
-                              const uint8_t *b_limit_ptr,
-                              const uint8_t *limit_ptr,
-                              const uint8_t *thresh_ptr) {
-  uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
-  v16u8 mask, hev, flat, thresh, b_limit, limit;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-  v8i16 p2_filter8, p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q3_r, q2_r, q1_r, q0_r;
-  v16i8 zero = { 0 };
-
-  /* load vector elements */
-  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
-
-  if (__msa_test_bz_v(flat)) {
-    p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
-    p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
-    q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
-    q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
-    SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
-                p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
-
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero,
-                q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8);
-    PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8);
-
-    /* store pixel values */
-    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filter8, flat);
-    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filter8, flat);
-    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filter8, flat);
-    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filter8, flat);
-    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filter8, flat);
-    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filter8, flat);
-
-    p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
-    p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
-    p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
-    q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
-    q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
-    q2_d = __msa_copy_u_d((v2i64)q2_out, 0);
-
-    src -= 3 * pitch;
-
-    SD4(p2_d, p1_d, p0_d, q0_d, src, pitch);
-    src += (4 * pitch);
-    SD(q1_d, src);
-    src += pitch;
-    SD(q2_d, src);
-  }
-}
-
-void aom_lpf_horizontal_8_dual_msa(
-    uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0,
-    const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1,
-    const uint8_t *thresh1) {
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
-  v16u8 flat, mask, hev, tmp, thresh, b_limit, limit;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
-  v16u8 zero = { 0 };
-
-  /* load vector elements */
-  LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh0);
-  tmp = (v16u8)__msa_fill_b(*thresh1);
-  thresh = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)thresh);
-
-  b_limit = (v16u8)__msa_fill_b(*b_limit0);
-  tmp = (v16u8)__msa_fill_b(*b_limit1);
-  b_limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)b_limit);
-
-  limit = (v16u8)__msa_fill_b(*limit0);
-  tmp = (v16u8)__msa_fill_b(*limit1);
-  limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)limit);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  if (__msa_test_bz_v(flat)) {
-    ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
-    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
-                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
-
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
-                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
-                p0_filt8_r, q0_filt8_r);
-    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
-                q2_filt8_r);
-
-    /* store pixel values */
-    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
-    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
-    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
-    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
-    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
-    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);
-
-    src -= 3 * pitch;
-
-    ST_UB4(p2_out, p1_out, p0_out, q0_out, src, pitch);
-    src += (4 * pitch);
-    ST_UB2(q1_out, q2_out, src, pitch);
-    src += (2 * pitch);
-  }
-}
-
-void aom_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
-                            const uint8_t *b_limit_ptr,
-                            const uint8_t *limit_ptr,
-                            const uint8_t *thresh_ptr) {
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p1_out, p0_out, q0_out, q1_out;
-  v16u8 flat, mask, hev, thresh, b_limit, limit;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v16u8 zero = { 0 };
-  v8i16 vec0, vec1, vec2, vec3, vec4;
-
-  /* load vector elements */
-  LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
-
-  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
-                     q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
-  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
-  limit = (v16u8)__msa_fill_b(*limit_ptr);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  /* flat4 */
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  /* filter4 */
-  AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
-
-  if (__msa_test_bz_v(flat)) {
-    /* Store 4 pixels p1 - q1 */
-    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-
-    src -= 2;
-    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
-    src += 4 * pitch;
-    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
-                p0_filt8_r, q0_filt8_r, q0_filt8_r, p2_filt8_r, p1_filt8_r,
-                p0_filt8_r, q0_filt8_r);
-    PCKEV_B2_SH(q1_filt8_r, q1_filt8_r, q2_filt8_r, q2_filt8_r, q1_filt8_r,
-                q2_filt8_r);
-
-    /* store pixel values */
-    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
-    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
-    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
-    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
-    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
-    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);
-
-    /* Store 6 pixels p2 - q2 */
-    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-    vec4 = (v8i16)__msa_ilvr_b((v16i8)q2, (v16i8)q1);
-
-    src -= 3;
-    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec4, 0, src + 4, pitch);
-    src += (4 * pitch);
-    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec4, 4, src + 4, pitch);
-  }
-}
-
-void aom_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
-                                 const uint8_t *b_limit0, const uint8_t *limit0,
-                                 const uint8_t *thresh0,
-                                 const uint8_t *b_limit1, const uint8_t *limit1,
-                                 const uint8_t *thresh1) {
-  uint8_t *temp_src;
-  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
-  v16u8 p1_out, p0_out, q0_out, q1_out;
-  v16u8 flat, mask, hev, thresh, b_limit, limit;
-  v16u8 row4, row5, row6, row7, row12, row13, row14, row15;
-  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
-  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
-  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
-  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
-  v16u8 zero = { 0 };
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-
-  temp_src = src - 4;
-
-  LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7);
-  temp_src += (8 * pitch);
-  LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15);
-
-  /* transpose 16x8 matrix into 8x16 */
-  TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7, q3, q2, q1, q0,
-                      row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2,
-                      q3);
-
-  thresh = (v16u8)__msa_fill_b(*thresh0);
-  vec0 = (v8i16)__msa_fill_b(*thresh1);
-  thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh);
-
-  b_limit = (v16u8)__msa_fill_b(*b_limit0);
-  vec0 = (v8i16)__msa_fill_b(*b_limit1);
-  b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit);
-
-  limit = (v16u8)__msa_fill_b(*limit0);
-  vec0 = (v8i16)__msa_fill_b(*limit1);
-  limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit);
-
-  /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
-               mask, flat);
-  /* flat4 */
-  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  /* filter4 */
-  AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
-
-  if (__msa_test_bz_v(flat)) {
-    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
-    ILVL_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec4, vec5);
-
-    src -= 2;
-    ST4x8_UB(vec2, vec3, src, pitch);
-    src += 8 * pitch;
-    ST4x8_UB(vec4, vec5, src, pitch);
-  } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
-               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
-    AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
-                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
-
-    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
-    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-
-    /* filter8 */
-    AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
-                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
-
-    /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
-                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
-                p0_filt8_r, q0_filt8_r);
-    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
-                q2_filt8_r);
-
-    /* store pixel values */
-    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
-    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
-    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
-    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
-    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
-    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);
-
-    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec3, vec4);
-    ILVL_B2_SH(p1, p2, q0, p0, vec0, vec1);
-    ILVRL_H2_SH(vec1, vec0, vec6, vec7);
-    ILVRL_B2_SH(q2, q1, vec2, vec5);
-
-    src -= 3;
-    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec2, 0, src + 4, pitch);
-    src += (4 * pitch);
-    ST4x4_UB(vec4, vec4, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec2, 4, src + 4, pitch);
-    src += (4 * pitch);
-    ST4x4_UB(vec6, vec6, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec5, 0, src + 4, pitch);
-    src += (4 * pitch);
-    ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src, pitch);
-    ST2x4_UB(vec5, 4, src + 4, pitch);
-  }
-}
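
For reference: the 8-wide kernels above take a fast path when
__msa_test_bz_v(flat) reports an all-zero flat mask (only the filter4
result is stored); otherwise the 7-tap filter8 output is merged in per
pixel with __msa_bmnz_v. A scalar sketch of that AOM_FILTER8 step,
transcribed from the ROUND_POWER_OF_TWO comments in the DSPR2 header
removed later in this patch (filter8_px() is an illustrative name, not a
libaom function):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* p[i] holds pixel p_i, q[i] holds q_i; op/oq receive filtered p2..p0 and
   q0..q2. */
static void filter8_px(const uint8_t p[4], const uint8_t q[4],
                       uint8_t op[3], uint8_t oq[3]) {
  op[2] = ROUND_POWER_OF_TWO(p[3] + p[3] + p[3] + p[2] + p[2] + p[1] + p[0] + q[0], 3);
  op[1] = ROUND_POWER_OF_TWO(p[3] + p[3] + p[2] + p[1] + p[1] + p[0] + q[0] + q[1], 3);
  op[0] = ROUND_POWER_OF_TWO(p[3] + p[2] + p[1] + p[0] + p[0] + q[0] + q[1] + q[2], 3);
  oq[0] = ROUND_POWER_OF_TWO(p[2] + p[1] + p[0] + q[0] + q[0] + q[1] + q[2] + q[3], 3);
  oq[1] = ROUND_POWER_OF_TWO(p[1] + p[0] + q[0] + q[1] + q[1] + q[2] + q[3] + q[3], 3);
  oq[2] = ROUND_POWER_OF_TWO(p[0] + q[0] + q[1] + q[2] + q[2] + q[3] + q[3] + q[3], 3);
}

Each output is a rounded weighted average (taps [1, 1, 1, 2, 1, 1, 1]
centered on the pixel being replaced), which is why the SIMD code widens to
16-bit lanes before filtering and packs back to 8 bits afterwards.
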
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.c b/aom_dsp/mips/loopfilter_filters_dspr2.c
deleted file mode 100644
index 8c41278..0000000
--- a/aom_dsp/mips/loopfilter_filters_dspr2.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_dsp/mips/common_dspr2.h"
-#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
-#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
-#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/aom_mem.h"
-
-#if HAVE_DSPR2
-void aom_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
-                                const uint8_t *blimit, const uint8_t *limit,
-                                const uint8_t *thresh) {
-  uint8_t i;
-  uint32_t mask;
-  uint32_t hev;
-  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
-  uint8_t *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
-      "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
-      "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  /* prefetch data for store */
-  prefetch_store(s);
-
-  /* loop filter designed to work using chars so that we can make maximum
-     use of 8-bit SIMD instructions. */
-  for (i = 0; i < 2; i++) {
-    sm1 = s - (pitch << 2);
-    s0 = sm1 + pitch;
-    s1 = s0 + pitch;
-    s2 = s - pitch;
-    s3 = s;
-    s4 = s + pitch;
-    s5 = s4 + pitch;
-    s6 = s5 + pitch;
-
-    __asm__ __volatile__(
-        "lw     %[p1],  (%[s1])    \n\t"
-        "lw     %[p2],  (%[s2])    \n\t"
-        "lw     %[p3],  (%[s3])    \n\t"
-        "lw     %[p4],  (%[s4])    \n\t"
-
-        : [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4)
-        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
-
-    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
-       mask will be zero and filtering is not needed */
-    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
-      __asm__ __volatile__(
-          "lw       %[pm1], (%[sm1])   \n\t"
-          "lw       %[p0],  (%[s0])    \n\t"
-          "lw       %[p5],  (%[s5])    \n\t"
-          "lw       %[p6],  (%[s6])    \n\t"
-
-          : [pm1] "=&r"(pm1), [p0] "=&r"(p0), [p5] "=&r"(p5), [p6] "=&r"(p6)
-          : [sm1] "r"(sm1), [s0] "r"(s0), [s5] "r"(s5), [s6] "r"(s6));
-
-      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5,
-                            p6, thresh_vec, &hev, &mask);
-
-      /* if mask == 0, filtering is not needed */
-      if (mask) {
-        /* filtering */
-        filter_dspr2(mask, hev, &p1, &p2, &p3, &p4);
-
-        __asm__ __volatile__(
-            "sw     %[p1],  (%[s1])    \n\t"
-            "sw     %[p2],  (%[s2])    \n\t"
-            "sw     %[p3],  (%[s3])    \n\t"
-            "sw     %[p4],  (%[s4])    \n\t"
-
-            :
-            : [p1] "r"(p1), [p2] "r"(p2), [p3] "r"(p3), [p4] "r"(p4),
-              [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
-      }
-    }
-
-    s = s + 4;
-  }
-}
-
-void aom_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
-                              const uint8_t *blimit, const uint8_t *limit,
-                              const uint8_t *thresh) {
-  uint8_t i;
-  uint32_t mask, hev;
-  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
-  uint8_t *s1, *s2, *s3, *s4;
-  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
-      "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
-      "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  /* prefetch data for store */
-  prefetch_store(s + pitch);
-
-  for (i = 0; i < 2; i++) {
-    s1 = s;
-    s2 = s + pitch;
-    s3 = s2 + pitch;
-    s4 = s3 + pitch;
-    s = s4 + pitch;
-
-    /* load quad-byte vectors
-     * memory is 4-byte aligned
-     */
-    p2 = *((uint32_t *)(s1 - 4));
-    p6 = *((uint32_t *)(s1));
-    p1 = *((uint32_t *)(s2 - 4));
-    p5 = *((uint32_t *)(s2));
-    p0 = *((uint32_t *)(s3 - 4));
-    p4 = *((uint32_t *)(s3));
-    pm1 = *((uint32_t *)(s4 - 4));
-    p3 = *((uint32_t *)(s4));
-
-    /* transpose pm1, p0, p1, p2 */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
-        "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
-        "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
-
-        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
-        "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
-        "append         %[p1],      %[sec3],    16          \n\t"
-        "append         %[pm1],     %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
-          [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
-
-    /* transpose p3, p4, p5, p6 */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
-        "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
-        "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
-
-        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
-        "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
-        "append         %[p5],      %[sec3],    16          \n\t"
-        "append         %[p3],      %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
-          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
-
-    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
-     * mask will be zero and filtering is not needed
-     */
-    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
-      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5,
-                            p6, thresh_vec, &hev, &mask);
-
-      /* if mask == 0, filtering is not needed */
-      if (mask) {
-        /* filtering */
-        filter_dspr2(mask, hev, &p1, &p2, &p3, &p4);
-
-        /* unpack processed 4x4 neighborhood
-         * don't use transpose on output data
-         * because memory isn't aligned
-         */
-        __asm__ __volatile__(
-            "sb     %[p4],   1(%[s4])    \n\t"
-            "sb     %[p3],   0(%[s4])    \n\t"
-            "sb     %[p2],  -1(%[s4])    \n\t"
-            "sb     %[p1],  -2(%[s4])    \n\t"
-
-            :
-            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
-              [s4] "r"(s4));
-
-        __asm__ __volatile__(
-            "srl    %[p4],  %[p4],  8     \n\t"
-            "srl    %[p3],  %[p3],  8     \n\t"
-            "srl    %[p2],  %[p2],  8     \n\t"
-            "srl    %[p1],  %[p1],  8     \n\t"
-
-            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
-            :);
-
-        __asm__ __volatile__(
-            "sb     %[p4],   1(%[s3])    \n\t"
-            "sb     %[p3],   0(%[s3])    \n\t"
-            "sb     %[p2],  -1(%[s3])    \n\t"
-            "sb     %[p1],  -2(%[s3])    \n\t"
-
-            : [p1] "+r"(p1)
-            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [s3] "r"(s3));
-
-        __asm__ __volatile__(
-            "srl    %[p4],  %[p4],  8     \n\t"
-            "srl    %[p3],  %[p3],  8     \n\t"
-            "srl    %[p2],  %[p2],  8     \n\t"
-            "srl    %[p1],  %[p1],  8     \n\t"
-
-            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
-            :);
-
-        __asm__ __volatile__(
-            "sb     %[p4],   1(%[s2])    \n\t"
-            "sb     %[p3],   0(%[s2])    \n\t"
-            "sb     %[p2],  -1(%[s2])    \n\t"
-            "sb     %[p1],  -2(%[s2])    \n\t"
-
-            :
-            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
-              [s2] "r"(s2));
-
-        __asm__ __volatile__(
-            "srl    %[p4],  %[p4],  8     \n\t"
-            "srl    %[p3],  %[p3],  8     \n\t"
-            "srl    %[p2],  %[p2],  8     \n\t"
-            "srl    %[p1],  %[p1],  8     \n\t"
-
-            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
-            :);
-
-        __asm__ __volatile__(
-            "sb     %[p4],   1(%[s1])    \n\t"
-            "sb     %[p3],   0(%[s1])    \n\t"
-            "sb     %[p2],  -1(%[s1])    \n\t"
-            "sb     %[p1],  -2(%[s1])    \n\t"
-
-            :
-            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
-              [s1] "r"(s1));
-      }
-    }
-  }
-}
-
-void aom_lpf_horizontal_4_dual_dspr2(
-    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
-    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
-    const uint8_t *limit1, const uint8_t *thresh1) {
-  aom_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0);
-  aom_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1);
-}
-
-void aom_lpf_horizontal_8_dual_dspr2(
-    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
-    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
-    const uint8_t *limit1, const uint8_t *thresh1) {
-  aom_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0);
-  aom_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1);
-}
-
-void aom_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
-                                   const uint8_t *limit0,
-                                   const uint8_t *thresh0,
-                                   const uint8_t *blimit1,
-                                   const uint8_t *limit1,
-                                   const uint8_t *thresh1) {
-  aom_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0);
-  aom_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
-}
-
-void aom_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
-                                   const uint8_t *limit0,
-                                   const uint8_t *thresh0,
-                                   const uint8_t *blimit1,
-                                   const uint8_t *limit1,
-                                   const uint8_t *thresh1) {
-  aom_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0);
-  aom_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
-}
-
-void aom_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
-                                    const uint8_t *limit,
-                                    const uint8_t *thresh) {
-  aom_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
-  aom_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh);
-}
-#endif  // #if HAVE_DSPR2
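
For reference: the DSPR2 code above, and the helpers in the header removed
next, are SWAR routines: four pixels travel in a single 32-bit GPR, and the
*_dual_* variants simply run the single-edge filter twice, offset by 8
pixels (or 8 rows). Before the saturating addq_s.ph/subq_s.ph arithmetic,
each packed word is split into "left" and "right" halfword pairs with the
pixel in the high byte of each halfword, via the HWM = 0xFF00FF00 masking
pattern. A portable sketch of that packing (split_hwm()/join_hwm() are
illustrative names, not libaom's):

#include <stdint.h>

static void split_hwm(uint32_t v, uint32_t *l, uint32_t *r) {
  const uint32_t HWM = 0xFF00FF00u; /* high byte of each halfword */
  *l = v & HWM;        /* bytes 3 and 1, already in place */
  *r = (v << 8) & HWM; /* bytes 2 and 0, shifted up */
}

static uint32_t join_hwm(uint32_t l, uint32_t r) {
  /* keep the pixel byte of each halfword and repack to a quad-byte word */
  return (l & 0xFF00FF00u) | ((r & 0xFF00FF00u) >> 8);
}

Keeping each pixel in the top byte of a 16-bit lane gives the fixed-point
filter 8 bits of headroom, which is the accuracy the comment in
filter_dspr2 refers to.
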
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.h b/aom_dsp/mips/loopfilter_filters_dspr2.h
deleted file mode 100644
index 28f0dc3..0000000
--- a/aom_dsp/mips/loopfilter_filters_dspr2.h
+++ /dev/null
@@ -1,736 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
-#define AOM_AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_mem/aom_mem.h"
-#include "aom_ports/mem.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-/* inputs & outputs are quad-byte vectors */
-static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1,
-                                uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) {
-  int32_t aom_filter_l, aom_filter_r;
-  int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
-  int32_t subr_r, subr_l;
-  uint32_t t1, t2, HWM, t3;
-  uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
-  int32_t vps1, vps0, vqs0, vqs1;
-  int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
-  uint32_t N128;
-
-  N128 = 0x80808080;
-  t1 = 0x03000300;
-  t2 = 0x04000400;
-  t3 = 0x01000100;
-  HWM = 0xFF00FF00;
-
-  vps0 = (*ps0) ^ N128;
-  vps1 = (*ps1) ^ N128;
-  vqs0 = (*qs0) ^ N128;
-  vqs1 = (*qs1) ^ N128;
-
-  /* use halfword pairs instead of quad-bytes for accuracy */
-  vps0_l = vps0 & HWM;
-  vps0_r = vps0 << 8;
-  vps0_r = vps0_r & HWM;
-
-  vps1_l = vps1 & HWM;
-  vps1_r = vps1 << 8;
-  vps1_r = vps1_r & HWM;
-
-  vqs0_l = vqs0 & HWM;
-  vqs0_r = vqs0 << 8;
-  vqs0_r = vqs0_r & HWM;
-
-  vqs1_l = vqs1 & HWM;
-  vqs1_r = vqs1 << 8;
-  vqs1_r = vqs1_r & HWM;
-
-  mask_l = mask & HWM;
-  mask_r = mask << 8;
-  mask_r = mask_r & HWM;
-
-  hev_l = hev & HWM;
-  hev_r = hev << 8;
-  hev_r = hev_r & HWM;
-
-  __asm__ __volatile__(
-      /* aom_filter = aom_signed_char_clamp(ps1 - qs1); */
-      "subq_s.ph    %[aom_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
-      "subq_s.ph    %[aom_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
-
-      /* qs0 - ps0 */
-      "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
-      "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
-
-      /* aom_filter &= hev; */
-      "and          %[aom_filter_l], %[aom_filter_l], %[hev_l]        \n\t"
-      "and          %[aom_filter_r], %[aom_filter_r], %[hev_r]        \n\t"
-
-      /* aom_filter = aom_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-      "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-      "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-
-      /* aom_filter &= mask; */
-      "and          %[aom_filter_l], %[aom_filter_l], %[mask_l]       \n\t"
-      "and          %[aom_filter_r], %[aom_filter_r], %[mask_r]       \n\t"
-
-      : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
-        [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
-        [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
-      : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
-        [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
-        [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l),
-        [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r),
-        [HWM] "r"(HWM));
-
-  /* save bottom 3 bits so that we round one side +4 and the other +3 */
-  __asm__ __volatile__(
-      /* Filter1 = aom_signed_char_clamp(aom_filter + 4) >> 3; */
-      "addq_s.ph    %[Filter1_l],    %[aom_filter_l], %[t2]           \n\t"
-      "addq_s.ph    %[Filter1_r],    %[aom_filter_r], %[t2]           \n\t"
-
-      /* Filter2 = aom_signed_char_clamp(aom_filter + 3) >> 3; */
-      "addq_s.ph    %[Filter2_l],    %[aom_filter_l], %[t1]           \n\t"
-      "addq_s.ph    %[Filter2_r],    %[aom_filter_r], %[t1]           \n\t"
-      "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
-      "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
-
-      "shra.ph      %[Filter2_l],    %[Filter2_l],    3               \n\t"
-      "shra.ph      %[Filter2_r],    %[Filter2_r],    3               \n\t"
-
-      "and          %[Filter1_l],    %[Filter1_l],    %[HWM]          \n\t"
-      "and          %[Filter1_r],    %[Filter1_r],    %[HWM]          \n\t"
-
-      /* vps0 = aom_signed_char_clamp(ps0 + Filter2); */
-      "addq_s.ph    %[vps0_l],       %[vps0_l],       %[Filter2_l]    \n\t"
-      "addq_s.ph    %[vps0_r],       %[vps0_r],       %[Filter2_r]    \n\t"
-
-      /* vqs0 = aom_signed_char_clamp(qs0 - Filter1); */
-      "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
-      "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
-
-      : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
-        [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r),
-        [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
-        [vqs0_r] "+r"(vqs0_r)
-      : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
-        [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
-
-  __asm__ __volatile__(
-      /* (aom_filter += 1) >>= 1 */
-      "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
-      "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
-
-      /* aom_filter &= ~hev; */
-      "and          %[Filter1_l],    %[Filter1_l],    %[invhev_l]     \n\t"
-      "and          %[Filter1_r],    %[Filter1_r],    %[invhev_r]     \n\t"
-
-      /* vps1 = aom_signed_char_clamp(ps1 + aom_filter); */
-      "addq_s.ph    %[vps1_l],       %[vps1_l],       %[Filter1_l]    \n\t"
-      "addq_s.ph    %[vps1_r],       %[vps1_r],       %[Filter1_r]    \n\t"
-
-      /* vqs1 = aom_signed_char_clamp(qs1 - aom_filter); */
-      "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
-      "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
-
-      : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r),
-        [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
-        [vqs1_r] "+r"(vqs1_r)
-      : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
-
-  /* Create quad-bytes from halfword pairs */
-  vqs0_l = vqs0_l & HWM;
-  vqs1_l = vqs1_l & HWM;
-  vps0_l = vps0_l & HWM;
-  vps1_l = vps1_l & HWM;
-
-  __asm__ __volatile__(
-      "shrl.ph      %[vqs0_r],       %[vqs0_r],       8   \n\t"
-      "shrl.ph      %[vps0_r],       %[vps0_r],       8   \n\t"
-      "shrl.ph      %[vqs1_r],       %[vqs1_r],       8   \n\t"
-      "shrl.ph      %[vps1_r],       %[vps1_r],       8   \n\t"
-
-      : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r),
-        [vqs0_r] "+r"(vqs0_r)
-      :);
-
-  vqs0 = vqs0_l | vqs0_r;
-  vqs1 = vqs1_l | vqs1_r;
-  vps0 = vps0_l | vps0_r;
-  vps1 = vps1_l | vps1_r;
-
-  *ps0 = vps0 ^ N128;
-  *ps1 = vps1 ^ N128;
-  *qs0 = vqs0 ^ N128;
-  *qs1 = vqs1 ^ N128;
-}
-
-static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1,
-                                 uint32_t ps0, uint32_t qs0, uint32_t qs1,
-                                 uint32_t *p1_f0, uint32_t *p0_f0,
-                                 uint32_t *q0_f0, uint32_t *q1_f0) {
-  int32_t aom_filter_l, aom_filter_r;
-  int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
-  int32_t subr_r, subr_l;
-  uint32_t t1, t2, HWM, t3;
-  uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
-  int32_t vps1, vps0, vqs0, vqs1;
-  int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
-  uint32_t N128;
-
-  N128 = 0x80808080;
-  t1 = 0x03000300;
-  t2 = 0x04000400;
-  t3 = 0x01000100;
-  HWM = 0xFF00FF00;
-
-  vps0 = (ps0) ^ N128;
-  vps1 = (ps1) ^ N128;
-  vqs0 = (qs0) ^ N128;
-  vqs1 = (qs1) ^ N128;
-
-  /* use halfword pairs instead of quad-bytes for accuracy */
-  vps0_l = vps0 & HWM;
-  vps0_r = vps0 << 8;
-  vps0_r = vps0_r & HWM;
-
-  vps1_l = vps1 & HWM;
-  vps1_r = vps1 << 8;
-  vps1_r = vps1_r & HWM;
-
-  vqs0_l = vqs0 & HWM;
-  vqs0_r = vqs0 << 8;
-  vqs0_r = vqs0_r & HWM;
-
-  vqs1_l = vqs1 & HWM;
-  vqs1_r = vqs1 << 8;
-  vqs1_r = vqs1_r & HWM;
-
-  mask_l = mask & HWM;
-  mask_r = mask << 8;
-  mask_r = mask_r & HWM;
-
-  hev_l = hev & HWM;
-  hev_r = hev << 8;
-  hev_r = hev_r & HWM;
-
-  __asm__ __volatile__(
-      /* aom_filter = aom_signed_char_clamp(ps1 - qs1); */
-      "subq_s.ph    %[aom_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
-      "subq_s.ph    %[aom_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
-
-      /* qs0 - ps0 */
-      "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
-      "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
-
-      /* aom_filter &= hev; */
-      "and          %[aom_filter_l], %[aom_filter_l], %[hev_l]        \n\t"
-      "and          %[aom_filter_r], %[aom_filter_r], %[hev_r]        \n\t"
-
-      /* aom_filter = aom_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-      "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-      "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
-      "addq_s.ph    %[aom_filter_l], %[aom_filter_l], %[subr_l]       \n\t"
-      "addq_s.ph    %[aom_filter_r], %[aom_filter_r], %[subr_r]       \n\t"
-
-      /* aom_filter &= mask; */
-      "and          %[aom_filter_l], %[aom_filter_l], %[mask_l]       \n\t"
-      "and          %[aom_filter_r], %[aom_filter_r], %[mask_r]       \n\t"
-
-      : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
-        [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
-        [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
-      : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
-        [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
-        [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l),
-        [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r),
-        [HWM] "r"(HWM));
-
-  /* save bottom 3 bits so that we round one side +4 and the other +3 */
-  __asm__ __volatile__(
-      /* Filter1 = aom_signed_char_clamp(aom_filter + 4) >> 3; */
-      "addq_s.ph    %[Filter1_l],    %[aom_filter_l], %[t2]           \n\t"
-      "addq_s.ph    %[Filter1_r],    %[aom_filter_r], %[t2]           \n\t"
-
-      /* Filter2 = aom_signed_char_clamp(aom_filter + 3) >> 3; */
-      "addq_s.ph    %[Filter2_l],    %[aom_filter_l], %[t1]           \n\t"
-      "addq_s.ph    %[Filter2_r],    %[aom_filter_r], %[t1]           \n\t"
-      "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
-      "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
-
-      "shra.ph      %[Filter2_l],    %[Filter2_l],    3               \n\t"
-      "shra.ph      %[Filter2_r],    %[Filter2_r],    3               \n\t"
-
-      "and          %[Filter1_l],    %[Filter1_l],    %[HWM]          \n\t"
-      "and          %[Filter1_r],    %[Filter1_r],    %[HWM]          \n\t"
-
-      /* vps0 = aom_signed_char_clamp(ps0 + Filter2); */
-      "addq_s.ph    %[vps0_l],       %[vps0_l],       %[Filter2_l]    \n\t"
-      "addq_s.ph    %[vps0_r],       %[vps0_r],       %[Filter2_r]    \n\t"
-
-      /* vqs0 = aom_signed_char_clamp(qs0 - Filter1); */
-      "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
-      "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
-
-      : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
-        [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r),
-        [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
-        [vqs0_r] "+r"(vqs0_r)
-      : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
-        [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
-
-  __asm__ __volatile__(
-      /* (aom_filter += 1) >>= 1 */
-      "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
-      "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
-
-      /* aom_filter &= ~hev; */
-      "and          %[Filter1_l],    %[Filter1_l],    %[invhev_l]     \n\t"
-      "and          %[Filter1_r],    %[Filter1_r],    %[invhev_r]     \n\t"
-
-      /* vps1 = aom_signed_char_clamp(ps1 + aom_filter); */
-      "addq_s.ph    %[vps1_l],       %[vps1_l],       %[Filter1_l]    \n\t"
-      "addq_s.ph    %[vps1_r],       %[vps1_r],       %[Filter1_r]    \n\t"
-
-      /* vqs1 = aom_signed_char_clamp(qs1 - aom_filter); */
-      "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
-      "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
-
-      : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r),
-        [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
-        [vqs1_r] "+r"(vqs1_r)
-      : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
-
-  /* Create quad-bytes from halfword pairs */
-  vqs0_l = vqs0_l & HWM;
-  vqs1_l = vqs1_l & HWM;
-  vps0_l = vps0_l & HWM;
-  vps1_l = vps1_l & HWM;
-
-  __asm__ __volatile__(
-      "shrl.ph      %[vqs0_r],       %[vqs0_r],       8   \n\t"
-      "shrl.ph      %[vps0_r],       %[vps0_r],       8   \n\t"
-      "shrl.ph      %[vqs1_r],       %[vqs1_r],       8   \n\t"
-      "shrl.ph      %[vps1_r],       %[vps1_r],       8   \n\t"
-
-      : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r),
-        [vqs0_r] "+r"(vqs0_r)
-      :);
-
-  vqs0 = vqs0_l | vqs0_r;
-  vqs1 = vqs1_l | vqs1_r;
-  vps0 = vps0_l | vps0_r;
-  vps1 = vps1_l | vps1_r;
-
-  *p0_f0 = vps0 ^ N128;
-  *p1_f0 = vps1 ^ N128;
-  *q0_f0 = vqs0 ^ N128;
-  *q1_f0 = vqs1 ^ N128;
-}
-
-static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, uint32_t *op1,
-                                  uint32_t *op0, uint32_t *oq0, uint32_t *oq1,
-                                  uint32_t *oq2, uint32_t *oq3) {
-  /* use a 7-tap filter [1, 1, 1, 2, 1, 1, 1] for the flat case */
-  const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
-  const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
-  uint32_t res_op2, res_op1, res_op0;
-  uint32_t res_oq0, res_oq1, res_oq2;
-  uint32_t tmp;
-  uint32_t add_p210_q012;
-  uint32_t u32Four = 0x00040004;
-
-  /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3)  1 */
-  /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3)  2 */
-  /* *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + p0 + q0 + q1 + q2, 3)  3 */
-  /* *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + q0 + q0 + q1 + q2 + q3, 3)  4 */
-  /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3)  5 */
-  /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3)  6 */
-
-  __asm__ __volatile__(
-      "addu.ph    %[add_p210_q012],  %[p2],             %[p1]            \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[p0]            \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q0]            \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q1]            \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q2]            \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[u32Four]       \n\t"
-
-      "shll.ph    %[tmp],            %[p3],             1                \n\t"
-      "addu.ph    %[res_op2],        %[tmp],            %[p3]            \n\t"
-      "addu.ph    %[res_op1],        %[p3],             %[p3]            \n\t"
-      "addu.ph    %[res_op2],        %[res_op2],        %[p2]            \n\t"
-      "addu.ph    %[res_op1],        %[res_op1],        %[p1]            \n\t"
-      "addu.ph    %[res_op2],        %[res_op2],        %[add_p210_q012] \n\t"
-      "addu.ph    %[res_op1],        %[res_op1],        %[add_p210_q012] \n\t"
-      "subu.ph    %[res_op2],        %[res_op2],        %[q1]            \n\t"
-      "subu.ph    %[res_op1],        %[res_op1],        %[q2]            \n\t"
-      "subu.ph    %[res_op2],        %[res_op2],        %[q2]            \n\t"
-      "shrl.ph    %[res_op1],        %[res_op1],        3                \n\t"
-      "shrl.ph    %[res_op2],        %[res_op2],        3                \n\t"
-      "addu.ph    %[res_op0],        %[p3],             %[p0]            \n\t"
-      "addu.ph    %[res_oq0],        %[q0],             %[q3]            \n\t"
-      "addu.ph    %[res_op0],        %[res_op0],        %[add_p210_q012] \n\t"
-      "addu.ph    %[res_oq0],        %[res_oq0],        %[add_p210_q012] \n\t"
-      "addu.ph    %[res_oq1],        %[q3],             %[q3]            \n\t"
-      "shll.ph    %[tmp],            %[q3],             1                \n\t"
-      "addu.ph    %[res_oq1],        %[res_oq1],        %[q1]            \n\t"
-      "addu.ph    %[res_oq2],        %[tmp],            %[q3]            \n\t"
-      "addu.ph    %[res_oq1],        %[res_oq1],        %[add_p210_q012] \n\t"
-      "addu.ph    %[res_oq2],        %[res_oq2],        %[add_p210_q012] \n\t"
-      "subu.ph    %[res_oq1],        %[res_oq1],        %[p2]            \n\t"
-      "addu.ph    %[res_oq2],        %[res_oq2],        %[q2]            \n\t"
-      "shrl.ph    %[res_oq1],        %[res_oq1],        3                \n\t"
-      "subu.ph    %[res_oq2],        %[res_oq2],        %[p2]            \n\t"
-      "shrl.ph    %[res_oq0],        %[res_oq0],        3                \n\t"
-      "subu.ph    %[res_oq2],        %[res_oq2],        %[p1]            \n\t"
-      "shrl.ph    %[res_op0],        %[res_op0],        3                \n\t"
-      "shrl.ph    %[res_oq2],        %[res_oq2],        3                \n\t"
-
-      : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp),
-        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
-        [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0),
-        [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2)
-      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2),
-        [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four));
-
-  *op2 = res_op2;
-  *op1 = res_op1;
-  *op0 = res_op0;
-  *oq0 = res_oq0;
-  *oq1 = res_oq1;
-  *oq2 = res_oq2;
-}
-
-static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, uint32_t p1,
-                                   uint32_t p0, uint32_t q0, uint32_t q1,
-                                   uint32_t q2, uint32_t q3, uint32_t *op2_f1,
-                                   uint32_t *op1_f1, uint32_t *op0_f1,
-                                   uint32_t *oq0_f1, uint32_t *oq1_f1,
-                                   uint32_t *oq2_f1) {
-  /* use a 7-tap filter [1, 1, 1, 2, 1, 1, 1] for the flat case */
-  uint32_t res_op2, res_op1, res_op0;
-  uint32_t res_oq0, res_oq1, res_oq2;
-  uint32_t tmp;
-  uint32_t add_p210_q012;
-  uint32_t u32Four = 0x00040004;
-
-  /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3)   1 */
-  /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3)   2 */
-  /* *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + p0 + q0 + q1 + q2, 3)   3 */
-  /* *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + q0 + q0 + q1 + q2 + q3, 3)   4 */
-  /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3)   5 */
-  /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3)   6 */
-
-  __asm__ __volatile__(
-      "addu.ph    %[add_p210_q012],  %[p2],             %[p1]             \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[p0]             \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q0]             \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q1]             \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q2]             \n\t"
-      "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[u32Four]        \n\t"
-
-      "shll.ph    %[tmp],            %[p3],             1                 \n\t"
-      "addu.ph    %[res_op2],        %[tmp],            %[p3]             \n\t"
-      "addu.ph    %[res_op1],        %[p3],             %[p3]             \n\t"
-      "addu.ph    %[res_op2],        %[res_op2],        %[p2]             \n\t"
-      "addu.ph    %[res_op1],        %[res_op1],        %[p1]             \n\t"
-      "addu.ph    %[res_op2],        %[res_op2],        %[add_p210_q012]  \n\t"
-      "addu.ph    %[res_op1],        %[res_op1],        %[add_p210_q012]  \n\t"
-      "subu.ph    %[res_op2],        %[res_op2],        %[q1]             \n\t"
-      "subu.ph    %[res_op1],        %[res_op1],        %[q2]             \n\t"
-      "subu.ph    %[res_op2],        %[res_op2],        %[q2]             \n\t"
-      "shrl.ph    %[res_op1],        %[res_op1],        3                 \n\t"
-      "shrl.ph    %[res_op2],        %[res_op2],        3                 \n\t"
-      "addu.ph    %[res_op0],        %[p3],             %[p0]             \n\t"
-      "addu.ph    %[res_oq0],        %[q0],             %[q3]             \n\t"
-      "addu.ph    %[res_op0],        %[res_op0],        %[add_p210_q012]  \n\t"
-      "addu.ph    %[res_oq0],        %[res_oq0],        %[add_p210_q012]  \n\t"
-      "addu.ph    %[res_oq1],        %[q3],             %[q3]             \n\t"
-      "shll.ph    %[tmp],            %[q3],             1                 \n\t"
-      "addu.ph    %[res_oq1],        %[res_oq1],        %[q1]             \n\t"
-      "addu.ph    %[res_oq2],        %[tmp],            %[q3]             \n\t"
-      "addu.ph    %[res_oq1],        %[res_oq1],        %[add_p210_q012]  \n\t"
-      "addu.ph    %[res_oq2],        %[res_oq2],        %[add_p210_q012]  \n\t"
-      "subu.ph    %[res_oq1],        %[res_oq1],        %[p2]             \n\t"
-      "addu.ph    %[res_oq2],        %[res_oq2],        %[q2]             \n\t"
-      "shrl.ph    %[res_oq1],        %[res_oq1],        3                 \n\t"
-      "subu.ph    %[res_oq2],        %[res_oq2],        %[p2]             \n\t"
-      "shrl.ph    %[res_oq0],        %[res_oq0],        3                 \n\t"
-      "subu.ph    %[res_oq2],        %[res_oq2],        %[p1]             \n\t"
-      "shrl.ph    %[res_op0],        %[res_op0],        3                 \n\t"
-      "shrl.ph    %[res_oq2],        %[res_oq2],        3                 \n\t"
-
-      : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp),
-        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
-        [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0),
-        [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2)
-      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2),
-        [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four));
-
-  *op2_f1 = res_op2;
-  *op1_f1 = res_op1;
-  *op0_f1 = res_op0;
-  *oq0_f1 = res_oq0;
-  *oq1_f1 = res_oq1;
-  *oq2_f1 = res_oq2;
-}
-
-static INLINE void wide_mbfilter_dspr2(
-    uint32_t *op7, uint32_t *op6, uint32_t *op5, uint32_t *op4, uint32_t *op3,
-    uint32_t *op2, uint32_t *op1, uint32_t *op0, uint32_t *oq0, uint32_t *oq1,
-    uint32_t *oq2, uint32_t *oq3, uint32_t *oq4, uint32_t *oq5, uint32_t *oq6,
-    uint32_t *oq7) {
-  const uint32_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4;
-  const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
-  const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
-  const uint32_t q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
-  uint32_t res_op6, res_op5, res_op4, res_op3, res_op2, res_op1, res_op0;
-  uint32_t res_oq0, res_oq1, res_oq2, res_oq3, res_oq4, res_oq5, res_oq6;
-  uint32_t tmp;
-  uint32_t add_p6toq6;
-  uint32_t u32Eight = 0x00080008;
-
-  __asm__ __volatile__(
-      /* addition of p6,p5,p4,p3,p2,p1,p0,q0,q1,q2,q3,q4,q5,q6
-         which is used most of the time */
-      "addu.ph      %[add_p6toq6],     %[p6],              %[p5]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[p4]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[p3]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[p2]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[p1]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[p0]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q0]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q1]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q2]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q3]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q4]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q5]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q6]         \n\t"
-      "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[u32Eight]   \n\t"
-
-      : [add_p6toq6] "=&r"(add_p6toq6)
-      : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2),
-        [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2),
-        [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6),
-        [u32Eight] "r"(u32Eight));
-
-  __asm__ __volatile__(
-      /* *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 +
-                                   p3 + p2 + p1 + p0 + q0, 4) */
-      "shll.ph       %[tmp],            %[p7],            3               \n\t"
-      "subu.ph       %[res_op6],        %[tmp],           %[p7]           \n\t"
-      "addu.ph       %[res_op6],        %[res_op6],       %[p6]           \n\t"
-      "addu.ph       %[res_op6],        %[res_op6],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q1]           \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q2]           \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q3]           \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q4]           \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q5]           \n\t"
-      "subu.ph       %[res_op6],        %[res_op6],       %[q6]           \n\t"
-      "shrl.ph       %[res_op6],        %[res_op6],       4               \n\t"
-
-      /* *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 +
-                                   p2 + p1 + p0 + q0 + q1, 4) */
-      "shll.ph       %[tmp],            %[p7],            2               \n\t"
-      "addu.ph       %[res_op5],        %[tmp],           %[p7]           \n\t"
-      "addu.ph       %[res_op5],        %[res_op5],       %[p7]           \n\t"
-      "addu.ph       %[res_op5],        %[res_op5],       %[p5]           \n\t"
-      "addu.ph       %[res_op5],        %[res_op5],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op5],        %[res_op5],       %[q2]           \n\t"
-      "subu.ph       %[res_op5],        %[res_op5],       %[q3]           \n\t"
-      "subu.ph       %[res_op5],        %[res_op5],       %[q4]           \n\t"
-      "subu.ph       %[res_op5],        %[res_op5],       %[q5]           \n\t"
-      "subu.ph       %[res_op5],        %[res_op5],       %[q6]           \n\t"
-      "shrl.ph       %[res_op5],        %[res_op5],       4               \n\t"
-
-      /* *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 +
-                                   p1 + p0 + q0 + q1 + q2, 4) */
-      "shll.ph       %[tmp],            %[p7],            2               \n\t"
-      "addu.ph       %[res_op4],        %[tmp],           %[p7]           \n\t"
-      "addu.ph       %[res_op4],        %[res_op4],       %[p4]           \n\t"
-      "addu.ph       %[res_op4],        %[res_op4],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op4],        %[res_op4],       %[q3]           \n\t"
-      "subu.ph       %[res_op4],        %[res_op4],       %[q4]           \n\t"
-      "subu.ph       %[res_op4],        %[res_op4],       %[q5]           \n\t"
-      "subu.ph       %[res_op4],        %[res_op4],       %[q6]           \n\t"
-      "shrl.ph       %[res_op4],        %[res_op4],       4               \n\t"
-
-      /* *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 +
-                                   p1 + p0 + q0 + q1 + q2 + q3, 4) */
-      "shll.ph       %[tmp],            %[p7],            2               \n\t"
-      "addu.ph       %[res_op3],        %[tmp],           %[p3]           \n\t"
-      "addu.ph       %[res_op3],        %[res_op3],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op3],        %[res_op3],       %[q4]           \n\t"
-      "subu.ph       %[res_op3],        %[res_op3],       %[q5]           \n\t"
-      "subu.ph       %[res_op3],        %[res_op3],       %[q6]           \n\t"
-      "shrl.ph       %[res_op3],        %[res_op3],       4               \n\t"
-
-      /* *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 +
-                                   p0 + q0 + q1 + q2 + q3 + q4, 4) */
-      "shll.ph       %[tmp],            %[p7],            1               \n\t"
-      "addu.ph       %[res_op2],        %[tmp],           %[p7]           \n\t"
-      "addu.ph       %[res_op2],        %[res_op2],       %[p2]           \n\t"
-      "addu.ph       %[res_op2],        %[res_op2],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op2],        %[res_op2],       %[q5]           \n\t"
-      "subu.ph       %[res_op2],        %[res_op2],       %[q6]           \n\t"
-      "shrl.ph       %[res_op2],        %[res_op2],       4               \n\t"
-
-      /* *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
-                                   p0 + q0 + q1 + q2 + q3 + q4 + q5, 4); */
-      "shll.ph       %[tmp],            %[p7],            1               \n\t"
-      "addu.ph       %[res_op1],        %[tmp],           %[p1]           \n\t"
-      "addu.ph       %[res_op1],        %[res_op1],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_op1],        %[res_op1],       %[q6]           \n\t"
-      "shrl.ph       %[res_op1],        %[res_op1],       4               \n\t"
-
-      /* *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
-                                  q0 + q1 + q2 + q3 + q4 + q5 + q6, 4) */
-      "addu.ph       %[res_op0],        %[p7],            %[p0]           \n\t"
-      "addu.ph       %[res_op0],        %[res_op0],       %[add_p6toq6]   \n\t"
-      "shrl.ph       %[res_op0],        %[res_op0],       4               \n\t"
-
-      : [res_op6] "=&r"(res_op6), [res_op5] "=&r"(res_op5),
-        [res_op4] "=&r"(res_op4), [res_op3] "=&r"(res_op3),
-        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
-        [res_op0] "=&r"(res_op0), [tmp] "=&r"(tmp)
-      : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3),
-        [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q2] "r"(q2), [q1] "r"(q1),
-        [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6),
-        [add_p6toq6] "r"(add_p6toq6));
-
-  *op6 = res_op6;
-  *op5 = res_op5;
-  *op4 = res_op4;
-  *op3 = res_op3;
-  *op2 = res_op2;
-  *op1 = res_op1;
-  *op0 = res_op0;
-
-  __asm__ __volatile__(
-      /* *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
-                                   q1 + q2 + q3 + q4 + q5 + q6 + q7, 4); */
-      "addu.ph       %[res_oq0],        %[q7],            %[q0]           \n\t"
-      "addu.ph       %[res_oq0],        %[res_oq0],       %[add_p6toq6]   \n\t"
-      "shrl.ph       %[res_oq0],        %[res_oq0],       4               \n\t"
-
-      /* *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
-                                   q2 + q3 + q4 + q5 + q6 + q7 * 2, 4) */
-      "shll.ph       %[tmp],            %[q7],            1               \n\t"
-      "addu.ph       %[res_oq1],        %[tmp],           %[q1]           \n\t"
-      "addu.ph       %[res_oq1],        %[res_oq1],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq1],        %[res_oq1],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq1],        %[res_oq1],       4               \n\t"
-
-      /* *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
-                                   q3 + q4 + q5 + q6 + q7 * 3, 4) */
-      "shll.ph       %[tmp],            %[q7],            1               \n\t"
-      "addu.ph       %[res_oq2],        %[tmp],           %[q7]           \n\t"
-      "addu.ph       %[res_oq2],        %[res_oq2],       %[q2]           \n\t"
-      "addu.ph       %[res_oq2],        %[res_oq2],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq2],        %[res_oq2],       %[p5]           \n\t"
-      "subu.ph       %[res_oq2],        %[res_oq2],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq2],        %[res_oq2],       4               \n\t"
-
-      /* *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + q0 + q1 + q2 +
-                                   q3 * 2 + q4 + q5 + q6 + q7 * 4, 4) */
-      "shll.ph       %[tmp],            %[q7],            2               \n\t"
-      "addu.ph       %[res_oq3],        %[tmp],           %[q3]           \n\t"
-      "addu.ph       %[res_oq3],        %[res_oq3],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq3],        %[res_oq3],       %[p4]           \n\t"
-      "subu.ph       %[res_oq3],        %[res_oq3],       %[p5]           \n\t"
-      "subu.ph       %[res_oq3],        %[res_oq3],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq3],        %[res_oq3],       4               \n\t"
-
-      /* *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + q0 + q1 + q2 + q3 +
-                                   q4 * 2 + q5 + q6 + q7 * 5, 4) */
-      "shll.ph       %[tmp],            %[q7],            2               \n\t"
-      "addu.ph       %[res_oq4],        %[tmp],           %[q7]           \n\t"
-      "addu.ph       %[res_oq4],        %[res_oq4],       %[q4]           \n\t"
-      "addu.ph       %[res_oq4],        %[res_oq4],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq4],        %[res_oq4],       %[p3]           \n\t"
-      "subu.ph       %[res_oq4],        %[res_oq4],       %[p4]           \n\t"
-      "subu.ph       %[res_oq4],        %[res_oq4],       %[p5]           \n\t"
-      "subu.ph       %[res_oq4],        %[res_oq4],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq4],        %[res_oq4],       4               \n\t"
-
-      /* *oq5 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q2 + q3 + q4 +
-                                   q5 * 2 + q6 + q7 * 6, 4) */
-      "shll.ph       %[tmp],            %[q7],            2               \n\t"
-      "addu.ph       %[res_oq5],        %[tmp],           %[q7]           \n\t"
-      "addu.ph       %[res_oq5],        %[res_oq5],       %[q7]           \n\t"
-      "addu.ph       %[res_oq5],        %[res_oq5],       %[q5]           \n\t"
-      "addu.ph       %[res_oq5],        %[res_oq5],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq5],        %[res_oq5],       %[p2]           \n\t"
-      "subu.ph       %[res_oq5],        %[res_oq5],       %[p3]           \n\t"
-      "subu.ph       %[res_oq5],        %[res_oq5],       %[p4]           \n\t"
-      "subu.ph       %[res_oq5],        %[res_oq5],       %[p5]           \n\t"
-      "subu.ph       %[res_oq5],        %[res_oq5],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq5],        %[res_oq5],       4               \n\t"
-
-      /* *oq6 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q3 +
-                                   q4 + q5 + q6 * 2 + q7 * 7, 4) */
-      "shll.ph       %[tmp],            %[q7],            3               \n\t"
-      "subu.ph       %[res_oq6],        %[tmp],           %[q7]           \n\t"
-      "addu.ph       %[res_oq6],        %[res_oq6],       %[q6]           \n\t"
-      "addu.ph       %[res_oq6],        %[res_oq6],       %[add_p6toq6]   \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p1]           \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p2]           \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p3]           \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p4]           \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p5]           \n\t"
-      "subu.ph       %[res_oq6],        %[res_oq6],       %[p6]           \n\t"
-      "shrl.ph       %[res_oq6],        %[res_oq6],       4               \n\t"
-
-      : [res_oq6] "=&r"(res_oq6), [res_oq5] "=&r"(res_oq5),
-        [res_oq4] "=&r"(res_oq4), [res_oq3] "=&r"(res_oq3),
-        [res_oq2] "=&r"(res_oq2), [res_oq1] "=&r"(res_oq1),
-        [res_oq0] "=&r"(res_oq0), [tmp] "=&r"(tmp)
-      : [q7] "r"(q7), [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3),
-        [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [p1] "r"(p1), [p2] "r"(p2),
-        [p3] "r"(p3), [p4] "r"(p4), [p5] "r"(p5), [p6] "r"(p6),
-        [add_p6toq6] "r"(add_p6toq6));
-
-  *oq0 = res_oq0;
-  *oq1 = res_oq1;
-  *oq2 = res_oq2;
-  *oq3 = res_oq3;
-  *oq4 = res_oq4;
-  *oq5 = res_oq5;
-  *oq6 = res_oq6;
-}
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // AOM_AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
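
The wide-filter code removed above computes each 15-tap output from a running sum (add_p6toq6) plus a few per-tap corrections, two pixels at a time via the paired-halfword (.ph) instructions. Since it finishes with a plain shrl and no separate rounding add, add_p6toq6 presumably carries the +8 bias for the >> 4 as well; a minimal scalar sketch under that assumption (function name ours, formulas taken from the asm comments):

  #include <stdint.h>

  /* Illustrative scalar equivalent of the running-sum trick above.
   * add_p6toq6 = p6 + ... + q6 plus the rounding bias for the >> 4. */
  static void wide_filter_sketch(uint32_t p7, uint32_t p6, uint32_t p5,
                                 uint32_t p4, uint32_t p3, uint32_t p2,
                                 uint32_t p1, uint32_t p0, uint32_t q0,
                                 uint32_t q1, uint32_t q2, uint32_t q3,
                                 uint32_t q4, uint32_t q5, uint32_t q6,
                                 uint32_t q7, uint32_t *op3, uint32_t *op0,
                                 uint32_t *oq0) {
    const uint32_t add_p6toq6 = p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 +
                                q2 + q3 + q4 + q5 + q6 + 8;

    /* op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 +
                                p0 + q0 + q1 + q2 + q3, 4) */
    *op3 = ((p7 << 2) + p3 + add_p6toq6 - q4 - q5 - q6) >> 4;

    /* op0 = ROUND_POWER_OF_TWO(p7 + p6 + ... + p0 * 2 + q0 + ... + q6, 4) */
    *op0 = (p7 + p0 + add_p6toq6) >> 4;

    /* oq0 = ROUND_POWER_OF_TWO(p6 + ... + q0 * 2 + q1 + ... + q7, 4) */
    *oq0 = (q7 + q0 + add_p6toq6) >> 4;
  }

Each output then costs a shift and a handful of adds/subtracts instead of a fresh 15-tap sum, which is why both asm blocks thread add_p6toq6 through every tap.
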
diff --git a/aom_dsp/mips/loopfilter_macros_dspr2.h b/aom_dsp/mips/loopfilter_macros_dspr2.h
deleted file mode 100644
index 62295d6..0000000
--- a/aom_dsp/mips/loopfilter_macros_dspr2.h
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
-#define AOM_AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_mem/aom_mem.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-#define STORE_F0()                                                       \
-  {                                                                      \
-    __asm__ __volatile__(                                                \
-        "sb     %[q1_f0],    1(%[s4])           \n\t"                    \
-        "sb     %[q0_f0],    0(%[s4])           \n\t"                    \
-        "sb     %[p0_f0],   -1(%[s4])           \n\t"                    \
-        "sb     %[p1_f0],   -2(%[s4])           \n\t"                    \
-                                                                         \
-        :                                                                \
-        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
-          [p1_f0] "r"(p1_f0), [s4] "r"(s4));                             \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
-                                                                         \
-        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
-          [p1_f0] "+r"(p1_f0)                                            \
-        :);                                                              \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "sb     %[q1_f0],    1(%[s3])           \n\t"                    \
-        "sb     %[q0_f0],    0(%[s3])           \n\t"                    \
-        "sb     %[p0_f0],   -1(%[s3])           \n\t"                    \
-        "sb     %[p1_f0],   -2(%[s3])           \n\t"                    \
-                                                                         \
-        : [p1_f0] "+r"(p1_f0)                                            \
-        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [s3] "r"(s3),          \
-          [p0_f0] "r"(p0_f0));                                           \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
-                                                                         \
-        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
-          [p1_f0] "+r"(p1_f0)                                            \
-        :);                                                              \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "sb     %[q1_f0],    1(%[s2])           \n\t"                    \
-        "sb     %[q0_f0],    0(%[s2])           \n\t"                    \
-        "sb     %[p0_f0],   -1(%[s2])           \n\t"                    \
-        "sb     %[p1_f0],   -2(%[s2])           \n\t"                    \
-                                                                         \
-        :                                                                \
-        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
-          [p1_f0] "r"(p1_f0), [s2] "r"(s2));                             \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
-                                                                         \
-        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
-          [p1_f0] "+r"(p1_f0)                                            \
-        :);                                                              \
-                                                                         \
-    __asm__ __volatile__(                                                \
-        "sb     %[q1_f0],    1(%[s1])           \n\t"                    \
-        "sb     %[q0_f0],    0(%[s1])           \n\t"                    \
-        "sb     %[p0_f0],   -1(%[s1])           \n\t"                    \
-        "sb     %[p1_f0],   -2(%[s1])           \n\t"                    \
-                                                                         \
-        :                                                                \
-        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
-          [p1_f0] "r"(p1_f0), [s1] "r"(s1));                             \
-  }
-
-#define STORE_F1()                                                             \
-  {                                                                            \
-    __asm__ __volatile__(                                                      \
-        "sb     %[q2_r],     2(%[s4])           \n\t"                          \
-        "sb     %[q1_r],     1(%[s4])           \n\t"                          \
-        "sb     %[q0_r],     0(%[s4])           \n\t"                          \
-        "sb     %[p0_r],    -1(%[s4])           \n\t"                          \
-        "sb     %[p1_r],    -2(%[s4])           \n\t"                          \
-        "sb     %[p2_r],    -3(%[s4])           \n\t"                          \
-                                                                               \
-        :                                                                      \
-        : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r),                \
-          [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s4] "r"(s4)); \
-                                                                               \
-    __asm__ __volatile__(                                                      \
-        "srl    %[q2_r],    %[q2_r],    16      \n\t"                          \
-        "srl    %[q1_r],    %[q1_r],    16      \n\t"                          \
-        "srl    %[q0_r],    %[q0_r],    16      \n\t"                          \
-        "srl    %[p0_r],    %[p0_r],    16      \n\t"                          \
-        "srl    %[p1_r],    %[p1_r],    16      \n\t"                          \
-        "srl    %[p2_r],    %[p2_r],    16      \n\t"                          \
-                                                                               \
-        : [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), [q0_r] "+r"(q0_r),             \
-          [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), [p2_r] "+r"(p2_r)              \
-        :);                                                                    \
-                                                                               \
-    __asm__ __volatile__(                                                      \
-        "sb     %[q2_r],     2(%[s3])           \n\t"                          \
-        "sb     %[q1_r],     1(%[s3])           \n\t"                          \
-        "sb     %[q0_r],     0(%[s3])           \n\t"                          \
-        "sb     %[p0_r],    -1(%[s3])           \n\t"                          \
-        "sb     %[p1_r],    -2(%[s3])           \n\t"                          \
-        "sb     %[p2_r],    -3(%[s3])           \n\t"                          \
-                                                                               \
-        :                                                                      \
-        : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r),                \
-          [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s3] "r"(s3)); \
-                                                                               \
-    __asm__ __volatile__(                                                      \
-        "sb     %[q2_l],     2(%[s2])           \n\t"                          \
-        "sb     %[q1_l],     1(%[s2])           \n\t"                          \
-        "sb     %[q0_l],     0(%[s2])           \n\t"                          \
-        "sb     %[p0_l],    -1(%[s2])           \n\t"                          \
-        "sb     %[p1_l],    -2(%[s2])           \n\t"                          \
-        "sb     %[p2_l],    -3(%[s2])           \n\t"                          \
-                                                                               \
-        :                                                                      \
-        : [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), [q0_l] "r"(q0_l),                \
-          [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s2] "r"(s2)); \
-                                                                               \
-    __asm__ __volatile__(                                                      \
-        "srl    %[q2_l],    %[q2_l],    16      \n\t"                          \
-        "srl    %[q1_l],    %[q1_l],    16      \n\t"                          \
-        "srl    %[q0_l],    %[q0_l],    16      \n\t"                          \
-        "srl    %[p0_l],    %[p0_l],    16      \n\t"                          \
-        "srl    %[p1_l],    %[p1_l],    16      \n\t"                          \
-        "srl    %[p2_l],    %[p2_l],    16      \n\t"                          \
-                                                                               \
-        : [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), [q0_l] "+r"(q0_l),             \
-          [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), [p2_l] "+r"(p2_l)              \
-        :);                                                                    \
-                                                                               \
-    __asm__ __volatile__(                                                      \
-        "sb     %[q2_l],     2(%[s1])           \n\t"                          \
-        "sb     %[q1_l],     1(%[s1])           \n\t"                          \
-        "sb     %[q0_l],     0(%[s1])           \n\t"                          \
-        "sb     %[p0_l],    -1(%[s1])           \n\t"                          \
-        "sb     %[p1_l],    -2(%[s1])           \n\t"                          \
-        "sb     %[p2_l],    -3(%[s1])           \n\t"                          \
-                                                                               \
-        :                                                                      \
-        : [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), [q0_l] "r"(q0_l),                \
-          [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s1] "r"(s1)); \
-  }
-
-#define STORE_F2()                                                 \
-  {                                                                \
-    __asm__ __volatile__(                                          \
-        "sb     %[q6_r],     6(%[s4])           \n\t"              \
-        "sb     %[q5_r],     5(%[s4])           \n\t"              \
-        "sb     %[q4_r],     4(%[s4])           \n\t"              \
-        "sb     %[q3_r],     3(%[s4])           \n\t"              \
-        "sb     %[q2_r],     2(%[s4])           \n\t"              \
-        "sb     %[q1_r],     1(%[s4])           \n\t"              \
-        "sb     %[q0_r],     0(%[s4])           \n\t"              \
-        "sb     %[p0_r],    -1(%[s4])           \n\t"              \
-        "sb     %[p1_r],    -2(%[s4])           \n\t"              \
-        "sb     %[p2_r],    -3(%[s4])           \n\t"              \
-        "sb     %[p3_r],    -4(%[s4])           \n\t"              \
-        "sb     %[p4_r],    -5(%[s4])           \n\t"              \
-        "sb     %[p5_r],    -6(%[s4])           \n\t"              \
-        "sb     %[p6_r],    -7(%[s4])           \n\t"              \
-                                                                   \
-        :                                                          \
-        : [q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r),    \
-          [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r),    \
-          [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r),    \
-          [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r),    \
-          [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s4] "r"(s4));       \
-                                                                   \
-    __asm__ __volatile__(                                          \
-        "srl    %[q6_r],    %[q6_r],    16      \n\t"              \
-        "srl    %[q5_r],    %[q5_r],    16      \n\t"              \
-        "srl    %[q4_r],    %[q4_r],    16      \n\t"              \
-        "srl    %[q3_r],    %[q3_r],    16      \n\t"              \
-        "srl    %[q2_r],    %[q2_r],    16      \n\t"              \
-        "srl    %[q1_r],    %[q1_r],    16      \n\t"              \
-        "srl    %[q0_r],    %[q0_r],    16      \n\t"              \
-        "srl    %[p0_r],    %[p0_r],    16      \n\t"              \
-        "srl    %[p1_r],    %[p1_r],    16      \n\t"              \
-        "srl    %[p2_r],    %[p2_r],    16      \n\t"              \
-        "srl    %[p3_r],    %[p3_r],    16      \n\t"              \
-        "srl    %[p4_r],    %[p4_r],    16      \n\t"              \
-        "srl    %[p5_r],    %[p5_r],    16      \n\t"              \
-        "srl    %[p6_r],    %[p6_r],    16      \n\t"              \
-                                                                   \
-        : [q6_r] "+r"(q6_r), [q5_r] "+r"(q5_r), [q4_r] "+r"(q4_r), \
-          [q3_r] "+r"(q3_r), [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), \
-          [q0_r] "+r"(q0_r), [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), \
-          [p2_r] "+r"(p2_r), [p3_r] "+r"(p3_r), [p4_r] "+r"(p4_r), \
-          [p5_r] "+r"(p5_r), [p6_r] "+r"(p6_r)                     \
-        :);                                                        \
-                                                                   \
-    __asm__ __volatile__(                                          \
-        "sb     %[q6_r],     6(%[s3])           \n\t"              \
-        "sb     %[q5_r],     5(%[s3])           \n\t"              \
-        "sb     %[q4_r],     4(%[s3])           \n\t"              \
-        "sb     %[q3_r],     3(%[s3])           \n\t"              \
-        "sb     %[q2_r],     2(%[s3])           \n\t"              \
-        "sb     %[q1_r],     1(%[s3])           \n\t"              \
-        "sb     %[q0_r],     0(%[s3])           \n\t"              \
-        "sb     %[p0_r],    -1(%[s3])           \n\t"              \
-        "sb     %[p1_r],    -2(%[s3])           \n\t"              \
-        "sb     %[p2_r],    -3(%[s3])           \n\t"              \
-        "sb     %[p3_r],    -4(%[s3])           \n\t"              \
-        "sb     %[p4_r],    -5(%[s3])           \n\t"              \
-        "sb     %[p5_r],    -6(%[s3])           \n\t"              \
-        "sb     %[p6_r],    -7(%[s3])           \n\t"              \
-                                                                   \
-        :                                                          \
-        : [q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r),    \
-          [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r),    \
-          [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r),    \
-          [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r),    \
-          [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s3] "r"(s3));       \
-                                                                   \
-    __asm__ __volatile__(                                          \
-        "sb     %[q6_l],     6(%[s2])           \n\t"              \
-        "sb     %[q5_l],     5(%[s2])           \n\t"              \
-        "sb     %[q4_l],     4(%[s2])           \n\t"              \
-        "sb     %[q3_l],     3(%[s2])           \n\t"              \
-        "sb     %[q2_l],     2(%[s2])           \n\t"              \
-        "sb     %[q1_l],     1(%[s2])           \n\t"              \
-        "sb     %[q0_l],     0(%[s2])           \n\t"              \
-        "sb     %[p0_l],    -1(%[s2])           \n\t"              \
-        "sb     %[p1_l],    -2(%[s2])           \n\t"              \
-        "sb     %[p2_l],    -3(%[s2])           \n\t"              \
-        "sb     %[p3_l],    -4(%[s2])           \n\t"              \
-        "sb     %[p4_l],    -5(%[s2])           \n\t"              \
-        "sb     %[p5_l],    -6(%[s2])           \n\t"              \
-        "sb     %[p6_l],    -7(%[s2])           \n\t"              \
-                                                                   \
-        :                                                          \
-        : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l),    \
-          [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l),    \
-          [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l),    \
-          [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l),    \
-          [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s2] "r"(s2));       \
-                                                                   \
-    __asm__ __volatile__(                                          \
-        "srl    %[q6_l],    %[q6_l],    16      \n\t"              \
-        "srl    %[q5_l],    %[q5_l],    16      \n\t"              \
-        "srl    %[q4_l],    %[q4_l],    16      \n\t"              \
-        "srl    %[q3_l],    %[q3_l],    16      \n\t"              \
-        "srl    %[q2_l],    %[q2_l],    16      \n\t"              \
-        "srl    %[q1_l],    %[q1_l],    16      \n\t"              \
-        "srl    %[q0_l],    %[q0_l],    16      \n\t"              \
-        "srl    %[p0_l],    %[p0_l],    16      \n\t"              \
-        "srl    %[p1_l],    %[p1_l],    16      \n\t"              \
-        "srl    %[p2_l],    %[p2_l],    16      \n\t"              \
-        "srl    %[p3_l],    %[p3_l],    16      \n\t"              \
-        "srl    %[p4_l],    %[p4_l],    16      \n\t"              \
-        "srl    %[p5_l],    %[p5_l],    16      \n\t"              \
-        "srl    %[p6_l],    %[p6_l],    16      \n\t"              \
-                                                                   \
-        : [q6_l] "+r"(q6_l), [q5_l] "+r"(q5_l), [q4_l] "+r"(q4_l), \
-          [q3_l] "+r"(q3_l), [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), \
-          [q0_l] "+r"(q0_l), [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), \
-          [p2_l] "+r"(p2_l), [p3_l] "+r"(p3_l), [p4_l] "+r"(p4_l), \
-          [p5_l] "+r"(p5_l), [p6_l] "+r"(p6_l)                     \
-        :);                                                        \
-                                                                   \
-    __asm__ __volatile__(                                          \
-        "sb     %[q6_l],     6(%[s1])           \n\t"              \
-        "sb     %[q5_l],     5(%[s1])           \n\t"              \
-        "sb     %[q4_l],     4(%[s1])           \n\t"              \
-        "sb     %[q3_l],     3(%[s1])           \n\t"              \
-        "sb     %[q2_l],     2(%[s1])           \n\t"              \
-        "sb     %[q1_l],     1(%[s1])           \n\t"              \
-        "sb     %[q0_l],     0(%[s1])           \n\t"              \
-        "sb     %[p0_l],    -1(%[s1])           \n\t"              \
-        "sb     %[p1_l],    -2(%[s1])           \n\t"              \
-        "sb     %[p2_l],    -3(%[s1])           \n\t"              \
-        "sb     %[p3_l],    -4(%[s1])           \n\t"              \
-        "sb     %[p4_l],    -5(%[s1])           \n\t"              \
-        "sb     %[p5_l],    -6(%[s1])           \n\t"              \
-        "sb     %[p6_l],    -7(%[s1])           \n\t"              \
-                                                                   \
-        :                                                          \
-        : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l),    \
-          [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l),    \
-          [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l),    \
-          [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l),    \
-          [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s1] "r"(s1));       \
-  }
-
-#define PACK_LEFT_0TO3()                                              \
-  {                                                                   \
-    __asm__ __volatile__(                                             \
-        "preceu.ph.qbl   %[p3_l],   %[p3]   \n\t"                     \
-        "preceu.ph.qbl   %[p2_l],   %[p2]   \n\t"                     \
-        "preceu.ph.qbl   %[p1_l],   %[p1]   \n\t"                     \
-        "preceu.ph.qbl   %[p0_l],   %[p0]   \n\t"                     \
-        "preceu.ph.qbl   %[q0_l],   %[q0]   \n\t"                     \
-        "preceu.ph.qbl   %[q1_l],   %[q1]   \n\t"                     \
-        "preceu.ph.qbl   %[q2_l],   %[q2]   \n\t"                     \
-        "preceu.ph.qbl   %[q3_l],   %[q3]   \n\t"                     \
-                                                                      \
-        : [p3_l] "=&r"(p3_l), [p2_l] "=&r"(p2_l), [p1_l] "=&r"(p1_l), \
-          [p0_l] "=&r"(p0_l), [q0_l] "=&r"(q0_l), [q1_l] "=&r"(q1_l), \
-          [q2_l] "=&r"(q2_l), [q3_l] "=&r"(q3_l)                      \
-        : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),     \
-          [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3));    \
-  }
-
-#define PACK_LEFT_4TO7()                                              \
-  {                                                                   \
-    __asm__ __volatile__(                                             \
-        "preceu.ph.qbl   %[p7_l],   %[p7]   \n\t"                     \
-        "preceu.ph.qbl   %[p6_l],   %[p6]   \n\t"                     \
-        "preceu.ph.qbl   %[p5_l],   %[p5]   \n\t"                     \
-        "preceu.ph.qbl   %[p4_l],   %[p4]   \n\t"                     \
-        "preceu.ph.qbl   %[q4_l],   %[q4]   \n\t"                     \
-        "preceu.ph.qbl   %[q5_l],   %[q5]   \n\t"                     \
-        "preceu.ph.qbl   %[q6_l],   %[q6]   \n\t"                     \
-        "preceu.ph.qbl   %[q7_l],   %[q7]   \n\t"                     \
-                                                                      \
-        : [p7_l] "=&r"(p7_l), [p6_l] "=&r"(p6_l), [p5_l] "=&r"(p5_l), \
-          [p4_l] "=&r"(p4_l), [q4_l] "=&r"(q4_l), [q5_l] "=&r"(q5_l), \
-          [q6_l] "=&r"(q6_l), [q7_l] "=&r"(q7_l)                      \
-        : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4),     \
-          [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7));    \
-  }
-
-#define PACK_RIGHT_0TO3()                                             \
-  {                                                                   \
-    __asm__ __volatile__(                                             \
-        "preceu.ph.qbr   %[p3_r],   %[p3]   \n\t"                    \
-        "preceu.ph.qbr   %[p2_r],   %[p2]   \n\t"                     \
-        "preceu.ph.qbr   %[p1_r],   %[p1]   \n\t"                     \
-        "preceu.ph.qbr   %[p0_r],   %[p0]   \n\t"                     \
-        "preceu.ph.qbr   %[q0_r],   %[q0]   \n\t"                     \
-        "preceu.ph.qbr   %[q1_r],   %[q1]   \n\t"                     \
-        "preceu.ph.qbr   %[q2_r],   %[q2]   \n\t"                     \
-        "preceu.ph.qbr   %[q3_r],   %[q3]   \n\t"                     \
-                                                                      \
-        : [p3_r] "=&r"(p3_r), [p2_r] "=&r"(p2_r), [p1_r] "=&r"(p1_r), \
-          [p0_r] "=&r"(p0_r), [q0_r] "=&r"(q0_r), [q1_r] "=&r"(q1_r), \
-          [q2_r] "=&r"(q2_r), [q3_r] "=&r"(q3_r)                      \
-        : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),     \
-          [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3));    \
-  }
-
-#define PACK_RIGHT_4TO7()                                             \
-  {                                                                   \
-    __asm__ __volatile__(                                             \
-        "preceu.ph.qbr   %[p7_r],   %[p7]   \n\t"                     \
-        "preceu.ph.qbr   %[p6_r],   %[p6]   \n\t"                     \
-        "preceu.ph.qbr   %[p5_r],   %[p5]   \n\t"                     \
-        "preceu.ph.qbr   %[p4_r],   %[p4]   \n\t"                     \
-        "preceu.ph.qbr   %[q4_r],   %[q4]   \n\t"                     \
-        "preceu.ph.qbr   %[q5_r],   %[q5]   \n\t"                     \
-        "preceu.ph.qbr   %[q6_r],   %[q6]   \n\t"                     \
-        "preceu.ph.qbr   %[q7_r],   %[q7]   \n\t"                     \
-                                                                      \
-        : [p7_r] "=&r"(p7_r), [p6_r] "=&r"(p6_r), [p5_r] "=&r"(p5_r), \
-          [p4_r] "=&r"(p4_r), [q4_r] "=&r"(q4_r), [q5_r] "=&r"(q5_r), \
-          [q6_r] "=&r"(q6_r), [q7_r] "=&r"(q7_r)                      \
-        : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4),     \
-          [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7));    \
-  }
-
-#define COMBINE_LEFT_RIGHT_0TO2()                                         \
-  {                                                                       \
-    __asm__ __volatile__(                                                 \
-        "precr.qb.ph    %[p2],  %[p2_l],    %[p2_r]    \n\t"              \
-        "precr.qb.ph    %[p1],  %[p1_l],    %[p1_r]    \n\t"              \
-        "precr.qb.ph    %[p0],  %[p0_l],    %[p0_r]    \n\t"              \
-        "precr.qb.ph    %[q0],  %[q0_l],    %[q0_r]    \n\t"              \
-        "precr.qb.ph    %[q1],  %[q1_l],    %[q1_r]    \n\t"              \
-        "precr.qb.ph    %[q2],  %[q2_l],    %[q2_r]    \n\t"              \
-                                                                          \
-        : [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), [q0] "=&r"(q0), \
-          [q1] "=&r"(q1), [q2] "=&r"(q2)                                  \
-        : [p2_l] "r"(p2_l), [p2_r] "r"(p2_r), [p1_l] "r"(p1_l),           \
-          [p1_r] "r"(p1_r), [p0_l] "r"(p0_l), [p0_r] "r"(p0_r),           \
-          [q0_l] "r"(q0_l), [q0_r] "r"(q0_r), [q1_l] "r"(q1_l),           \
-          [q1_r] "r"(q1_r), [q2_l] "r"(q2_l), [q2_r] "r"(q2_r));          \
-  }
-
-#define COMBINE_LEFT_RIGHT_3TO6()                                         \
-  {                                                                       \
-    __asm__ __volatile__(                                                 \
-        "precr.qb.ph    %[p6],  %[p6_l],    %[p6_r]    \n\t"              \
-        "precr.qb.ph    %[p5],  %[p5_l],    %[p5_r]    \n\t"              \
-        "precr.qb.ph    %[p4],  %[p4_l],    %[p4_r]    \n\t"              \
-        "precr.qb.ph    %[p3],  %[p3_l],    %[p3_r]    \n\t"              \
-        "precr.qb.ph    %[q3],  %[q3_l],    %[q3_r]    \n\t"              \
-        "precr.qb.ph    %[q4],  %[q4_l],    %[q4_r]    \n\t"              \
-        "precr.qb.ph    %[q5],  %[q5_l],    %[q5_r]    \n\t"              \
-        "precr.qb.ph    %[q6],  %[q6_l],    %[q6_r]    \n\t"              \
-                                                                          \
-        : [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4), [p3] "=&r"(p3), \
-          [q3] "=&r"(q3), [q4] "=&r"(q4), [q5] "=&r"(q5), [q6] "=&r"(q6)  \
-        : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),           \
-          [p3_l] "r"(p3_l), [p6_r] "r"(p6_r), [p5_r] "r"(p5_r),           \
-          [p4_r] "r"(p4_r), [p3_r] "r"(p3_r), [q3_l] "r"(q3_l),           \
-          [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), [q6_l] "r"(q6_l),           \
-          [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),           \
-          [q6_r] "r"(q6_r));                                              \
-  }
-
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // AOM_AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
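
The macros removed above lean on three DSPr2 idioms: preceu.ph.qbl / preceu.ph.qbr zero-extend the high/low byte pairs of a packed word into 16-bit lanes so the filter math cannot overflow a byte, precr.qb.ph packs the lanes back down, and the STORE_F* macros scatter lanes across rows by storing the low byte and shifting right by 8 (or 16 when the register holds halfword lanes) between sb stores. A portable sketch of those primitives (helper names ours; semantics per the MIPS DSP ASE as we understand them):

  #include <stdint.h>

  /* preceu.ph.qbl: bytes 3..2 of v -> two zero-extended halfwords. */
  static uint32_t unpack_high(uint32_t v) {
    return ((v >> 8) & 0x00FF0000u) | ((v >> 16) & 0x000000FFu);
  }

  /* preceu.ph.qbr: bytes 1..0 of v -> two zero-extended halfwords. */
  static uint32_t unpack_low(uint32_t v) {
    return ((v << 8) & 0x00FF0000u) | (v & 0x000000FFu);
  }

  /* precr.qb.ph: take the low byte of each halfword of l and r and
   * repack them into one word, with the l lanes in the high bytes. */
  static uint32_t repack(uint32_t l, uint32_t r) {
    return ((l & 0x00FF0000u) << 8) | ((l & 0x000000FFu) << 16) |
           ((r & 0x00FF0000u) >> 8) | (r & 0x000000FFu);
  }

  /* STORE_F0-style scatter: byte lane i of v goes to row i at offset off. */
  static void scatter(uint32_t v, uint8_t *rows[4], int off) {
    for (int i = 0; i < 4; ++i, v >>= 8) rows[i][off] = (uint8_t)v;
  }

The round trip repack(unpack_high(v), unpack_low(v)) == v restores the original byte order, which is exactly what PACK_LEFT_0TO3() followed by COMBINE_LEFT_RIGHT_0TO2() relies on.
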
diff --git a/aom_dsp/mips/loopfilter_masks_dspr2.h b/aom_dsp/mips/loopfilter_masks_dspr2.h
deleted file mode 100644
index a0f57f3..0000000
--- a/aom_dsp/mips/loopfilter_masks_dspr2.h
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
-#define AOM_AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_mem/aom_mem.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-/* Process 4 pixels at the same time and
- * compute hev and mask in the same function. */
-static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit,
-                                         uint32_t p1, uint32_t p0, uint32_t p3,
-                                         uint32_t p2, uint32_t q0, uint32_t q1,
-                                         uint32_t q2, uint32_t q3,
-                                         uint32_t thresh, uint32_t *hev,
-                                         uint32_t *mask) {
-  uint32_t c, r, r3, r_k;
-  uint32_t s1, s2, s3;
-  uint32_t ones = 0xFFFFFFFF;
-  uint32_t hev1;
-
-  __asm__ __volatile__(
-      /* mask |= (abs(p3 - p2) > limit) */
-      "subu_s.qb      %[c],   %[p3],     %[p2]        \n\t"
-      "subu_s.qb      %[r_k], %[p2],     %[p3]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   $0,        %[c]         \n\t"
-
-      /* mask |= (abs(p2 - p1) > limit) */
-      "subu_s.qb      %[c],   %[p2],     %[p1]        \n\t"
-      "subu_s.qb      %[r_k], %[p1],     %[p2]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   %[r],      %[c]         \n\t"
-
-      /* mask |= (abs(p1 - p0) > limit)
-       * hev  |= (abs(p1 - p0) > thresh)
-       */
-      "subu_s.qb      %[c],   %[p1],     %[p0]        \n\t"
-      "subu_s.qb      %[r_k], %[p0],     %[p1]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[thresh], %[r_k]       \n\t"
-      "or             %[r3],  $0,        %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   %[r],      %[c]         \n\t"
-
-      /* mask |= (abs(q1 - q0) > limit)
-       * hev  |= (abs(q1 - q0) > thresh)
-       */
-      "subu_s.qb      %[c],   %[q1],     %[q0]        \n\t"
-      "subu_s.qb      %[r_k], %[q0],     %[q1]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[thresh], %[r_k]       \n\t"
-      "or             %[r3],  %[r3],     %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   %[r],      %[c]         \n\t"
-
-      /* mask |= (abs(q2 - q1) > limit) */
-      "subu_s.qb      %[c],   %[q2],     %[q1]        \n\t"
-      "subu_s.qb      %[r_k], %[q1],     %[q2]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   %[r],      %[c]         \n\t"
-      "sll            %[r3],    %[r3],    24          \n\t"
-
-      /* mask |= (abs(q3 - q2) > limit) */
-      "subu_s.qb      %[c],   %[q3],     %[q2]        \n\t"
-      "subu_s.qb      %[r_k], %[q2],     %[q3]        \n\t"
-      "or             %[r_k], %[r_k],    %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
-      "or             %[r],   %[r],      %[c]         \n\t"
-
-      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3)
-      : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
-        [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3),
-        [thresh] "r"(thresh));
-
-  __asm__ __volatile__(
-      /* abs(p0 - q0) */
-      "subu_s.qb      %[c],   %[p0],     %[q0]        \n\t"
-      "subu_s.qb      %[r_k], %[q0],     %[p0]        \n\t"
-      "wrdsp          %[r3]                           \n\t"
-      "or             %[s1],  %[r_k],    %[c]         \n\t"
-
-      /* abs(p1 - q1) */
-      "subu_s.qb      %[c],    %[p1],    %[q1]        \n\t"
-      "addu_s.qb      %[s3],   %[s1],    %[s1]        \n\t"
-      "pick.qb        %[hev1], %[ones],  $0           \n\t"
-      "subu_s.qb      %[r_k],  %[q1],    %[p1]        \n\t"
-      "or             %[s2],   %[r_k],   %[c]         \n\t"
-
-      /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit */
-      "shrl.qb        %[s2],   %[s2],     1           \n\t"
-      "addu_s.qb      %[s1],   %[s2],     %[s3]       \n\t"
-      "cmpgu.lt.qb    %[c],    %[flimit], %[s1]       \n\t"
-      "or             %[r],    %[r],      %[c]        \n\t"
-      "sll            %[r],    %[r],      24          \n\t"
-
-      "wrdsp          %[r]                            \n\t"
-      "pick.qb        %[s2],  $0,         %[ones]     \n\t"
-
-      : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1),
-        [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3)
-      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1),
-        [ones] "r"(ones), [flimit] "r"(flimit));
-
-  *hev = hev1;
-  *mask = s2;
-}
-
-static INLINE void filter_hev_mask_flatmask4_dspr2(
-    uint32_t limit, uint32_t flimit, uint32_t thresh, uint32_t p1, uint32_t p0,
-    uint32_t p3, uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2,
-    uint32_t q3, uint32_t *hev, uint32_t *mask, uint32_t *flat) {
-  uint32_t c, r, r3, r_k, r_flat;
-  uint32_t s1, s2, s3;
-  uint32_t ones = 0xFFFFFFFF;
-  uint32_t flat_thresh = 0x01010101;
-  uint32_t hev1;
-  uint32_t flat1;
-
-  __asm__ __volatile__(
-      /* mask |= (abs(p3 - p2) > limit) */
-      "subu_s.qb      %[c],       %[p3],          %[p2]        \n\t"
-      "subu_s.qb      %[r_k],     %[p2],          %[p3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       $0,             %[c]         \n\t"
-
-      /* mask |= (abs(p2 - p1) > limit) */
-      "subu_s.qb      %[c],       %[p2],          %[p1]        \n\t"
-      "subu_s.qb      %[r_k],     %[p1],          %[p2]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       %[r],           %[c]         \n\t"
-
-      /* mask |= (abs(p1 - p0) > limit)
-       * hev  |= (abs(p1 - p0) > thresh)
-       * flat |= (abs(p1 - p0) > thresh)
-       */
-      "subu_s.qb      %[c],       %[p1],          %[p0]        \n\t"
-      "subu_s.qb      %[r_k],     %[p0],          %[p1]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[thresh],      %[r_k]       \n\t"
-      "or             %[r3],      $0,             %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       %[r],           %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  $0,             %[c]         \n\t"
-
-      /* mask |= (abs(q1 - q0) > limit)
-       * hev  |= (abs(q1 - q0) > thresh)
-       * flat |= (abs(q1 - q0) > thresh)
-       */
-      "subu_s.qb      %[c],       %[q1],          %[q0]        \n\t"
-      "subu_s.qb      %[r_k],     %[q0],          %[q1]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[thresh],      %[r_k]       \n\t"
-      "or             %[r3],      %[r3],          %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       %[r],           %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(p0 - p2) > thresh) */
-      "subu_s.qb      %[c],       %[p0],          %[p2]        \n\t"
-      "subu_s.qb      %[r_k],     %[p2],          %[p0]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(q0 - q2) > thresh) */
-      "subu_s.qb      %[c],       %[q0],          %[q2]        \n\t"
-      "subu_s.qb      %[r_k],     %[q2],          %[q0]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(p3 - p0) > thresh) */
-      "subu_s.qb      %[c],       %[p3],          %[p0]        \n\t"
-      "subu_s.qb      %[r_k],     %[p0],          %[p3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(q3 - q0) > thresh) */
-      "subu_s.qb      %[c],       %[q3],          %[q0]        \n\t"
-      "subu_s.qb      %[r_k],     %[q0],          %[q3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-      "sll            %[r_flat],  %[r_flat],      24           \n\t"
-      /* look at stall here */
-      "wrdsp          %[r_flat]                                \n\t"
-      "pick.qb        %[flat1],   $0,             %[ones]      \n\t"
-
-      /* mask |= (abs(q2 - q1) > limit) */
-      "subu_s.qb      %[c],       %[q2],          %[q1]        \n\t"
-      "subu_s.qb      %[r_k],     %[q1],          %[q2]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       %[r],           %[c]         \n\t"
-      "sll            %[r3],      %[r3],          24           \n\t"
-
-      /* mask |= (abs(q3 - q2) > limit) */
-      "subu_s.qb      %[c],       %[q3],          %[q2]        \n\t"
-      "subu_s.qb      %[r_k],     %[q2],          %[q3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
-      "or             %[r],       %[r],           %[c]         \n\t"
-
-      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3),
-        [r_flat] "=&r"(r_flat), [flat1] "=&r"(flat1)
-      : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
-        [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3),
-        [thresh] "r"(thresh), [flat_thresh] "r"(flat_thresh), [ones] "r"(ones));
-
-  __asm__ __volatile__(
-      /* abs(p0 - q0) */
-      "subu_s.qb      %[c],   %[p0],     %[q0]        \n\t"
-      "subu_s.qb      %[r_k], %[q0],     %[p0]        \n\t"
-      "wrdsp          %[r3]                           \n\t"
-      "or             %[s1],  %[r_k],    %[c]         \n\t"
-
-      /* abs(p1 - q1) */
-      "subu_s.qb      %[c],    %[p1],    %[q1]        \n\t"
-      "addu_s.qb      %[s3],   %[s1],    %[s1]        \n\t"
-      "pick.qb        %[hev1], %[ones],  $0           \n\t"
-      "subu_s.qb      %[r_k],  %[q1],    %[p1]        \n\t"
-      "or             %[s2],   %[r_k],   %[c]         \n\t"
-
-      /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit */
-      "shrl.qb        %[s2],   %[s2],     1           \n\t"
-      "addu_s.qb      %[s1],   %[s2],     %[s3]       \n\t"
-      "cmpgu.lt.qb    %[c],    %[flimit], %[s1]       \n\t"
-      "or             %[r],    %[r],      %[c]        \n\t"
-      "sll            %[r],    %[r],      24          \n\t"
-
-      "wrdsp          %[r]                            \n\t"
-      "pick.qb        %[s2],   $0,        %[ones]     \n\t"
-
-      : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1),
-        [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3)
-      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1),
-        [ones] "r"(ones), [flimit] "r"(flimit));
-
-  *hev = hev1;
-  *mask = s2;
-  *flat = flat1;
-}
-
-static INLINE void flatmask5(uint32_t p4, uint32_t p3, uint32_t p2, uint32_t p1,
-                             uint32_t p0, uint32_t q0, uint32_t q1, uint32_t q2,
-                             uint32_t q3, uint32_t q4, uint32_t *flat2) {
-  uint32_t c, r, r_k, r_flat;
-  uint32_t ones = 0xFFFFFFFF;
-  uint32_t flat_thresh = 0x01010101;
-  uint32_t flat1, flat3;
-
-  __asm__ __volatile__(
-      /* flat |= (abs(p4 - p0) > thresh) */
-      "subu_s.qb      %[c],   %[p4],           %[p0]        \n\t"
-      "subu_s.qb      %[r_k], %[p0],           %[p4]        \n\t"
-      "or             %[r_k], %[r_k],          %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],   %[flat_thresh],  %[r_k]       \n\t"
-      "or             %[r],   $0,              %[c]         \n\t"
-
-      /* flat |= (abs(q4 - q0) > thresh) */
-      "subu_s.qb      %[c],     %[q4],           %[q0]     \n\t"
-      "subu_s.qb      %[r_k],   %[q0],           %[q4]     \n\t"
-      "or             %[r_k],   %[r_k],          %[c]      \n\t"
-      "cmpgu.lt.qb    %[c],     %[flat_thresh],  %[r_k]    \n\t"
-      "or             %[r],     %[r],            %[c]      \n\t"
-      "sll            %[r],     %[r],            24        \n\t"
-      "wrdsp          %[r]                                 \n\t"
-      "pick.qb        %[flat3], $0,           %[ones]      \n\t"
-
-      /* flat |= (abs(p1 - p0) > thresh) */
-      "subu_s.qb      %[c],       %[p1],          %[p0]        \n\t"
-      "subu_s.qb      %[r_k],     %[p0],          %[p1]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  $0,             %[c]         \n\t"
-
-      /* flat |= (abs(q1 - q0) > thresh) */
-      "subu_s.qb      %[c],      %[q1],           %[q0]        \n\t"
-      "subu_s.qb      %[r_k],    %[q0],           %[q1]        \n\t"
-      "or             %[r_k],    %[r_k],          %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],      %[flat_thresh],  %[r_k]       \n\t"
-      "or             %[r_flat], %[r_flat],       %[c]         \n\t"
-
-      /* flat |= (abs(p0 - p2) > thresh) */
-      "subu_s.qb      %[c],       %[p0],          %[p2]        \n\t"
-      "subu_s.qb      %[r_k],     %[p2],          %[p0]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(q0 - q2) > thresh) */
-      "subu_s.qb      %[c],       %[q0],          %[q2]        \n\t"
-      "subu_s.qb      %[r_k],     %[q2],          %[q0]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(p3 - p0) > thresh) */
-      "subu_s.qb      %[c],       %[p3],          %[p0]        \n\t"
-      "subu_s.qb      %[r_k],     %[p0],          %[p3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-
-      /* flat |= (abs(q3 - q0) > thresh) */
-      "subu_s.qb      %[c],       %[q3],          %[q0]        \n\t"
-      "subu_s.qb      %[r_k],     %[q0],          %[q3]        \n\t"
-      "or             %[r_k],     %[r_k],         %[c]         \n\t"
-      "cmpgu.lt.qb    %[c],       %[flat_thresh], %[r_k]       \n\t"
-      "or             %[r_flat],  %[r_flat],      %[c]         \n\t"
-      "sll            %[r_flat],  %[r_flat],      24           \n\t"
-      "wrdsp          %[r_flat]                                \n\t"
-      "pick.qb        %[flat1],   $0,             %[ones]      \n\t"
-      /* flat & flatmask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3) */
-      "and            %[flat1],  %[flat3],        %[flat1]     \n\t"
-
-      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r_flat] "=&r"(r_flat),
-        [flat1] "=&r"(flat1), [flat3] "=&r"(flat3)
-      : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),
-        [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3), [q4] "r"(q4),
-        [flat_thresh] "r"(flat_thresh), [ones] "r"(ones));
-
-  *flat2 = flat1;
-}
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // AOM_AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
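
The deleted masks header evaluates the loop-filter gating conditions for four pixels per register: |a - b| is formed as subu_s.qb(a, b) | subu_s.qb(b, a) (saturation zeroes whichever direction is negative), cmpgu.lt.qb accumulates per-byte threshold violations, and wrdsp / pick.qb turn the condition bits into all-ones byte masks. Per pixel this reduces to the familiar scalar tests; a sketch (function name ours; the final compare is against flimit alone, matching the cmpgu.lt.qb above):

  #include <stdint.h>
  #include <stdlib.h>

  /* Returns 0xFF when every threshold test passes (i.e. filter this
   * pixel), matching the sense of the pick.qb result above. */
  static uint8_t filter_mask_sketch(uint8_t limit, uint8_t flimit,
                                    uint8_t p3, uint8_t p2, uint8_t p1,
                                    uint8_t p0, uint8_t q0, uint8_t q1,
                                    uint8_t q2, uint8_t q3) {
    int violation = 0;
    violation |= abs(p3 - p2) > limit;
    violation |= abs(p2 - p1) > limit;
    violation |= abs(p1 - p0) > limit;
    violation |= abs(q1 - q0) > limit;
    violation |= abs(q2 - q1) > limit;
    violation |= abs(q3 - q2) > limit;
    violation |= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit;
    return violation ? 0x00 : 0xFF;
  }

hev and flat fall out of the same pattern with thresh and the per-byte flat_thresh of 1 in place of limit, and flatmask5() simply ANDs a p4/q4 test onto the 4-tap flat mask.
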
diff --git a/aom_dsp/mips/loopfilter_mb_dspr2.c b/aom_dsp/mips/loopfilter_mb_dspr2.c
deleted file mode 100644
index b67ccfe..0000000
--- a/aom_dsp/mips/loopfilter_mb_dspr2.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_dsp/mips/common_dspr2.h"
-#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
-#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
-#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/aom_mem.h"
-
-#if HAVE_DSPR2
-void aom_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
-                                const uint8_t *blimit, const uint8_t *limit,
-                                const uint8_t *thresh) {
-  uint32_t mask;
-  uint32_t hev, flat;
-  uint8_t i;
-  uint8_t *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t p3, p2, p1, p0, q0, q1, q2, q3;
-  uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
-  uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
-      "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
-      "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  /* prefetch data for store */
-  prefetch_store(s);
-
-  for (i = 0; i < 2; i++) {
-    sp3 = s - (pitch << 2);
-    sp2 = sp3 + pitch;
-    sp1 = sp2 + pitch;
-    sp0 = sp1 + pitch;
-    sq0 = s;
-    sq1 = s + pitch;
-    sq2 = sq1 + pitch;
-    sq3 = sq2 + pitch;
-
-    __asm__ __volatile__(
-        "lw     %[p3],      (%[sp3])    \n\t"
-        "lw     %[p2],      (%[sp2])    \n\t"
-        "lw     %[p1],      (%[sp1])    \n\t"
-        "lw     %[p0],      (%[sp0])    \n\t"
-        "lw     %[q0],      (%[sq0])    \n\t"
-        "lw     %[q1],      (%[sq1])    \n\t"
-        "lw     %[q2],      (%[sq2])    \n\t"
-        "lw     %[q3],      (%[sq3])    \n\t"
-
-        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
-          [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0)
-        : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0),
-          [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0));
-
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
-                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
-
-    if ((flat == 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      __asm__ __volatile__(
-          "sw       %[p1_f0],   (%[sp1])    \n\t"
-          "sw       %[p0_f0],   (%[sp0])    \n\t"
-          "sw       %[q0_f0],   (%[sq0])    \n\t"
-          "sw       %[q1_f0],   (%[sq1])    \n\t"
-
-          :
-          : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-            [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-            [sq1] "r"(sq1));
-    } else if ((mask & flat) == 0xFFFFFFFF) {
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      COMBINE_LEFT_RIGHT_0TO2()
-
-      __asm__ __volatile__(
-          "sw       %[p2],      (%[sp2])    \n\t"
-          "sw       %[p1],      (%[sp1])    \n\t"
-          "sw       %[p0],      (%[sp0])    \n\t"
-          "sw       %[q0],      (%[sq0])    \n\t"
-          "sw       %[q1],      (%[sq1])    \n\t"
-          "sw       %[q2],      (%[sq2])    \n\t"
-
-          :
-          : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0),
-            [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1),
-            [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2));
-    } else if ((flat != 0) && (mask != 0)) {
-      /* filtering */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb     %[p2_r],    (%[sp2])    \n\t"
-            "sb     %[p1_r],    (%[sp1])    \n\t"
-            "sb     %[p0_r],    (%[sp0])    \n\t"
-            "sb     %[q0_r],    (%[sq0])    \n\t"
-            "sb     %[q1_r],    (%[sq1])    \n\t"
-            "sb     %[q2_r],    (%[sq2])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  (%[sp1])    \n\t"
-            "sb         %[p0_f0],  (%[sp0])    \n\t"
-            "sb         %[q0_f0],  (%[sq0])    \n\t"
-            "sb         %[q1_f0],  (%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_r],    %[p2_r],    16      \n\t"
-          "srl      %[p1_r],    %[p1_r],    16      \n\t"
-          "srl      %[p0_r],    %[p0_r],    16      \n\t"
-          "srl      %[q0_r],    %[q0_r],    16      \n\t"
-          "srl      %[q1_r],    %[q1_r],    16      \n\t"
-          "srl      %[q2_r],    %[q2_r],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
-            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb     %[p2_r],    +1(%[sp2])    \n\t"
-            "sb     %[p1_r],    +1(%[sp1])    \n\t"
-            "sb     %[p0_r],    +1(%[sp0])    \n\t"
-            "sb     %[q0_r],    +1(%[sq0])    \n\t"
-            "sb     %[q1_r],    +1(%[sq1])    \n\t"
-            "sb     %[q2_r],    +1(%[sq2])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb     %[p1_f0],   +1(%[sp1])    \n\t"
-            "sb     %[p0_f0],   +1(%[sp0])    \n\t"
-            "sb     %[q0_f0],   +1(%[sq0])    \n\t"
-            "sb     %[q1_f0],   +1(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
-
-          : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0),
-            [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0),
-            [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb     %[p2_l],    +2(%[sp2])    \n\t"
-            "sb     %[p1_l],    +2(%[sp1])    \n\t"
-            "sb     %[p0_l],    +2(%[sp0])    \n\t"
-            "sb     %[q0_l],    +2(%[sq0])    \n\t"
-            "sb     %[q1_l],    +2(%[sq1])    \n\t"
-            "sb     %[q2_l],    +2(%[sq2])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb     %[p1_f0],   +2(%[sp1])    \n\t"
-            "sb     %[p0_f0],   +2(%[sp0])    \n\t"
-            "sb     %[q0_f0],   +2(%[sq0])    \n\t"
-            "sb     %[q1_f0],   +2(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_l],    %[p2_l],    16      \n\t"
-          "srl      %[p1_l],    %[p1_l],    16      \n\t"
-          "srl      %[p0_l],    %[p0_l],    16      \n\t"
-          "srl      %[q0_l],    %[q0_l],    16      \n\t"
-          "srl      %[q1_l],    %[q1_l],    16      \n\t"
-          "srl      %[q2_l],    %[q2_l],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
-            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb     %[p2_l],    +3(%[sp2])    \n\t"
-            "sb     %[p1_l],    +3(%[sp1])    \n\t"
-            "sb     %[p0_l],    +3(%[sp0])    \n\t"
-            "sb     %[q0_l],    +3(%[sq0])    \n\t"
-            "sb     %[q1_l],    +3(%[sq1])    \n\t"
-            "sb     %[q2_l],    +3(%[sq2])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb     %[p1_f0],   +3(%[sp1])    \n\t"
-            "sb     %[p0_f0],   +3(%[sp0])    \n\t"
-            "sb     %[q0_f0],   +3(%[sq0])    \n\t"
-            "sb     %[q1_f0],   +3(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-    }
-
-    s = s + 4;
-  }
-}
-
-void aom_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
-                              const uint8_t *blimit, const uint8_t *limit,
-                              const uint8_t *thresh) {
-  uint8_t i;
-  uint32_t mask, hev, flat;
-  uint8_t *s1, *s2, *s3, *s4;
-  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-  uint32_t p3, p2, p1, p0, q3, q2, q1, q0;
-  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
-  uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb     %[thresh_vec],  %[uthresh]    \n\t"
-      "replv.qb     %[flimit_vec],  %[uflimit]    \n\t"
-      "replv.qb     %[limit_vec],   %[ulimit]     \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  prefetch_store(s + pitch);
-
-  for (i = 0; i < 2; i++) {
-    s1 = s;
-    s2 = s + pitch;
-    s3 = s2 + pitch;
-    s4 = s3 + pitch;
-    s = s4 + pitch;
-
-    __asm__ __volatile__(
-        "lw     %[p0],  -4(%[s1])    \n\t"
-        "lw     %[p1],  -4(%[s2])    \n\t"
-        "lw     %[p2],  -4(%[s3])    \n\t"
-        "lw     %[p3],  -4(%[s4])    \n\t"
-        "lw     %[q3],    (%[s1])    \n\t"
-        "lw     %[q2],    (%[s2])    \n\t"
-        "lw     %[q1],    (%[s3])    \n\t"
-        "lw     %[q0],    (%[s4])    \n\t"
-
-        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
-          [q0] "=&r"(q0), [q1] "=&r"(q1), [q2] "=&r"(q2), [q3] "=&r"(q3)
-        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
-
-    /* transpose p3, p2, p1, p0
-       original (when loaded from memory)
-       register       -4    -3   -2     -1
-         p0         p0_0  p0_1  p0_2  p0_3
-         p1         p1_0  p1_1  p1_2  p1_3
-         p2         p2_0  p2_1  p2_2  p2_3
-         p3         p3_0  p3_1  p3_2  p3_3
-
-       after transpose
-       register
-         p0         p3_3  p2_3  p1_3  p0_3
-         p1         p3_2  p2_2  p1_2  p0_2
-         p2         p3_1  p2_1  p1_1  p0_1
-         p3         p3_0  p2_0  p1_0  p0_0
-    */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
-        "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
-        "precr.qb.ph    %[prim4],   %[p2],      %[p3]       \n\t"
-
-        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[p0],      %[p1],      %[sec3]     \n\t"
-        "precrq.ph.w    %[p2],      %[p3],      %[sec4]     \n\t"
-        "append         %[p1],      %[sec3],    16          \n\t"
-        "append         %[p3],      %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2),
-          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
-
-    /* transpose q0, q1, q2, q3
-       original (when loaded from memory)
-       register       +1    +2    +3    +4
-         q3         q3_0  q3_1  q3_2  q3_3
-         q2         q2_0  q2_1  q2_2  q2_3
-         q1         q1_0  q1_1  q1_2  q1_3
-         q0         q0_0  q0_1  q0_2  q0_3
-
-       after transpose
-       register
-         q3         q0_3  q1_3  q2_3  q3_3
-         q2         q0_2  q1_2  q2_2  q3_2
-         q1         q0_1  q1_1  q2_1  q3_1
-         q0         q0_0  q1_0  q2_0  q3_0
-    */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
-        "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
-        "precr.qb.ph    %[prim4],   %[q1],      %[q0]       \n\t"
-
-        "precrq.qb.ph   %[q2],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[q0],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[q3],      %[q2],      %[sec3]     \n\t"
-        "precrq.ph.w    %[q1],      %[q0],      %[sec4]     \n\t"
-        "append         %[q2],      %[sec3],    16          \n\t"
-        "append         %[q0],      %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1),
-          [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
-
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
-                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
-
-    if ((flat == 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-      STORE_F0()
-    } else if ((mask & flat) == 0xFFFFFFFF) {
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      STORE_F1()
-    } else if ((flat != 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p2_r],  -3(%[s4])    \n\t"
-            "sb         %[p1_r],  -2(%[s4])    \n\t"
-            "sb         %[p0_r],  -1(%[s4])    \n\t"
-            "sb         %[q0_r],    (%[s4])    \n\t"
-            "sb         %[q1_r],  +1(%[s4])    \n\t"
-            "sb         %[q2_r],  +2(%[s4])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [s4] "r"(s4));
-      } else if (mask & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  -2(%[s4])    \n\t"
-            "sb         %[p0_f0],  -1(%[s4])    \n\t"
-            "sb         %[q0_f0],    (%[s4])    \n\t"
-            "sb         %[q1_f0],  +1(%[s4])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [s4] "r"(s4));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_r],    %[p2_r],    16      \n\t"
-          "srl      %[p1_r],    %[p1_r],    16      \n\t"
-          "srl      %[p0_r],    %[p0_r],    16      \n\t"
-          "srl      %[q0_r],    %[q0_r],    16      \n\t"
-          "srl      %[q1_r],    %[q1_r],    16      \n\t"
-          "srl      %[q2_r],    %[q2_r],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
-            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p2_r],  -3(%[s3])    \n\t"
-            "sb         %[p1_r],  -2(%[s3])    \n\t"
-            "sb         %[p0_r],  -1(%[s3])    \n\t"
-            "sb         %[q0_r],    (%[s3])    \n\t"
-            "sb         %[q1_r],  +1(%[s3])    \n\t"
-            "sb         %[q2_r],  +2(%[s3])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [s3] "r"(s3));
-      } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  -2(%[s3])    \n\t"
-            "sb         %[p0_f0],  -1(%[s3])    \n\t"
-            "sb         %[q0_f0],    (%[s3])    \n\t"
-            "sb         %[q1_f0],  +1(%[s3])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [s3] "r"(s3));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
-
-          : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0),
-            [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0),
-            [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p2_l],  -3(%[s2])    \n\t"
-            "sb         %[p1_l],  -2(%[s2])    \n\t"
-            "sb         %[p0_l],  -1(%[s2])    \n\t"
-            "sb         %[q0_l],    (%[s2])    \n\t"
-            "sb         %[q1_l],  +1(%[s2])    \n\t"
-            "sb         %[q2_l],  +2(%[s2])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [s2] "r"(s2));
-      } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  -2(%[s2])    \n\t"
-            "sb         %[p0_f0],  -1(%[s2])    \n\t"
-            "sb         %[q0_f0],    (%[s2])    \n\t"
-            "sb         %[q1_f0],  +1(%[s2])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [s2] "r"(s2));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_l],    %[p2_l],    16      \n\t"
-          "srl      %[p1_l],    %[p1_l],    16      \n\t"
-          "srl      %[p0_l],    %[p0_l],    16      \n\t"
-          "srl      %[q0_l],    %[q0_l],    16      \n\t"
-          "srl      %[q1_l],    %[q1_l],    16      \n\t"
-          "srl      %[q2_l],    %[q2_l],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
-            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb         %[p2_l],  -3(%[s1])    \n\t"
-            "sb         %[p1_l],  -2(%[s1])    \n\t"
-            "sb         %[p0_l],  -1(%[s1])    \n\t"
-            "sb         %[q0_l],    (%[s1])    \n\t"
-            "sb         %[q1_l],  +1(%[s1])    \n\t"
-            "sb         %[q2_l],  +2(%[s1])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [s1] "r"(s1));
-      } else if (mask & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  -2(%[s1])    \n\t"
-            "sb         %[p0_f0],  -1(%[s1])    \n\t"
-            "sb         %[q0_f0],    (%[s1])    \n\t"
-            "sb         %[q1_f0],  +1(%[s1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [s1] "r"(s1));
-      }
-    }
-  }
-}
-#endif  // #if HAVE_DSPR2
diff --git a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
deleted file mode 100644
index 34733e4..0000000
--- a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_dsp/mips/common_dspr2.h"
-#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
-#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
-#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/aom_mem.h"
-
-#if HAVE_DSPR2
-static void mb_lpf_horizontal_edge(unsigned char *s, int pitch,
-                                   const uint8_t *blimit, const uint8_t *limit,
-                                   const uint8_t *thresh, int count) {
-  uint32_t mask;
-  uint32_t hev, flat, flat2;
-  uint8_t i;
-  uint8_t *sp7, *sp6, *sp5, *sp4, *sp3, *sp2, *sp1, *sp0;
-  uint8_t *sq0, *sq1, *sq2, *sq3, *sq4, *sq5, *sq6, *sq7;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
-  uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
-  uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
-  uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
-  uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
-  uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb       %[thresh_vec],    %[uthresh]      \n\t"
-      "replv.qb       %[flimit_vec],    %[uflimit]      \n\t"
-      "replv.qb       %[limit_vec],     %[ulimit]       \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  /* prefetch data for store */
-  prefetch_store(s);
-
-  for (i = 0; i < (2 * count); i++) {
-    sp7 = s - (pitch << 3);
-    sp6 = sp7 + pitch;
-    sp5 = sp6 + pitch;
-    sp4 = sp5 + pitch;
-    sp3 = sp4 + pitch;
-    sp2 = sp3 + pitch;
-    sp1 = sp2 + pitch;
-    sp0 = sp1 + pitch;
-    sq0 = s;
-    sq1 = s + pitch;
-    sq2 = sq1 + pitch;
-    sq3 = sq2 + pitch;
-    sq4 = sq3 + pitch;
-    sq5 = sq4 + pitch;
-    sq6 = sq5 + pitch;
-    sq7 = sq6 + pitch;
-
-    __asm__ __volatile__(
-        "lw     %[p7],      (%[sp7])            \n\t"
-        "lw     %[p6],      (%[sp6])            \n\t"
-        "lw     %[p5],      (%[sp5])            \n\t"
-        "lw     %[p4],      (%[sp4])            \n\t"
-        "lw     %[p3],      (%[sp3])            \n\t"
-        "lw     %[p2],      (%[sp2])            \n\t"
-        "lw     %[p1],      (%[sp1])            \n\t"
-        "lw     %[p0],      (%[sp0])            \n\t"
-
-        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
-          [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4)
-        : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0),
-          [sp4] "r"(sp4), [sp5] "r"(sp5), [sp6] "r"(sp6), [sp7] "r"(sp7));
-
-    __asm__ __volatile__(
-        "lw     %[q0],      (%[sq0])            \n\t"
-        "lw     %[q1],      (%[sq1])            \n\t"
-        "lw     %[q2],      (%[sq2])            \n\t"
-        "lw     %[q3],      (%[sq3])            \n\t"
-        "lw     %[q4],      (%[sq4])            \n\t"
-        "lw     %[q5],      (%[sq5])            \n\t"
-        "lw     %[q6],      (%[sq6])            \n\t"
-        "lw     %[q7],      (%[sq7])            \n\t"
-
-        : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0),
-          [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4)
-        : [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0),
-          [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6), [sq7] "r"(sq7));
-
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
-                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
-
-    flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
-
-    /* f0 */
-    if (((flat2 == 0) && (flat == 0) && (mask != 0)) ||
-        ((flat2 != 0) && (flat == 0) && (mask != 0))) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      __asm__ __volatile__(
-          "sw       %[p1_f0],   (%[sp1])            \n\t"
-          "sw       %[p0_f0],   (%[sp0])            \n\t"
-          "sw       %[q0_f0],   (%[sq0])            \n\t"
-          "sw       %[q1_f0],   (%[sq1])            \n\t"
-
-          :
-          : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-            [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-            [sq1] "r"(sq1));
-    } else if ((flat2 == 0XFFFFFFFF) && (flat == 0xFFFFFFFF) &&
-               (mask == 0xFFFFFFFF)) {
-      /* f2 */
-      PACK_LEFT_0TO3()
-      PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
-                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
-                          &q6_l, &q7_l);
-
-      PACK_RIGHT_0TO3()
-      PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
-                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
-                          &q6_r, &q7_r);
-
-      COMBINE_LEFT_RIGHT_0TO2()
-      COMBINE_LEFT_RIGHT_3TO6()
-
-      __asm__ __volatile__(
-          "sw         %[p6], (%[sp6])    \n\t"
-          "sw         %[p5], (%[sp5])    \n\t"
-          "sw         %[p4], (%[sp4])    \n\t"
-          "sw         %[p3], (%[sp3])    \n\t"
-          "sw         %[p2], (%[sp2])    \n\t"
-          "sw         %[p1], (%[sp1])    \n\t"
-          "sw         %[p0], (%[sp0])    \n\t"
-
-          :
-          : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3),
-            [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [sp6] "r"(sp6),
-            [sp5] "r"(sp5), [sp4] "r"(sp4), [sp3] "r"(sp3), [sp2] "r"(sp2),
-            [sp1] "r"(sp1), [sp0] "r"(sp0));
-
-      __asm__ __volatile__(
-          "sw         %[q6], (%[sq6])    \n\t"
-          "sw         %[q5], (%[sq5])    \n\t"
-          "sw         %[q4], (%[sq4])    \n\t"
-          "sw         %[q3], (%[sq3])    \n\t"
-          "sw         %[q2], (%[sq2])    \n\t"
-          "sw         %[q1], (%[sq1])    \n\t"
-          "sw         %[q0], (%[sq0])    \n\t"
-
-          :
-          : [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3),
-            [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [sq6] "r"(sq6),
-            [sq5] "r"(sq5), [sq4] "r"(sq4), [sq3] "r"(sq3), [sq2] "r"(sq2),
-            [sq1] "r"(sq1), [sq0] "r"(sq0));
-    } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) {
-      /* f1 */
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      COMBINE_LEFT_RIGHT_0TO2()
-
-      __asm__ __volatile__(
-          "sw         %[p2], (%[sp2])    \n\t"
-          "sw         %[p1], (%[sp1])    \n\t"
-          "sw         %[p0], (%[sp0])    \n\t"
-          "sw         %[q0], (%[sq0])    \n\t"
-          "sw         %[q1], (%[sq1])    \n\t"
-          "sw         %[q2], (%[sq2])    \n\t"
-
-          :
-          : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0),
-            [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1),
-            [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2));
-    } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) {
-      /* f0+f1 */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
-
-      if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p2_r],  (%[sp2])    \n\t"
-            "sb         %[p1_r],  (%[sp1])    \n\t"
-            "sb         %[p0_r],  (%[sp0])    \n\t"
-            "sb         %[q0_r],  (%[sq0])    \n\t"
-            "sb         %[q1_r],  (%[sq1])    \n\t"
-            "sb         %[q2_r],  (%[sq2])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  (%[sp1])    \n\t"
-            "sb         %[p0_f0],  (%[sp0])    \n\t"
-            "sb         %[q0_f0],  (%[sq0])    \n\t"
-            "sb         %[q1_f0],  (%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_r],    %[p2_r],    16      \n\t"
-          "srl      %[p1_r],    %[p1_r],    16      \n\t"
-          "srl      %[p0_r],    %[p0_r],    16      \n\t"
-          "srl      %[q0_r],    %[q0_r],    16      \n\t"
-          "srl      %[q1_r],    %[q1_r],    16      \n\t"
-          "srl      %[q2_r],    %[q2_r],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
-            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p2_r],  +1(%[sp2])    \n\t"
-            "sb         %[p1_r],  +1(%[sp1])    \n\t"
-            "sb         %[p0_r],  +1(%[sp0])    \n\t"
-            "sb         %[q0_r],  +1(%[sq0])    \n\t"
-            "sb         %[q1_r],  +1(%[sq1])    \n\t"
-            "sb         %[q2_r],  +1(%[sq2])    \n\t"
-
-            :
-            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
-              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  +1(%[sp1])    \n\t"
-            "sb         %[p0_f0],  +1(%[sp0])    \n\t"
-            "sb         %[q0_f0],  +1(%[sq0])    \n\t"
-            "sb         %[q1_f0],  +1(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
-
-          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p2_l],  +2(%[sp2])    \n\t"
-            "sb         %[p1_l],  +2(%[sp1])    \n\t"
-            "sb         %[p0_l],  +2(%[sp0])    \n\t"
-            "sb         %[q0_l],  +2(%[sq0])    \n\t"
-            "sb         %[q1_l],  +2(%[sq1])    \n\t"
-            "sb         %[q2_l],  +2(%[sq2])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  +2(%[sp1])    \n\t"
-            "sb         %[p0_f0],  +2(%[sp0])    \n\t"
-            "sb         %[q0_f0],  +2(%[sq0])    \n\t"
-            "sb         %[q1_f0],  +2(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p2_l],    %[p2_l],    16      \n\t"
-          "srl      %[p1_l],    %[p1_l],    16      \n\t"
-          "srl      %[p0_l],    %[p0_l],    16      \n\t"
-          "srl      %[q0_l],    %[q0_l],    16      \n\t"
-          "srl      %[q1_l],    %[q1_l],    16      \n\t"
-          "srl      %[q2_l],    %[q2_l],    16      \n\t"
-          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
-          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
-          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
-          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
-
-          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
-            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb         %[p2_l],  +3(%[sp2])    \n\t"
-            "sb         %[p1_l],  +3(%[sp1])    \n\t"
-            "sb         %[p0_l],  +3(%[sp0])    \n\t"
-            "sb         %[q0_l],  +3(%[sq0])    \n\t"
-            "sb         %[q1_l],  +3(%[sq1])    \n\t"
-            "sb         %[q2_l],  +3(%[sq2])    \n\t"
-
-            :
-            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
-              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
-              [sq1] "r"(sq1), [sq2] "r"(sq2));
-      } else if (mask & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  +3(%[sp1])    \n\t"
-            "sb         %[p0_f0],  +3(%[sp0])    \n\t"
-            "sb         %[q0_f0],  +3(%[sq0])    \n\t"
-            "sb         %[q1_f0],  +3(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-    } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) {
-      /* f0 + f1 + f2 */
-      /* f0  function */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
-
-      /* f1  function */
-      /* left 2 element operation */
-      PACK_LEFT_0TO3()
-      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, &p2_l_f1,
-                      &p1_l_f1, &p0_l_f1, &q0_l_f1, &q1_l_f1, &q2_l_f1);
-
-      /* right 2 element operation */
-      PACK_RIGHT_0TO3()
-      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, &p2_r_f1,
-                      &p1_r_f1, &p0_r_f1, &q0_r_f1, &q1_r_f1, &q2_r_f1);
-
-      /* f2  function */
-      PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
-                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
-                          &q6_l, &q7_l);
-
-      PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
-                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
-                          &q6_r, &q7_r);
-
-      if (mask & flat & flat2 & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p6_r],  (%[sp6])    \n\t"
-            "sb         %[p5_r],  (%[sp5])    \n\t"
-            "sb         %[p4_r],  (%[sp4])    \n\t"
-            "sb         %[p3_r],  (%[sp3])    \n\t"
-            "sb         %[p2_r],  (%[sp2])    \n\t"
-            "sb         %[p1_r],  (%[sp1])    \n\t"
-            "sb         %[p0_r],  (%[sp0])    \n\t"
-
-            :
-            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
-              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
-              [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4), [sp3] "r"(sp3),
-              [sp2] "r"(sp2), [sp1] "r"(sp1), [p0_r] "r"(p0_r), [sp0] "r"(sp0));
-
-        __asm__ __volatile__(
-            "sb         %[q0_r],  (%[sq0])    \n\t"
-            "sb         %[q1_r],  (%[sq1])    \n\t"
-            "sb         %[q2_r],  (%[sq2])    \n\t"
-            "sb         %[q3_r],  (%[sq3])    \n\t"
-            "sb         %[q4_r],  (%[sq4])    \n\t"
-            "sb         %[q5_r],  (%[sq5])    \n\t"
-            "sb         %[q6_r],  (%[sq6])    \n\t"
-
-            :
-            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
-              [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
-              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
-      } else if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p2_r_f1],  (%[sp2])    \n\t"
-            "sb         %[p1_r_f1],  (%[sp1])    \n\t"
-            "sb         %[p0_r_f1],  (%[sp0])    \n\t"
-            "sb         %[q0_r_f1],  (%[sq0])    \n\t"
-            "sb         %[q1_r_f1],  (%[sq1])    \n\t"
-            "sb         %[q2_r_f1],  (%[sq2])    \n\t"
-
-            :
-            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
-              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
-              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2),
-              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
-              [sq2] "r"(sq2));
-      } else if (mask & 0x000000FF) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  (%[sp1])    \n\t"
-            "sb         %[p0_f0],  (%[sp0])    \n\t"
-            "sb         %[q0_f0],  (%[sq0])    \n\t"
-            "sb         %[q1_f0],  (%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl        %[p6_r], %[p6_r], 16     \n\t"
-          "srl        %[p5_r], %[p5_r], 16     \n\t"
-          "srl        %[p4_r], %[p4_r], 16     \n\t"
-          "srl        %[p3_r], %[p3_r], 16     \n\t"
-          "srl        %[p2_r], %[p2_r], 16     \n\t"
-          "srl        %[p1_r], %[p1_r], 16     \n\t"
-          "srl        %[p0_r], %[p0_r], 16     \n\t"
-          "srl        %[q0_r], %[q0_r], 16     \n\t"
-          "srl        %[q1_r], %[q1_r], 16     \n\t"
-          "srl        %[q2_r], %[q2_r], 16     \n\t"
-          "srl        %[q3_r], %[q3_r], 16     \n\t"
-          "srl        %[q4_r], %[q4_r], 16     \n\t"
-          "srl        %[q5_r], %[q5_r], 16     \n\t"
-          "srl        %[q6_r], %[q6_r], 16     \n\t"
-
-          : [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
-            [q3_r] "+r"(q3_r), [q4_r] "+r"(q4_r), [q5_r] "+r"(q5_r),
-            [p6_r] "+r"(p6_r), [p5_r] "+r"(p5_r), [p4_r] "+r"(p4_r),
-            [p3_r] "+r"(p3_r), [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r),
-            [q6_r] "+r"(q6_r), [p0_r] "+r"(p0_r)
-          :);
-
-      __asm__ __volatile__(
-          "srl        %[p2_r_f1], %[p2_r_f1], 16     \n\t"
-          "srl        %[p1_r_f1], %[p1_r_f1], 16     \n\t"
-          "srl        %[p0_r_f1], %[p0_r_f1], 16     \n\t"
-          "srl        %[q0_r_f1], %[q0_r_f1], 16     \n\t"
-          "srl        %[q1_r_f1], %[q1_r_f1], 16     \n\t"
-          "srl        %[q2_r_f1], %[q2_r_f1], 16     \n\t"
-          "srl        %[p1_f0],   %[p1_f0],   8      \n\t"
-          "srl        %[p0_f0],   %[p0_f0],   8      \n\t"
-          "srl        %[q0_f0],   %[q0_f0],   8      \n\t"
-          "srl        %[q1_f0],   %[q1_f0],   8      \n\t"
-
-          : [p2_r_f1] "+r"(p2_r_f1), [p1_r_f1] "+r"(p1_r_f1),
-            [p0_r_f1] "+r"(p0_r_f1), [q0_r_f1] "+r"(q0_r_f1),
-            [q1_r_f1] "+r"(q1_r_f1), [q2_r_f1] "+r"(q2_r_f1),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & flat2 & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p6_r],  +1(%[sp6])    \n\t"
-            "sb         %[p5_r],  +1(%[sp5])    \n\t"
-            "sb         %[p4_r],  +1(%[sp4])    \n\t"
-            "sb         %[p3_r],  +1(%[sp3])    \n\t"
-            "sb         %[p2_r],  +1(%[sp2])    \n\t"
-            "sb         %[p1_r],  +1(%[sp1])    \n\t"
-            "sb         %[p0_r],  +1(%[sp0])    \n\t"
-
-            :
-            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
-              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
-              [p0_r] "r"(p0_r), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
-              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
-
-        __asm__ __volatile__(
-            "sb         %[q0_r],  +1(%[sq0])    \n\t"
-            "sb         %[q1_r],  +1(%[sq1])    \n\t"
-            "sb         %[q2_r],  +1(%[sq2])    \n\t"
-            "sb         %[q3_r],  +1(%[sq3])    \n\t"
-            "sb         %[q4_r],  +1(%[sq4])    \n\t"
-            "sb         %[q5_r],  +1(%[sq5])    \n\t"
-            "sb         %[q6_r],  +1(%[sq6])    \n\t"
-
-            :
-            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
-              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
-              [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
-              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
-      } else if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p2_r_f1],  +1(%[sp2])    \n\t"
-            "sb         %[p1_r_f1],  +1(%[sp1])    \n\t"
-            "sb         %[p0_r_f1],  +1(%[sp0])    \n\t"
-            "sb         %[q0_r_f1],  +1(%[sq0])    \n\t"
-            "sb         %[q1_r_f1],  +1(%[sq1])    \n\t"
-            "sb         %[q2_r_f1],  +1(%[sq2])    \n\t"
-
-            :
-            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
-              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
-              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2),
-              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
-              [sq2] "r"(sq2));
-      } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  +1(%[sp1])    \n\t"
-            "sb         %[p0_f0],  +1(%[sp0])    \n\t"
-            "sb         %[q0_f0],  +1(%[sq0])    \n\t"
-            "sb         %[q1_f0],  +1(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl        %[p1_f0], %[p1_f0], 8     \n\t"
-          "srl        %[p0_f0], %[p0_f0], 8     \n\t"
-          "srl        %[q0_f0], %[q0_f0], 8     \n\t"
-          "srl        %[q1_f0], %[q1_f0], 8     \n\t"
-
-          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & flat2 & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p6_l],  +2(%[sp6])    \n\t"
-            "sb         %[p5_l],  +2(%[sp5])    \n\t"
-            "sb         %[p4_l],  +2(%[sp4])    \n\t"
-            "sb         %[p3_l],  +2(%[sp3])    \n\t"
-            "sb         %[p2_l],  +2(%[sp2])    \n\t"
-            "sb         %[p1_l],  +2(%[sp1])    \n\t"
-            "sb         %[p0_l],  +2(%[sp0])    \n\t"
-
-            :
-            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
-              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
-              [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
-              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
-
-        __asm__ __volatile__(
-            "sb         %[q0_l],  +2(%[sq0])    \n\t"
-            "sb         %[q1_l],  +2(%[sq1])    \n\t"
-            "sb         %[q2_l],  +2(%[sq2])    \n\t"
-            "sb         %[q3_l],  +2(%[sq3])    \n\t"
-            "sb         %[q4_l],  +2(%[sq4])    \n\t"
-            "sb         %[q5_l],  +2(%[sq5])    \n\t"
-            "sb         %[q6_l],  +2(%[sq6])    \n\t"
-
-            :
-            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
-              [q6_l] "r"(q6_l), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
-              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
-      } else if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p2_l_f1],  +2(%[sp2])    \n\t"
-            "sb         %[p1_l_f1],  +2(%[sp1])    \n\t"
-            "sb         %[p0_l_f1],  +2(%[sp0])    \n\t"
-            "sb         %[q0_l_f1],  +2(%[sq0])    \n\t"
-            "sb         %[q1_l_f1],  +2(%[sq1])    \n\t"
-            "sb         %[q2_l_f1],  +2(%[sq2])    \n\t"
-
-            :
-            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
-              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
-              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2),
-              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
-              [sq2] "r"(sq2));
-      } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__(
-            "sb         %[p1_f0],  +2(%[sp1])    \n\t"
-            "sb         %[p0_f0],  +2(%[sp0])    \n\t"
-            "sb         %[q0_f0],  +2(%[sq0])    \n\t"
-            "sb         %[q1_f0],  +2(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-
-      __asm__ __volatile__(
-          "srl      %[p6_l],    %[p6_l],    16   \n\t"
-          "srl      %[p5_l],    %[p5_l],    16   \n\t"
-          "srl      %[p4_l],    %[p4_l],    16   \n\t"
-          "srl      %[p3_l],    %[p3_l],    16   \n\t"
-          "srl      %[p2_l],    %[p2_l],    16   \n\t"
-          "srl      %[p1_l],    %[p1_l],    16   \n\t"
-          "srl      %[p0_l],    %[p0_l],    16   \n\t"
-          "srl      %[q0_l],    %[q0_l],    16   \n\t"
-          "srl      %[q1_l],    %[q1_l],    16   \n\t"
-          "srl      %[q2_l],    %[q2_l],    16   \n\t"
-          "srl      %[q3_l],    %[q3_l],    16   \n\t"
-          "srl      %[q4_l],    %[q4_l],    16   \n\t"
-          "srl      %[q5_l],    %[q5_l],    16   \n\t"
-          "srl      %[q6_l],    %[q6_l],    16   \n\t"
-
-          : [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
-            [q3_l] "+r"(q3_l), [q4_l] "+r"(q4_l), [q5_l] "+r"(q5_l),
-            [q6_l] "+r"(q6_l), [p6_l] "+r"(p6_l), [p5_l] "+r"(p5_l),
-            [p4_l] "+r"(p4_l), [p3_l] "+r"(p3_l), [p2_l] "+r"(p2_l),
-            [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l)
-          :);
-
-      __asm__ __volatile__(
-          "srl      %[p2_l_f1],   %[p2_l_f1],   16   \n\t"
-          "srl      %[p1_l_f1],   %[p1_l_f1],   16   \n\t"
-          "srl      %[p0_l_f1],   %[p0_l_f1],   16   \n\t"
-          "srl      %[q0_l_f1],   %[q0_l_f1],   16   \n\t"
-          "srl      %[q1_l_f1],   %[q1_l_f1],   16   \n\t"
-          "srl      %[q2_l_f1],   %[q2_l_f1],   16   \n\t"
-          "srl      %[p1_f0],     %[p1_f0],     8    \n\t"
-          "srl      %[p0_f0],     %[p0_f0],     8    \n\t"
-          "srl      %[q0_f0],     %[q0_f0],     8    \n\t"
-          "srl      %[q1_f0],     %[q1_f0],     8    \n\t"
-
-          : [p2_l_f1] "+r"(p2_l_f1), [p1_l_f1] "+r"(p1_l_f1),
-            [p0_l_f1] "+r"(p0_l_f1), [q0_l_f1] "+r"(q0_l_f1),
-            [q1_l_f1] "+r"(q1_l_f1), [q2_l_f1] "+r"(q2_l_f1),
-            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
-            [q1_f0] "+r"(q1_f0)
-          :);
-
-      if (mask & flat & flat2 & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb     %[p6_l],    +3(%[sp6])    \n\t"
-            "sb     %[p5_l],    +3(%[sp5])    \n\t"
-            "sb     %[p4_l],    +3(%[sp4])    \n\t"
-            "sb     %[p3_l],    +3(%[sp3])    \n\t"
-            "sb     %[p2_l],    +3(%[sp2])    \n\t"
-            "sb     %[p1_l],    +3(%[sp1])    \n\t"
-            "sb     %[p0_l],    +3(%[sp0])    \n\t"
-
-            :
-            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
-              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
-              [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
-              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
-
-        __asm__ __volatile__(
-            "sb     %[q0_l],    +3(%[sq0])    \n\t"
-            "sb     %[q1_l],    +3(%[sq1])    \n\t"
-            "sb     %[q2_l],    +3(%[sq2])    \n\t"
-            "sb     %[q3_l],    +3(%[sq3])    \n\t"
-            "sb     %[q4_l],    +3(%[sq4])    \n\t"
-            "sb     %[q5_l],    +3(%[sq5])    \n\t"
-            "sb     %[q6_l],    +3(%[sq6])    \n\t"
-
-            :
-            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
-              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
-              [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), [sq3] "r"(sq3),
-              [sq4] "r"(sq4), [sq5] "r"(sq5), [q6_l] "r"(q6_l), [sq6] "r"(sq6));
-      } else if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb     %[p2_l_f1],     +3(%[sp2])    \n\t"
-            "sb     %[p1_l_f1],     +3(%[sp1])    \n\t"
-            "sb     %[p0_l_f1],     +3(%[sp0])    \n\t"
-            "sb     %[q0_l_f1],     +3(%[sq0])    \n\t"
-            "sb     %[q1_l_f1],     +3(%[sq1])    \n\t"
-            "sb     %[q2_l_f1],     +3(%[sq2])    \n\t"
-
-            :
-            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
-              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
-              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2),
-              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
-              [sq2] "r"(sq2));
-      } else if (mask & 0xFF000000) {
-        __asm__ __volatile__(
-            "sb     %[p1_f0],   +3(%[sp1])    \n\t"
-            "sb     %[p0_f0],   +3(%[sp0])    \n\t"
-            "sb     %[q0_f0],   +3(%[sq0])    \n\t"
-            "sb     %[q1_f0],   +3(%[sq1])    \n\t"
-
-            :
-            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
-              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
-              [sq0] "r"(sq0), [sq1] "r"(sq1));
-      }
-    }
-
-    s = s + 4;
-  }
-}
-
-void aom_lpf_horizontal_16_dspr2(unsigned char *s, int pitch,
-                                 const uint8_t *blimit, const uint8_t *limit,
-                                 const uint8_t *thresh) {
-  mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 1);
-}
-
-void aom_lpf_horizontal_16_dual_dspr2(unsigned char *s, int pitch,
-                                      const uint8_t *blimit,
-                                      const uint8_t *limit,
-                                      const uint8_t *thresh) {
-  mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 2);
-}
-#endif  // #if HAVE_DSPR2
diff --git a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
deleted file mode 100644
index 3d3f1ec..0000000
--- a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
+++ /dev/null
@@ -1,758 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <stdlib.h>
-
-#include "config/aom_dsp_rtcd.h"
-
-#include "aom/aom_integer.h"
-#include "aom_dsp/mips/common_dspr2.h"
-#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
-#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
-#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/aom_mem.h"
-
-#if HAVE_DSPR2
-void aom_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
-                               const uint8_t *limit, const uint8_t *thresh) {
-  uint8_t i;
-  uint32_t mask, hev, flat, flat2;
-  uint8_t *s1, *s2, *s3, *s4;
-  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t thresh_vec, flimit_vec, limit_vec;
-  uint32_t uflimit, ulimit, uthresh;
-  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
-  uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
-  uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
-  uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
-  uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
-  uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
-
-  uflimit = *blimit;
-  ulimit = *limit;
-  uthresh = *thresh;
-
-  /* create quad-byte */
-  __asm__ __volatile__(
-      "replv.qb     %[thresh_vec],     %[uthresh]    \n\t"
-      "replv.qb     %[flimit_vec],     %[uflimit]    \n\t"
-      "replv.qb     %[limit_vec],      %[ulimit]     \n\t"
-
-      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
-        [limit_vec] "=r"(limit_vec)
-      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
-
-  prefetch_store(s + pitch);
-
-  for (i = 0; i < 2; i++) {
-    s1 = s;
-    s2 = s + pitch;
-    s3 = s2 + pitch;
-    s4 = s3 + pitch;
-    s = s4 + pitch;
-
-    __asm__ __volatile__(
-        "lw     %[p0],  -4(%[s1])    \n\t"
-        "lw     %[p1],  -4(%[s2])    \n\t"
-        "lw     %[p2],  -4(%[s3])    \n\t"
-        "lw     %[p3],  -4(%[s4])    \n\t"
-        "lw     %[p4],  -8(%[s1])    \n\t"
-        "lw     %[p5],  -8(%[s2])    \n\t"
-        "lw     %[p6],  -8(%[s3])    \n\t"
-        "lw     %[p7],  -8(%[s4])    \n\t"
-
-        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
-          [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4)
-        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
-
-    __asm__ __volatile__(
-        "lw     %[q3],  (%[s1])     \n\t"
-        "lw     %[q2],  (%[s2])     \n\t"
-        "lw     %[q1],  (%[s3])     \n\t"
-        "lw     %[q0],  (%[s4])     \n\t"
-        "lw     %[q7],  +4(%[s1])   \n\t"
-        "lw     %[q6],  +4(%[s2])   \n\t"
-        "lw     %[q5],  +4(%[s3])   \n\t"
-        "lw     %[q4],  +4(%[s4])   \n\t"
-
-        : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0),
-          [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4)
-        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
-
-    /* transpose p3, p2, p1, p0
-       original (when loaded from memory)
-       register       -4    -3   -2     -1
-         p0         p0_0  p0_1  p0_2  p0_3
-         p1         p1_0  p1_1  p1_2  p1_3
-         p2         p2_0  p2_1  p2_2  p2_3
-         p3         p3_0  p3_1  p3_2  p3_3
-
-       after transpose
-       register
-         p0         p3_3  p2_3  p1_3  p0_3
-         p1         p3_2  p2_2  p1_2  p0_2
-         p2         p3_1  p2_1  p1_1  p0_1
-         p3         p3_0  p2_0  p1_0  p0_0
-    */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
-        "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
-        "precr.qb.ph    %[prim4],   %[p2],      %[p3]       \n\t"
-
-        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[p0],      %[p1],      %[sec3]     \n\t"
-        "precrq.ph.w    %[p2],      %[p3],      %[sec4]     \n\t"
-        "append         %[p1],      %[sec3],    16          \n\t"
-        "append         %[p3],      %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2),
-          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
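/* What the pack/append sequence above computes, as a portable sketch:
   treat the four 32-bit words as a 4x4 byte matrix and transpose it,
   so byte c of input row r becomes byte r of output word c (byte
   numbering assumes little-endian lanes; the helper is illustrative,
   not original code): */
static void transpose_4x4_bytes(const uint32_t in[4], uint32_t out[4]) {
  for (int c = 0; c < 4; ++c) {
    uint32_t w = 0;
    for (int r = 0; r < 4; ++r) {
      const uint8_t b = (uint8_t)(in[r] >> (8 * c)); /* byte c of row r */
      w |= (uint32_t)b << (8 * r);                   /* byte r of out[c] */
    }
    out[c] = w;
  }
}
/* The same idea repeats for q0..q3 and p4..p7 below. */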
-
-    /* transpose q0, q1, q2, q3
-       original (when loaded from memory)
-       register       +1    +2    +3    +4
-         q3         q3_0  q3_1  q3_2  q3_3
-         q2         q2_0  q2_1  q2_2  q2_3
-         q1         q1_0  q1_1  q1_2  q1_3
-         q0         q0_0  q0_1  q0_2  q0_3
-
-       after transpose
-       register
-         q3         q0_3  q1_3  q2_3  q3_3
-         q2         q0_2  q1_2  q2_2  q3_2
-         q1         q0_1  q1_1  q2_1  q3_1
-         q0         q0_0  q1_0  q2_0  q3_0
-    */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
-        "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
-        "precr.qb.ph    %[prim4],   %[q1],      %[q0]       \n\t"
-
-        "precrq.qb.ph   %[q2],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[q0],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w    %[q3],      %[q2],      %[sec3]     \n\t"
-        "precrq.ph.w    %[q1],      %[q0],      %[sec4]     \n\t"
-        "append         %[q2],      %[sec3],    16          \n\t"
-        "append         %[q0],      %[sec4],    16          \n\t"
-
-        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
-          [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1),
-          [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
-        :);
-
-    /* transpose p7, p6, p5, p4
-       original (when loaded from memory)
-       register      -8    -7   -6     -5
-         p4         p4_0  p4_1  p4_2  p4_3
-         p5         p5_0  p5_1  p5_2  p5_3
-         p6         p6_0  p6_1  p6_2  p6_3
-         p7         p7_0  p7_1  p7_2  p7_3
-
-       after transpose
-       register
-         p4         p7_3  p6_3  p5_3  p4_3
-         p5         p7_2  p6_2  p5_2  p4_2
-         p6         p7_1  p6_1  p5_1  p4_1
-         p7         p7_0  p6_0  p5_0  p4_0
-    */
-    __asm__ __volatile__(
-        "precrq.qb.ph   %[prim1],   %[p4],      %[p5]       \n\t"
-        "precr.qb.ph    %[prim2],   %[p4],      %[p5]       \n\t"
-        "precrq.qb.ph   %[prim3],   %[p6],      %[p7]       \n\t"
-        "precr.qb.ph    %[prim4],   %[p6],      %[p7]       \n\t"
-
-        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
-        "precr.qb.ph    %[p7],      %[prim1],   %[prim2]    \n\t"
-        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
-        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
-
-        "precrq.ph.w