Port renaming changes from AOMedia
Cherry-picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*
Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
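
Taken together, the rename is mechanical and exhaustive: vpx→aom and VPX→AOM in the shared DSP/ports layers, vp9/VP9 and vp10/VP10→av1/AV1 for the codec itself, and the config guards follow suit (CONFIG_VP9_HIGHBITDEPTH→CONFIG_AOM_HIGHBITDEPTH, CONFIG_VP10→CONFIG_AV1, CONFIG_VP10_ENCODER→CONFIG_AV1_ENCODER), with source files, makefiles, and the RTCD perl definitions renamed to match, as the hunks below show.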
diff --git a/aom_dsp/add_noise.c b/aom_dsp/add_noise.c
index 826d935..2b281b7 100644
--- a/aom_dsp/add_noise.c
+++ b/aom_dsp/add_noise.c
@@ -11,13 +11,13 @@
#include <math.h>
#include <stdlib.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-void vpx_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16],
+void aom_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16],
char whiteclamp[16], char bothclamp[16],
unsigned int width, unsigned int height, int pitch) {
unsigned int i, j;
@@ -43,7 +43,7 @@
(exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
}
-int vpx_setup_noise(double sigma, int size, char *noise) {
+int aom_setup_noise(double sigma, int size, char *noise) {
char char_dist[256];
int next = 0, i, j;
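
For downstream callers, a minimal sketch of driving the renamed noise API; the buffer capacity and clamp setup below are assumptions inferred from the signatures above, not taken from this patch:

    #include <stdint.h>
    #include <string.h>

    /* Renamed entry points from aom_dsp/add_noise.c (see hunk above). */
    int aom_setup_noise(double sigma, int size, char *noise);
    void aom_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16],
                               char whiteclamp[16], char bothclamp[16],
                               unsigned int width, unsigned int height, int pitch);

    static void add_film_grain(uint8_t *y_plane, unsigned int width,
                               unsigned int height, int stride) {
      char noise[3072]; /* assumed capacity, as in the old vpx postproc */
      char blackclamp[16], whiteclamp[16], bothclamp[16];
      const char clamp = 16; /* illustrative clamp for sigma ~= 4.0 */

      aom_setup_noise(4.0, (int)sizeof(noise), noise);
      memset(blackclamp, clamp, 16);    /* keep noisy samples off pure black */
      memset(whiteclamp, clamp, 16);    /* ...and off pure white */
      memset(bothclamp, 2 * clamp, 16);
      aom_plane_add_noise_c(y_plane, noise, blackclamp, whiteclamp, bothclamp,
                            width, height, stride);
    }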
diff --git a/aom_dsp/vpx_convolve.c b/aom_dsp/aom_convolve.c
similarity index 90%
rename from aom_dsp/vpx_convolve.c
rename to aom_dsp/aom_convolve.c
index 370ad77..b0630d2 100644
--- a/aom_dsp/vpx_convolve.c
+++ b/aom_dsp/aom_convolve.c
@@ -11,12 +11,12 @@
#include <assert.h>
#include <string.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
@@ -155,7 +155,7 @@
return (int)((const InterpKernel *)(intptr_t)f - base);
}
-void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -170,7 +170,7 @@
w, h);
}
-void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -185,7 +185,7 @@
x_step_q4, w, h);
}
-void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -200,7 +200,7 @@
w, h);
}
-void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -215,7 +215,7 @@
y_step_q4, w, h);
}
-void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
@@ -229,7 +229,7 @@
filters_y, y0_q4, y_step_q4, w, h);
}
-void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
@@ -238,13 +238,13 @@
assert(w <= MAX_SB_SIZE);
assert(h <= MAX_SB_SIZE);
- vpx_convolve8_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, x_step_q4,
+ aom_convolve8_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
- vpx_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
+ aom_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
h);
}
-void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int filter_x_stride, const int16_t *filter_y,
int filter_y_stride, int w, int h) {
@@ -262,7 +262,7 @@
}
}
-void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int filter_x_stride, const int16_t *filter_y,
int filter_y_stride, int w, int h) {
@@ -281,57 +281,57 @@
}
}
-void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
- vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
}
-void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
- vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
}
-void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
- vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
}
-void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
int h) {
- vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}
-void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
int h) {
- vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}
-void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
- vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
const InterpKernel *x_filters, int x0_q4,
@@ -466,7 +466,7 @@
MAX_SB_SIZE, dst, dst_stride, y_filters, y0_q4, y_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -480,7 +480,7 @@
x_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
@@ -494,7 +494,7 @@
x_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -508,7 +508,7 @@
y_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
@@ -522,7 +522,7 @@
y_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -537,7 +537,7 @@
filters_y, y0_q4, y_step_q4, w, h, bd);
}
-void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
+void aom_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -547,13 +547,13 @@
assert(w <= MAX_SB_SIZE);
assert(h <= MAX_SB_SIZE);
- vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE,
+ aom_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE,
filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
- vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE, dst,
+ aom_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), MAX_SB_SIZE, dst,
dst_stride, NULL, 0, NULL, 0, w, h, bd);
}
-void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
+void aom_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
@@ -574,7 +574,7 @@
}
}
-void vpx_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
+void aom_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
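
Note that the copy and avg variants ignore their filter arguments entirely; the patch itself relies on this (aom_convolve8_avg_c passes NULL/0 above). A small sketch building bi-prediction from the two renamed primitives, under that reading:

    #include <stddef.h>
    #include <stdint.h>

    /* Renamed entry points from aom_dsp/aom_convolve.c (see hunks above). */
    void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int filter_x_stride,
                             const int16_t *filter_y, int filter_y_stride,
                             int w, int h);
    void aom_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int filter_x_stride,
                            const int16_t *filter_y, int filter_y_stride,
                            int w, int h);

    /* Copy one 64x64 prediction into dst, then average a second into it. */
    static void bipredict_64x64(const uint8_t *pred0, const uint8_t *pred1,
                                uint8_t *dst, ptrdiff_t stride) {
      aom_convolve_copy_c(pred0, 64, dst, stride, NULL, 0, NULL, 0, 64, 64);
      aom_convolve_avg_c(pred1, 64, dst, stride, NULL, 0, NULL, 0, 64, 64);
    }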
diff --git a/aom_dsp/vpx_convolve.h b/aom_dsp/aom_convolve.h
similarity index 88%
rename from aom_dsp/vpx_convolve.h
rename to aom_dsp/aom_convolve.h
index 20bef23..3441323 100644
--- a/aom_dsp/vpx_convolve.h
+++ b/aom_dsp/aom_convolve.h
@@ -7,11 +7,11 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_VPX_CONVOLVE_H_
-#define VPX_DSP_VPX_CONVOLVE_H_
+#ifndef AOM_DSP_AOM_CONVOLVE_H_
+#define AOM_DSP_AOM_CONVOLVE_H_
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -29,11 +29,11 @@
// --Must round-up because block may be located at sub-pixel position.
// --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
// --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
#define MAX_EXT_SIZE 263
#else
#define MAX_EXT_SIZE 135
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
@@ -41,7 +41,7 @@
const int16_t *filter_y, int y_step_q4, int w,
int h);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -53,4 +53,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_VPX_CONVOLVE_H_
+#endif // AOM_DSP_AOM_CONVOLVE_H_
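
Sanity-checking the MAX_EXT_SIZE constants against the comment's own numbers (max step 32/16 pel, max subpel offset 15, SUBPEL_TAPS = 8): for 64-pixel superblocks the intermediate buffer needs ceil((63 * 32 + 15) / 16) + 8 = 127 + 8 = 135 rows, and for 128-pixel superblocks under CONFIG_AV1 && CONFIG_EXT_PARTITION it needs ceil((127 * 32 + 15) / 16) + 8 = 255 + 8 = 263, matching the two definitions above.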
diff --git a/aom_dsp/vpx_dsp.mk b/aom_dsp/aom_dsp.mk
similarity index 79%
rename from aom_dsp/vpx_dsp.mk
rename to aom_dsp/aom_dsp.mk
index c8933dd..b671a32 100644
--- a/aom_dsp/vpx_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -8,8 +8,8 @@
## be found in the AUTHORS file in the root of the source tree.
##
-DSP_SRCS-yes += vpx_dsp.mk
-DSP_SRCS-yes += vpx_dsp_common.h
+DSP_SRCS-yes += aom_dsp.mk
+DSP_SRCS-yes += aom_dsp_common.h
DSP_SRCS-$(HAVE_MSA) += mips/macros_msa.h
@@ -45,12 +45,12 @@
DSP_SRCS-$(HAVE_SSE) += x86/intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE) += x86/highbd_intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_intrapred_sse2.asm
-endif # CONFIG_VP9_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
DSP_SRCS-$(HAVE_NEON_ASM) += arm/intrapred_neon_asm$(ASM)
DSP_SRCS-$(HAVE_NEON) += arm/intrapred_neon.c
@@ -64,7 +64,7 @@
# inter predictions
-ifeq ($(CONFIG_VP10),yes)
+ifeq ($(CONFIG_AV1),yes)
DSP_SRCS-yes += blend.h
DSP_SRCS-yes += blend_a64_mask.c
DSP_SRCS-yes += blend_a64_hmask.c
@@ -73,54 +73,54 @@
DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_mask_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_hmask_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/blend_a64_vmask_sse4.c
-endif #CONFIG_VP10
+endif #CONFIG_AV1
# interpolation filters
-DSP_SRCS-yes += vpx_convolve.c
-DSP_SRCS-yes += vpx_convolve.h
-DSP_SRCS-yes += vpx_filter.h
+DSP_SRCS-yes += aom_convolve.c
+DSP_SRCS-yes += aom_convolve.h
+DSP_SRCS-yes += aom_filter.h
DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/convolve.h
-DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/vpx_asm_stubs.c
-DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_8t_sse2.asm
-DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_bilinear_sse2.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_bilinear_ssse3.asm
-DSP_SRCS-$(HAVE_AVX2) += x86/vpx_subpixel_8t_intrin_avx2.c
-DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_intrin_ssse3.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-DSP_SRCS-$(HAVE_SSE2) += x86/vpx_high_subpixel_8t_sse2.asm
-DSP_SRCS-$(HAVE_SSE2) += x86/vpx_high_subpixel_bilinear_sse2.asm
+DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/aom_asm_stubs.c
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_subpixel_8t_sse2.asm
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_subpixel_bilinear_sse2.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_bilinear_ssse3.asm
+DSP_SRCS-$(HAVE_AVX2) += x86/aom_subpixel_8t_intrin_avx2.c
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_intrin_ssse3.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_8t_sse2.asm
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_bilinear_sse2.asm
endif
-DSP_SRCS-$(HAVE_SSE2) += x86/vpx_convolve_copy_sse2.asm
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_convolve_copy_sse2.asm
ifeq ($(HAVE_NEON_ASM),yes)
-DSP_SRCS-yes += arm/vpx_convolve_copy_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve8_avg_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve8_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve_avg_neon_asm$(ASM)
-DSP_SRCS-yes += arm/vpx_convolve_neon.c
+DSP_SRCS-yes += arm/aom_convolve_copy_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve8_avg_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve8_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve_avg_neon_asm$(ASM)
+DSP_SRCS-yes += arm/aom_convolve_neon.c
else
ifeq ($(HAVE_NEON),yes)
-DSP_SRCS-yes += arm/vpx_convolve_copy_neon.c
-DSP_SRCS-yes += arm/vpx_convolve8_avg_neon.c
-DSP_SRCS-yes += arm/vpx_convolve8_neon.c
-DSP_SRCS-yes += arm/vpx_convolve_avg_neon.c
-DSP_SRCS-yes += arm/vpx_convolve_neon.c
+DSP_SRCS-yes += arm/aom_convolve_copy_neon.c
+DSP_SRCS-yes += arm/aom_convolve8_avg_neon.c
+DSP_SRCS-yes += arm/aom_convolve8_neon.c
+DSP_SRCS-yes += arm/aom_convolve_avg_neon.c
+DSP_SRCS-yes += arm/aom_convolve_neon.c
endif # HAVE_NEON
endif # HAVE_NEON_ASM
# common (msa)
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_horiz_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_vert_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_horiz_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_vert_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_avg_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_copy_msa.c
-DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_msa.h
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_horiz_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_vert_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_horiz_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_vert_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_avg_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_copy_msa.c
+DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_msa.h
# common (dspr2)
DSP_SRCS-$(HAVE_DSPR2) += mips/convolve_common_dspr2.h
@@ -167,15 +167,15 @@
DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_horiz_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/loopfilter_mb_vert_dspr2.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_loopfilter_sse2.c
-endif # CONFIG_VP9_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
DSP_SRCS-yes += txfm_common.h
DSP_SRCS-$(HAVE_SSE2) += x86/txfm_common_sse2.h
DSP_SRCS-$(HAVE_MSA) += mips/txfm_macros_msa.h
# forward transform
-ifeq ($(CONFIG_VP10),yes)
+ifeq ($(CONFIG_AV1),yes)
DSP_SRCS-yes += fwd_txfm.c
DSP_SRCS-yes += fwd_txfm.h
DSP_SRCS-$(HAVE_SSE2) += x86/fwd_txfm_sse2.h
@@ -191,10 +191,10 @@
DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.h
DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/fwd_dct32x32_msa.c
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
# inverse transform
-ifeq ($(CONFIG_VP10), yes)
+ifeq ($(CONFIG_AV1), yes)
DSP_SRCS-yes += inv_txfm.h
DSP_SRCS-yes += inv_txfm.c
DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.h
@@ -234,23 +234,23 @@
DSP_SRCS-$(HAVE_MSA) += mips/idct16x16_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/idct32x32_msa.c
-ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_DSPR2) += mips/inv_txfm_dspr2.h
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans4_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans8_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
-endif # CONFIG_VP9_HIGHBITDEPTH
-endif # CONFIG_VP10
+endif # CONFIG_AOM_HIGHBITDEPTH
+endif # CONFIG_AV1
# quantization
-ifneq ($(filter yes,$(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes,$(CONFIG_AV1_ENCODER)),)
DSP_SRCS-yes += quantize.c
DSP_SRCS-yes += quantize.h
DSP_SRCS-$(HAVE_SSE2) += x86/quantize_sse2.c
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_quantize_intrin_sse2.c
endif
ifeq ($(ARCH_X86_64),yes)
@@ -269,17 +269,17 @@
endif
# high bit depth subtract
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_subtract_sse2.c
endif
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
DSP_SRCS-yes += sum_squares.c
DSP_SRCS-$(HAVE_SSE2) += x86/sum_squares_sse2.c
-endif # CONFIG_VP10_ENCODER
+endif # CONFIG_AV1_ENCODER
ifeq ($(CONFIG_ENCODERS),yes)
DSP_SRCS-yes += sad.c
@@ -299,7 +299,7 @@
DSP_SRCS-$(HAVE_AVX2) += x86/sad4d_avx2.c
DSP_SRCS-$(HAVE_AVX2) += x86/sad_avx2.c
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
ifeq ($(CONFIG_EXT_INTER),yes)
DSP_SRCS-$(HAVE_SSSE3) += x86/masked_sad_intrin_ssse3.c
DSP_SRCS-$(HAVE_SSSE3) += x86/masked_variance_intrin_ssse3.c
@@ -308,7 +308,7 @@
DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_sad_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_variance_sse4.c
endif #CONFIG_OBMC
-endif #CONFIG_VP10_ENCODER
+endif #CONFIG_AV1_ENCODER
DSP_SRCS-$(HAVE_SSE) += x86/sad4d_sse2.asm
DSP_SRCS-$(HAVE_SSE) += x86/sad_sse2.asm
@@ -316,10 +316,10 @@
DSP_SRCS-$(HAVE_SSE2) += x86/sad_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/subtract_sse2.asm
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm
-endif # CONFIG_VP9_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
endif # CONFIG_ENCODERS
@@ -353,17 +353,17 @@
DSP_SRCS-$(HAVE_SSE) += x86/subpel_variance_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/subpel_variance_sse2.asm # Contains SSE2 and SSSE3
-ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_sse2.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_variance_sse4.c
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_impl_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_subpel_variance_impl_sse2.asm
-endif # CONFIG_VP9_HIGHBITDEPTH
+endif # CONFIG_AOM_HIGHBITDEPTH
endif # CONFIG_ENCODERS
DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
-DSP_SRCS-yes += vpx_dsp_rtcd.c
-DSP_SRCS-yes += vpx_dsp_rtcd_defs.pl
+DSP_SRCS-yes += aom_dsp_rtcd.c
+DSP_SRCS-yes += aom_dsp_rtcd_defs.pl
-$(eval $(call rtcd_h_template,vpx_dsp_rtcd,aom_dsp/vpx_dsp_rtcd_defs.pl))
+$(eval $(call rtcd_h_template,aom_dsp_rtcd,aom_dsp/aom_dsp_rtcd_defs.pl))
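
(A note on the idiom this makefile inherits from libvpx: DSP_SRCS-$(HAVE_X) and the ifeq ($(CONFIG_X),yes) guards resolve against the values configure writes into the renamed aom_config files, so a source lands in DSP_SRCS-yes and gets compiled only when its ISA or feature is enabled, while everything collected under DSP_SRCS-no is discarded. The closing rtcd_h_template call generates aom_dsp_rtcd.h from the renamed aom_dsp_rtcd_defs.pl, which appears later in this patch.)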
diff --git a/aom_dsp/vpx_dsp_common.h b/aom_dsp/aom_dsp_common.h
similarity index 82%
rename from aom_dsp/vpx_dsp_common.h
rename to aom_dsp/aom_dsp_common.h
index 8f911dd..0524169 100644
--- a/aom_dsp/vpx_dsp_common.h
+++ b/aom_dsp/aom_dsp_common.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_VPX_DSP_COMMON_H_
-#define VPX_DSP_VPX_DSP_COMMON_H_
+#ifndef AOM_DSP_AOM_DSP_COMMON_H_
+#define AOM_DSP_AOM_DSP_COMMON_H_
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
@@ -20,15 +20,15 @@
#endif
#ifndef MAX_SB_SIZE
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
#define MAX_SB_SIZE 128
#else
#define MAX_SB_SIZE 64
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
#endif // ndef MAX_SB_SIZE
-#define VPXMIN(x, y) (((x) < (y)) ? (x) : (y))
-#define VPXMAX(x, y) (((x) > (y)) ? (x) : (y))
+#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))
+#define AOMMAX(x, y) (((x) > (y)) ? (x) : (y))
#define IMPLIES(a, b) (!(a) || (b)) // Logical 'a implies b' (or 'a -> b')
@@ -46,7 +46,7 @@
#define UNLIKELY(v) (v)
#endif
-#define VPX_SWAP(type, a, b) \
+#define AOM_SWAP(type, a, b) \
do { \
type c = (b); \
b = a; \
@@ -57,7 +57,7 @@
typedef uint16_t qm_val_t;
#define AOM_QM_BITS 6
#endif
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Note:
// tran_low_t is the datatype used for final transform coefficients.
// tran_high_t is the datatype used for intermediate transform stages.
@@ -69,7 +69,7 @@
// tran_high_t is the datatype used for intermediate transform stages.
typedef int32_t tran_high_t;
typedef int16_t tran_low_t;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static INLINE uint8_t clip_pixel(int val) {
return (val > 255) ? 255 : (val < 0) ? 0 : val;
@@ -83,7 +83,7 @@
return value < low ? low : (value > high ? high : value);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
switch (bd) {
case 8:
@@ -92,10 +92,10 @@
case 12: return (uint16_t)clamp(val, 0, 4095);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_VPX_DSP_COMMON_H_
+#endif // AOM_DSP_AOM_DSP_COMMON_H_
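
The renamed helpers behave exactly as before; a self-contained sketch exercising the min/max macros and the 8-bit clamp (clip_pixel's body is copied from the hunk above so the example stands alone):

    #include <stdint.h>
    #include <stdio.h>

    #define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))
    #define AOMMAX(x, y) (((x) > (y)) ? (x) : (y))

    /* Same body as clip_pixel in aom_dsp_common.h above. */
    static inline uint8_t clip_pixel(int val) {
      return (val > 255) ? 255 : (val < 0) ? 0 : val;
    }

    int main(void) {
      const int overshoot = 300; /* e.g. prediction + residual past 8-bit range */
      printf("%d\n", AOMMAX(0, AOMMIN(overshoot, 255))); /* prints 255 */
      printf("%d\n", clip_pixel(overshoot));             /* prints 255 */
      printf("%d\n", clip_pixel(-7));                    /* prints 0 */
      return 0;
    }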
diff --git a/aom_dsp/vpx_dsp_rtcd.c b/aom_dsp/aom_dsp_rtcd.c
similarity index 75%
rename from aom_dsp/vpx_dsp_rtcd.c
rename to aom_dsp/aom_dsp_rtcd.c
index 3cd0cc1..438f901 100644
--- a/aom_dsp/vpx_dsp_rtcd.c
+++ b/aom_dsp/aom_dsp_rtcd.c
@@ -7,9 +7,9 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#define RTCD_C
-#include "./vpx_dsp_rtcd.h"
-#include "aom_ports/vpx_once.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_ports/aom_once.h"
-void vpx_dsp_rtcd() { once(setup_rtcd_internal); }
+void aom_dsp_rtcd() { once(setup_rtcd_internal); }
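
aom_dsp_rtcd() wires up run-time CPU dispatch exactly once per process. A minimal sketch of that pattern follows; the real setup_rtcd_internal is generated from aom_dsp_rtcd_defs.pl below, and pthread_once here merely stands in for the renamed aom_ports/aom_once.h helper:

    #include <pthread.h>
    #include <stdint.h>

    /* A function pointer that RTCD retargets once CPU features are known. */
    typedef void (*idct_fn)(const int16_t *in, uint8_t *out);
    static void idct_c(const int16_t *in, uint8_t *out) { (void)in; (void)out; }
    static void idct_sse2(const int16_t *in, uint8_t *out) { (void)in; (void)out; }

    idct_fn aom_idct_ptr = idct_c; /* safe C fallback until dispatch runs */

    static int cpu_has_sse2(void) { return 1; /* placeholder for a CPUID probe */ }

    static void setup_rtcd_internal(void) {
      if (cpu_has_sse2()) aom_idct_ptr = idct_sse2; /* pick the best kernel */
    }

    static pthread_once_t rtcd_once = PTHREAD_ONCE_INIT;
    void aom_dsp_rtcd(void) { pthread_once(&rtcd_once, setup_rtcd_internal); }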
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
new file mode 100644
index 0000000..2afb3cd
--- /dev/null
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -0,0 +1,1929 @@
+sub aom_dsp_forward_decls() {
+print <<EOF
+/*
+ * DSP
+ */
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
+
+EOF
+}
+forward_decls qw/aom_dsp_forward_decls/;
+
+# optimizations which depend on multiple features
+$avx2_ssse3 = '';
+if ((aom_config("HAVE_AVX2") eq "yes") && (aom_config("HAVE_SSSE3") eq "yes")) {
+ $avx2_ssse3 = 'avx2';
+}
+
+# functions that are 64 bit only.
+$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
+if ($opts{arch} eq "x86_64") {
+ $mmx_x86_64 = 'mmx';
+ $sse2_x86_64 = 'sse2';
+ $ssse3_x86_64 = 'ssse3';
+ $avx_x86_64 = 'avx';
+ $avx2_x86_64 = 'avx2';
+}
+
+if (aom_config("CONFIG_EXT_PARTITION") eq "yes") {
+ @block_widths = (4, 8, 16, 32, 64, 128)
+} else {
+ @block_widths = (4, 8, 16, 32, 64)
+}
+
+@block_sizes = ();
+foreach $w (@block_widths) {
+ foreach $h (@block_widths) {
+ push @block_sizes, [$w, $h] if ($w <= 2*$h && $h <= 2*$w) ;
+ }
+}
+
+#
+# Intra prediction
+#
+
+add_proto qw/void aom_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_4x4 sse2/;
+
+add_proto qw/void aom_d207e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_4x4/;
+
+add_proto qw/void aom_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_4x4 neon sse2/;
+
+add_proto qw/void aom_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_4x4/;
+
+add_proto qw/void aom_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_4x4 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_4x4/;
+
+add_proto qw/void aom_d63f_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63f_predictor_4x4/;
+
+add_proto qw/void aom_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_he_predictor_4x4/;
+
+add_proto qw/void aom_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_4x4/;
+
+add_proto qw/void aom_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_4x4 neon/;
+
+add_proto qw/void aom_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_4x4 ssse3/;
+
+add_proto qw/void aom_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_4x4 neon msa sse2/;
+
+add_proto qw/void aom_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_ve_predictor_4x4/;
+
+add_proto qw/void aom_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_4x4 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
+
+add_proto qw/void aom_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_4x4 msa neon sse2/;
+
+add_proto qw/void aom_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_8x8/;
+
+add_proto qw/void aom_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_8x8 neon sse2/;
+
+add_proto qw/void aom_d45e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_8x8/;
+
+add_proto qw/void aom_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_8x8/;
+
+add_proto qw/void aom_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_8x8/;
+
+add_proto qw/void aom_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_8x8/;
+
+add_proto qw/void aom_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_8x8 ssse3/;
+
+add_proto qw/void aom_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_8x8 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
+
+add_proto qw/void aom_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_8x8 neon msa sse2/;
+
+add_proto qw/void aom_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_16x16/;
+
+add_proto qw/void aom_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_16x16 neon ssse3/;
+
+add_proto qw/void aom_d45e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_16x16/;
+
+add_proto qw/void aom_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_16x16/;
+
+add_proto qw/void aom_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
+
+add_proto qw/void aom_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_16x16/;
+
+add_proto qw/void aom_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_16x16/;
+
+add_proto qw/void aom_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_16x16 ssse3/;
+
+add_proto qw/void aom_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_16x16 dspr2 neon msa sse2/;
+
+add_proto qw/void aom_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_16x16 neon msa sse2/;
+
+add_proto qw/void aom_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d207e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d207e_predictor_32x32/;
+
+add_proto qw/void aom_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d45e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d45e_predictor_32x32/;
+
+add_proto qw/void aom_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_d63e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d63e_predictor_32x32/;
+
+add_proto qw/void aom_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_h_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d117_predictor_32x32/;
+
+add_proto qw/void aom_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d135_predictor_32x32/;
+
+add_proto qw/void aom_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_d153_predictor_32x32 ssse3/;
+
+add_proto qw/void aom_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_v_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_tm_predictor_32x32 neon msa sse2/;
+
+add_proto qw/void aom_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_top_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_left_predictor_32x32 msa neon sse2/;
+
+add_proto qw/void aom_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+specialize qw/aom_dc_128_predictor_32x32 msa neon sse2/;
+
+# High bitdepth functions
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d207e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207e_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d45e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45e_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d63e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63e_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_h_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d117_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d135_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d153_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_v_predictor_4x4 sse2/;
+
+ add_proto qw/void aom_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_tm_predictor_4x4 sse2/;
+
+ add_proto qw/void aom_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_predictor_4x4 sse2/;
+
+ add_proto qw/void aom_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_top_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_left_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_128_predictor_4x4/;
+
+ add_proto qw/void aom_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d207e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207e_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d45e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45e_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d63e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63e_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_h_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d117_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d135_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d153_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_v_predictor_8x8 sse2/;
+
+ add_proto qw/void aom_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_tm_predictor_8x8 sse2/;
+
+ add_proto qw/void aom_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_predictor_8x8 sse2/;
+
+ add_proto qw/void aom_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_top_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_left_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_128_predictor_8x8/;
+
+ add_proto qw/void aom_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d207e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207e_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d45e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45e_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d63e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63e_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_h_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d117_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d135_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d153_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_v_predictor_16x16 sse2/;
+
+ add_proto qw/void aom_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_tm_predictor_16x16 sse2/;
+
+ add_proto qw/void aom_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_predictor_16x16 sse2/;
+
+ add_proto qw/void aom_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_top_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_left_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_128_predictor_16x16/;
+
+ add_proto qw/void aom_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d207e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d207e_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d45e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d45e_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d63e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d63e_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_h_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d117_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d135_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_d153_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_v_predictor_32x32 sse2/;
+
+ add_proto qw/void aom_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_tm_predictor_32x32 sse2/;
+
+ add_proto qw/void aom_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_predictor_32x32 sse2/;
+
+ add_proto qw/void aom_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_top_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_left_predictor_32x32/;
+
+ add_proto qw/void aom_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/aom_highbd_dc_128_predictor_32x32/;
+} # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Sub Pixel Filters
+#
+add_proto qw/void aom_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+
+specialize qw/aom_convolve_copy sse2 /;
+specialize qw/aom_convolve_avg sse2 /;
+specialize qw/aom_convolve8 sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_horiz sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_vert sse2 ssse3/, "$avx2_ssse3";
+specialize qw/aom_convolve8_avg sse2 ssse3/;
+specialize qw/aom_convolve8_avg_horiz sse2 ssse3/;
+specialize qw/aom_convolve8_avg_vert sse2 ssse3/;
+specialize qw/aom_scaled_2d ssse3/;
+
+# TODO(any): These need to be extended to up to 128x128 block sizes
+if (!(aom_config("CONFIG_AV1") eq "yes" && aom_config("CONFIG_EXT_PARTITION") eq "yes")) {
+ specialize qw/aom_convolve_copy neon dspr2 msa/;
+ specialize qw/aom_convolve_avg neon dspr2 msa/;
+ specialize qw/aom_convolve8 neon dspr2 msa/;
+ specialize qw/aom_convolve8_horiz neon dspr2 msa/;
+ specialize qw/aom_convolve8_vert neon dspr2 msa/;
+ specialize qw/aom_convolve8_avg neon dspr2 msa/;
+ specialize qw/aom_convolve8_avg_horiz neon dspr2 msa/;
+ specialize qw/aom_convolve8_avg_vert neon dspr2 msa/;
+}
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve_copy sse2/;
+
+ add_proto qw/void aom_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve_avg sse2/;
+
+ add_proto qw/void aom_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8/, "$sse2_x86_64";
+
+ add_proto qw/void aom_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8_horiz/, "$sse2_x86_64";
+
+ add_proto qw/void aom_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8_vert/, "$sse2_x86_64";
+
+ add_proto qw/void aom_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8_avg/, "$sse2_x86_64";
+
+ add_proto qw/void aom_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+
+ add_proto qw/void aom_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ specialize qw/aom_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+} # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Loopfilter
+#
+add_proto qw/void aom_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_16 sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_16_neon_asm=aom_lpf_vertical_16_neon;
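+# (Assigning to $<func>_<arch> overrides the symbol a flavour binds to: the
+# neon_asm flavour above resolves to the assembly routine exported as
+# aom_lpf_vertical_16_neon rather than to a nonexistent *_neon_asm symbol.
+# The same idiom recurs throughout this section.)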
+
+add_proto qw/void aom_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_16_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_16_dual_neon_asm=aom_lpf_vertical_16_dual_neon;
+
+add_proto qw/void aom_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_8 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_vertical_8_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_vertical_8_dual_neon_asm=aom_lpf_vertical_8_dual_neon;
+
+add_proto qw/void aom_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_vertical_4 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_vertical_4_dual sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_edge_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_edge_8 sse2 avx2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_edge_8_neon_asm=aom_lpf_horizontal_edge_8_neon;
+
+add_proto qw/void aom_lpf_horizontal_edge_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_edge_16 sse2 avx2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_edge_16_neon_asm=aom_lpf_horizontal_edge_16_neon;
+
+add_proto qw/void aom_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_8 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_horizontal_8_dual sse2 neon_asm dspr2 msa/;
+$aom_lpf_horizontal_8_dual_neon_asm=aom_lpf_horizontal_8_dual_neon;
+
+add_proto qw/void aom_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
+specialize qw/aom_lpf_horizontal_4 sse2 neon dspr2 msa/;
+
+add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
+specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_vertical_16 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_vertical_16_dual sse2/;
+
+ add_proto qw/void aom_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_vertical_8 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+ specialize qw/aom_highbd_lpf_vertical_8_dual sse2/;
+
+ add_proto qw/void aom_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_vertical_4 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+ specialize qw/aom_highbd_lpf_vertical_4_dual sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_edge_8 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_edge_16 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_8 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_8_dual sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_4 sse2/;
+
+ add_proto qw/void aom_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
+ specialize qw/aom_highbd_lpf_horizontal_4_dual sse2/;
+} # CONFIG_AOM_HIGHBITDEPTH
+
+#
+# Encoder functions.
+#
+
+#
+# Forward transform
+#
+if ((aom_config("CONFIG_AV1_ENCODER") eq "yes")) {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct4x4 sse2/;
+
+ add_proto qw/void aom_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct4x4_1 sse2/;
+
+ add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct8x8 sse2/;
+
+ add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct8x8_1 sse2/;
+
+ add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct16x16 sse2/;
+
+ add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct16x16_1 sse2/;
+
+ add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32 sse2/;
+
+ add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32_rd sse2/;
+
+ add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32_1 sse2/;
+
+ add_proto qw/void aom_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct4x4 sse2/;
+
+ add_proto qw/void aom_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct8x8 sse2/;
+
+ add_proto qw/void aom_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct8x8_1/;
+
+ add_proto qw/void aom_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct16x16 sse2/;
+
+ add_proto qw/void aom_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct16x16_1/;
+
+ add_proto qw/void aom_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct32x32 sse2/;
+
+ add_proto qw/void aom_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct32x32_rd sse2/;
+
+ add_proto qw/void aom_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_highbd_fdct32x32_1/;
+} else {
+ add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct4x4 sse2 msa/;
+
+ add_proto qw/void aom_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct4x4_1 sse2/;
+
+ add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct8x8_1 sse2 neon msa/;
+
+ add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct16x16 sse2 msa/;
+
+ add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct16x16_1 sse2 msa/;
+
+ add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32 sse2 avx2 msa/;
+
+ add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32_rd sse2 avx2 msa/;
+
+ add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+ specialize qw/aom_fdct32x32_1 sse2 msa/;
+} # CONFIG_AOM_HIGHBITDEPTH
+} # CONFIG_AV1_ENCODER
+
+#
+# Inverse transform
+#
+if (aom_config("CONFIG_AV1") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  # Note: as optimized versions of these functions are added, we need a check
+  # to ensure that when CONFIG_EMULATE_HARDWARE is on, only the C versions
+  # are used.
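+  # (Background, to the best of our understanding: with
+  # CONFIG_EMULATE_HARDWARE the C transforms clamp intermediates to the bit
+  # widths a hardware implementation would use, and the SIMD kernels do not,
+  # so their outputs can diverge. An empty specialize() list pins dispatch to
+  # the _c version; the if/else on CONFIG_EMULATE_HARDWARE below does this.)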
+ add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_1_add/;
+
+ add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_16_add sse2/;
+
+ add_proto qw/void aom_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct4x4_1_add/;
+
+ add_proto qw/void aom_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct8x8_1_add/;
+
+ add_proto qw/void aom_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct16x16_1_add/;
+
+ add_proto qw/void aom_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct32x32_1024_add/;
+
+ add_proto qw/void aom_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct32x32_34_add/;
+
+ add_proto qw/void aom_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct32x32_1_add/;
+
+ add_proto qw/void aom_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_iwht4x4_1_add/;
+
+ add_proto qw/void aom_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_iwht4x4_16_add/;
+
+  # Force C-only versions when CONFIG_EMULATE_HARDWARE is enabled
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_16_add/;
+
+ add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_1_add/;
+
+ add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_64_add/;
+
+ add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_12_add/;
+
+ add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_1_add/;
+
+ add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_256_add/;
+
+ add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_10_add/;
+
+ add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_1_add/;
+
+ add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1024_add/;
+
+ add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_135_add/;
+
+ add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_34_add/;
+
+ add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1_add/;
+
+ add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct4x4_16_add/;
+
+ add_proto qw/void aom_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct8x8_64_add/;
+
+ add_proto qw/void aom_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct8x8_10_add/;
+
+ add_proto qw/void aom_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct16x16_256_add/;
+
+ add_proto qw/void aom_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct16x16_10_add/;
+ } else {
+ add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_16_add sse2/;
+
+ add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_1_add sse2/;
+
+ add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_64_add sse2/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_12_add sse2/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_1_add sse2/;
+
+ add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_256_add sse2/;
+
+ add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_10_add sse2/;
+
+ add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_1_add sse2/;
+
+ add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1024_add sse2/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_135_add sse2/, "$ssse3_x86_64";
+ # Need to add 135 eob idct32x32 implementations.
+ $aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
+
+ add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_34_add sse2/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1_add sse2/;
+
+ add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct4x4_16_add sse2/;
+
+ add_proto qw/void aom_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct8x8_64_add sse2/;
+
+ add_proto qw/void aom_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct8x8_10_add sse2/;
+
+ add_proto qw/void aom_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct16x16_256_add sse2/;
+
+ add_proto qw/void aom_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+ specialize qw/aom_highbd_idct16x16_10_add sse2/;
+ } # CONFIG_EMULATE_HARDWARE
+} else {
+  # Force C-only versions when CONFIG_EMULATE_HARDWARE is enabled
+ if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
+ add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_1_add/;
+
+ add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_16_add/;
+
+ add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_1_add/;
+
+ add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_64_add/;
+
+ add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_12_add/;
+
+ add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_1_add/;
+
+ add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_256_add/;
+
+ add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_10_add/;
+
+ add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1024_add/;
+
+ add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_135_add/;
+
+ add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_34_add/;
+
+ add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1_add/;
+
+ add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_1_add/;
+
+ add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_16_add/;
+ } else {
+ add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_1_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct4x4_16_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_1_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_1_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_256_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_10_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+ # Need to add 135 eob idct32x32 implementations.
+ $aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
+ $aom_idct32x32_135_add_neon=aom_idct32x32_1024_add_neon;
+ $aom_idct32x32_135_add_dspr2=aom_idct32x32_1024_add_dspr2;
+ $aom_idct32x32_135_add_msa=aom_idct32x32_1024_add_msa;
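+    # (The _1024/_135/_34/_1 suffixes give the maximum eob, i.e. the number
+    # of nonzero coefficients, a kernel handles. Until dedicated reduced-eob
+    # kernels land, the remaps above fall back to the full 1024-coefficient
+    # implementations: correct, just slower than a tuned version would be.)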
+
+ add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+ # Need to add 34 eob idct32x32 neon implementation.
+ $aom_idct32x32_34_add_neon=aom_idct32x32_1024_add_neon;
+
+ add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct32x32_1_add sse2 neon dspr2 msa/;
+
+ add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_1_add msa/;
+
+ add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_iwht4x4_16_add msa sse2/;
+ } # CONFIG_EMULATE_HARDWARE
+} # CONFIG_AOM_HIGHBITDEPTH
+} # CONFIG_AV1
+
+#
+# Quantization
+#
+if (aom_config("CONFIG_AOM_QM") eq "yes") {
+ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+ add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+ add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+ add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+ } # CONFIG_AOM_HIGHBITDEPTH
+ } # CONFIG_AV1_ENCODER
+} else {
+ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+ add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64";
+
+ add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_highbd_quantize_b sse2/;
+
+ add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_highbd_quantize_b_32x32 sse2/;
+ } # CONFIG_AOM_HIGHBITDEPTH
+ } # CONFIG_AV1_ENCODER
+} # CONFIG_AOM_QM
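+
+# (With CONFIG_AOM_QM the quantizers take two extra arguments, the forward
+# and inverse quantization matrices qm_ptr/iqm_ptr. The existing SSE2/SSSE3
+# kernels were written for the plain signature, which is presumably why the
+# QM branch above specializes nothing and always runs the C code.)
+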
+if (aom_config("CONFIG_AV1") eq "yes") {
+ #
+ # Alpha blending with mask
+ #
+ add_proto qw/void aom_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
+ add_proto qw/void aom_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
+ add_proto qw/void aom_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
+ specialize "aom_blend_a64_mask", qw/sse4_1/;
+ specialize "aom_blend_a64_hmask", qw/sse4_1/;
+ specialize "aom_blend_a64_vmask", qw/sse4_1/;
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx, int bd";
+ add_proto qw/void aom_highbd_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
+ add_proto qw/void aom_highbd_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
+ specialize "aom_highbd_blend_a64_mask", qw/sse4_1/;
+ specialize "aom_highbd_blend_a64_hmask", qw/sse4_1/;
+ specialize "aom_highbd_blend_a64_vmask", qw/sse4_1/;
+ }
+} # CONFIG_AV1
+
+if (aom_config("CONFIG_ENCODERS") eq "yes") {
+#
+# Block subtraction
+#
+add_proto qw/void aom_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride";
+specialize qw/aom_subtract_block neon msa sse2/;
+
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+#
+# Sum of Squares
+#
+add_proto qw/uint64_t aom_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
+specialize qw/aom_sum_squares_2d_i16 sse2/;
+
+add_proto qw/uint64_t aom_sum_squares_i16/, "const int16_t *src, uint32_t N";
+specialize qw/aom_sum_squares_i16 sse2/;
+}
+
+#
+# Avg
+#
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+ add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
+ specialize qw/aom_avg_8x8 sse2 neon msa/;
+ add_proto qw/unsigned int aom_avg_4x4/, "const uint8_t *, int p";
+ specialize qw/aom_avg_4x4 sse2 neon msa/;
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/unsigned int aom_highbd_avg_8x8/, "const uint8_t *, int p";
+ specialize qw/aom_highbd_avg_8x8/;
+ add_proto qw/unsigned int aom_highbd_avg_4x4/, "const uint8_t *, int p";
+ specialize qw/aom_highbd_avg_4x4/;
+ add_proto qw/void aom_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
+ specialize qw/aom_highbd_subtract_block sse2/;
+ }
+
+ #
+ # Minmax
+ #
+ add_proto qw/void aom_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
+ specialize qw/aom_minmax_8x8 sse2 neon/;
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
+ specialize qw/aom_highbd_minmax_8x8/;
+ }
+
+ add_proto qw/void aom_hadamard_8x8/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
+ specialize qw/aom_hadamard_8x8 sse2 neon/, "$ssse3_x86_64";
+
+ add_proto qw/void aom_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
+ specialize qw/aom_hadamard_16x16 sse2 neon/;
+
+ add_proto qw/int aom_satd/, "const int16_t *coeff, int length";
+ specialize qw/aom_satd sse2 neon/;
+
+ add_proto qw/void aom_int_pro_row/, "int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height";
+ specialize qw/aom_int_pro_row sse2 neon/;
+
+ add_proto qw/int16_t aom_int_pro_col/, "const uint8_t *ref, const int width";
+ specialize qw/aom_int_pro_col sse2 neon/;
+
+ add_proto qw/int aom_vector_var/, "const int16_t *ref, const int16_t *src, const int bwl";
+ specialize qw/aom_vector_var neon sse2/;
+} # CONFIG_AV1_ENCODER
+
+#
+# Single block SAD / Single block Avg SAD
+#
+foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+ add_proto qw/unsigned int/, "aom_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
+}
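+# (The loop above stamps out one prototype per (w, h) pair in @block_sizes,
+# which is defined earlier in this file and, with CONFIG_EXT_PARTITION,
+# includes the 128-pixel sizes. For example, (64, 32) expands to:
+#
+#   add_proto qw/unsigned int/, "aom_sad64x32", "const uint8_t *src_ptr, ...";
+#   add_proto qw/unsigned int/, "aom_sad64x32_avg", "..., const uint8_t *second_pred";
+#
+# The per-size specialize() calls below then attach SIMD flavours by name.)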
+
+specialize qw/aom_sad128x128 sse2/;
+specialize qw/aom_sad128x64 sse2/;
+specialize qw/aom_sad64x128 sse2/;
+specialize qw/aom_sad64x64 avx2 neon msa sse2/;
+specialize qw/aom_sad64x32 avx2 msa sse2/;
+specialize qw/aom_sad32x64 avx2 msa sse2/;
+specialize qw/aom_sad32x32 avx2 neon msa sse2/;
+specialize qw/aom_sad32x16 avx2 msa sse2/;
+specialize qw/aom_sad16x32 msa sse2/;
+specialize qw/aom_sad16x16 media neon msa sse2/;
+specialize qw/aom_sad16x8 neon msa sse2/;
+specialize qw/aom_sad8x16 neon msa sse2/;
+specialize qw/aom_sad8x8 neon msa sse2/;
+specialize qw/aom_sad8x4 msa sse2/;
+specialize qw/aom_sad4x8 msa sse2/;
+specialize qw/aom_sad4x4 neon msa sse2/;
+
+specialize qw/aom_sad128x128_avg sse2/;
+specialize qw/aom_sad128x64_avg sse2/;
+specialize qw/aom_sad64x128_avg sse2/;
+specialize qw/aom_sad64x64_avg avx2 msa sse2/;
+specialize qw/aom_sad64x32_avg avx2 msa sse2/;
+specialize qw/aom_sad32x64_avg avx2 msa sse2/;
+specialize qw/aom_sad32x32_avg avx2 msa sse2/;
+specialize qw/aom_sad32x16_avg avx2 msa sse2/;
+specialize qw/aom_sad16x32_avg msa sse2/;
+specialize qw/aom_sad16x16_avg msa sse2/;
+specialize qw/aom_sad16x8_avg msa sse2/;
+specialize qw/aom_sad8x16_avg msa sse2/;
+specialize qw/aom_sad8x8_avg msa sse2/;
+specialize qw/aom_sad8x4_avg msa sse2/;
+specialize qw/aom_sad4x8_avg msa sse2/;
+specialize qw/aom_sad4x4_avg msa sse2/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
+ add_proto qw/unsigned int/, "aom_highbd_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
+ if ($w != 128 && $h != 128 && $w != 4) {
+ specialize "aom_highbd_sad${w}x${h}", qw/sse2/;
+ specialize "aom_highbd_sad${w}x${h}_avg", qw/sse2/;
+ }
+ }
+}
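+# (On the guard above: the SSE2 high-bitdepth SAD kernels cover widths of 8
+# and up at sizes through 64x64; 4-pixel-wide blocks and the 128-pixel
+# EXT_PARTITION sizes stay on the C implementation.)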
+
+#
+# Masked SAD
+#
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+ specialize "aom_masked_sad${w}x${h}", qw/ssse3/;
+ }
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+ specialize "aom_highbd_masked_sad${w}x${h}", qw/ssse3/;
+ }
+ }
+}
+
+#
+# OBMC SAD
+#
+if (aom_config("CONFIG_OBMC") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
+ specialize "aom_obmc_sad${w}x${h}", qw/sse4_1/;
+ }
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
+ specialize "aom_highbd_obmc_sad${w}x${h}", qw/sse4_1/;
+ }
+ }
+}
+
+#
+# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
+#
+# Blocks of 3
+foreach $s (@block_widths) {
+ add_proto qw/void/, "aom_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+specialize qw/aom_sad64x64x3 msa/;
+specialize qw/aom_sad32x32x3 msa/;
+specialize qw/aom_sad16x16x3 sse3 ssse3 msa/;
+specialize qw/aom_sad8x8x3 sse3 msa/;
+specialize qw/aom_sad4x4x3 sse3 msa/;
+
+add_proto qw/void/, "aom_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad16x8x3 sse3 ssse3 msa/;
+add_proto qw/void/, "aom_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x16x3 sse3 msa/;
+
+# Blocks of 8
+foreach $s (@block_widths) {
+ add_proto qw/void/, "aom_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+specialize qw/aom_sad64x64x8 msa/;
+specialize qw/aom_sad32x32x8 msa/;
+specialize qw/aom_sad16x16x8 sse4_1 msa/;
+specialize qw/aom_sad8x8x8 sse4_1 msa/;
+specialize qw/aom_sad4x4x8 sse4_1 msa/;
+
+add_proto qw/void/, "aom_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad16x8x8 sse4_1 msa/;
+add_proto qw/void/, "aom_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x16x8 sse4_1 msa/;
+add_proto qw/void/, "aom_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad8x4x8 msa/;
+add_proto qw/void/, "aom_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+specialize qw/aom_sad4x8x8 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach $s (@block_widths) {
+ # Blocks of 3
+ add_proto qw/void/, "aom_highbd_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ # Blocks of 8
+ add_proto qw/void/, "aom_highbd_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ }
+ # Blocks of 3
+ add_proto qw/void/, "aom_highbd_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ add_proto qw/void/, "aom_highbd_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ # Blocks of 8
+ add_proto qw/void/, "aom_highbd_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ add_proto qw/void/, "aom_highbd_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ add_proto qw/void/, "aom_highbd_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+ add_proto qw/void/, "aom_highbd_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
+}
+
+#
+# Multi-block SAD, comparing a reference to N independent blocks
+#
+foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/void/, "aom_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+}
+
+specialize qw/aom_sad128x128x4d sse2/;
+specialize qw/aom_sad128x64x4d sse2/;
+specialize qw/aom_sad64x128x4d sse2/;
+specialize qw/aom_sad64x64x4d avx2 neon msa sse2/;
+specialize qw/aom_sad64x32x4d msa sse2/;
+specialize qw/aom_sad32x64x4d msa sse2/;
+specialize qw/aom_sad32x32x4d avx2 neon msa sse2/;
+specialize qw/aom_sad32x16x4d msa sse2/;
+specialize qw/aom_sad16x32x4d msa sse2/;
+specialize qw/aom_sad16x16x4d neon msa sse2/;
+specialize qw/aom_sad16x8x4d msa sse2/;
+specialize qw/aom_sad8x16x4d msa sse2/;
+specialize qw/aom_sad8x8x4d msa sse2/;
+specialize qw/aom_sad8x4x4d msa sse2/;
+specialize qw/aom_sad4x8x4d msa sse2/;
+specialize qw/aom_sad4x4x4d msa sse2/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ #
+ # Multi-block SAD, comparing a reference to N independent blocks
+ #
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/void/, "aom_highbd_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ if ($w != 128 && $h != 128) {
+ specialize "aom_highbd_sad${w}x${h}x4d", qw/sse2/;
+ }
+ }
+}
+
+#
+# Structured Similarity (SSIM)
+#
+if (aom_config("CONFIG_INTERNAL_STATS") eq "yes") {
+ add_proto qw/void aom_ssim_parms_8x8/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+ specialize qw/aom_ssim_parms_8x8/, "$sse2_x86_64";
+
+ add_proto qw/void aom_ssim_parms_16x16/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+ specialize qw/aom_ssim_parms_16x16/, "$sse2_x86_64";
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
+ }
+}
+} # CONFIG_ENCODERS
+
+if (aom_config("CONFIG_ENCODERS") eq "yes") {
+
+#
+# Specialty Variance
+#
+add_proto qw/void aom_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+add_proto qw/void aom_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+specialize qw/aom_get16x16var sse2 avx2 neon msa/;
+specialize qw/aom_get8x8var sse2 neon msa/;
+
+add_proto qw/unsigned int aom_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int aom_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+
+specialize qw/aom_mse16x16 sse2 avx2 media neon msa/;
+specialize qw/aom_mse16x8 sse2 msa/;
+specialize qw/aom_mse8x16 sse2 msa/;
+specialize qw/aom_mse8x8 sse2 msa/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach $bd (8, 10, 12) {
+ add_proto qw/void/, "aom_highbd_${bd}_get16x16var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void/, "aom_highbd_${bd}_get8x8var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_mse16x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_mse16x8", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_mse8x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_mse8x8", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+
+ specialize "aom_highbd_${bd}_mse16x16", qw/sse2/;
+ specialize "aom_highbd_${bd}_mse8x8", qw/sse2/;
+ }
+}
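+# (The $bd loop above emits one variant per supported bit depth, e.g.
+# aom_highbd_8_mse16x16, aom_highbd_10_mse16x16 and aom_highbd_12_mse16x16;
+# callers select the variant matching the coded bit depth.)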
+
+#
+# ...
+#
+add_proto qw/void aom_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
+specialize qw/aom_upsampled_pred sse2/;
+add_proto qw/void aom_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
+specialize qw/aom_comp_avg_upsampled_pred sse2/;
+
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
+ specialize qw/aom_highbd_upsampled_pred sse2/;
+ add_proto qw/void aom_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
+ specialize qw/aom_highbd_comp_avg_upsampled_pred sse2/;
+}
+
+#
+# ...
+#
+add_proto qw/unsigned int aom_get_mb_ss/, "const int16_t *";
+add_proto qw/unsigned int aom_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
+
+specialize qw/aom_get_mb_ss sse2 msa/;
+specialize qw/aom_get4x4sse_cs neon msa/;
+
+#
+# Variance / Subpixel Variance / Subpixel Avg Variance
+#
+foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/uint32_t/, "aom_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t/, "aom_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+}
+
+specialize qw/aom_variance64x64 sse2 avx2 neon msa/;
+specialize qw/aom_variance64x32 sse2 avx2 neon msa/;
+specialize qw/aom_variance32x64 sse2 neon msa/;
+specialize qw/aom_variance32x32 sse2 avx2 neon msa/;
+specialize qw/aom_variance32x16 sse2 avx2 msa/;
+specialize qw/aom_variance16x32 sse2 msa/;
+specialize qw/aom_variance16x16 sse2 avx2 media neon msa/;
+specialize qw/aom_variance16x8 sse2 neon msa/;
+specialize qw/aom_variance8x16 sse2 neon msa/;
+specialize qw/aom_variance8x8 sse2 media neon msa/;
+specialize qw/aom_variance8x4 sse2 msa/;
+specialize qw/aom_variance4x8 sse2 msa/;
+specialize qw/aom_variance4x4 sse2 msa/;
+
+specialize qw/aom_sub_pixel_variance64x64 avx2 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance64x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x64 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x32 avx2 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x16 media neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x8 media neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x4 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x4 msa sse2 ssse3/;
+
+specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach $bd (8, 10, 12) {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t/, "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t/, "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ if ($w != 128 && $h != 128 && $w != 4 && $h != 4) {
+ specialize "aom_highbd_${bd}_variance${w}x${h}", "sse2";
+ }
+ if ($w == 4 && $h == 4) {
+ specialize "aom_highbd_${bd}_variance${w}x${h}", "sse4_1";
+ }
+ if ($w != 128 && $h != 128 && $w != 4) {
+ specialize "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", qw/sse2/;
+ specialize "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", qw/sse2/;
+ }
+ if ($w == 4 && $h == 4) {
+ specialize "aom_highbd_${bd}_sub_pixel_variance${w}x${h}", "sse4_1";
+ specialize "aom_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "sse4_1";
+ }
+ }
+ }
+} # CONFIG_AOM_HIGHBITDEPTH
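+# (On the guards above: the plain SSE2 high-bitdepth variance kernels require
+# both dimensions to be at least 8, with 4x4 served by a dedicated SSE4.1
+# kernel instead; the sub-pixel ones require a width of at least 8; and the
+# 128-pixel EXT_PARTITION sizes have no SIMD yet, so they stay on C.)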
+
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+#
+# Masked Variance / Masked Subpixel Variance
+#
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+ specialize "aom_masked_variance${w}x${h}", qw/ssse3/;
+ specialize "aom_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
+ }
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach $bd ("_", "_10_", "_12_") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+ specialize "aom_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
+ specialize "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
+ }
+ }
+ }
+}
+
+#
+# OBMC Variance / OBMC Subpixel Variance
+#
+if (aom_config("CONFIG_OBMC") eq "yes") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+ specialize "aom_obmc_variance${w}x${h}", q/sse4_1/;
+ specialize "aom_obmc_sub_pixel_variance${w}x${h}";
+ }
+
+ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ foreach $bd ("_", "_10_", "_12_") {
+ foreach (@block_sizes) {
+ ($w, $h) = @$_;
+ add_proto qw/unsigned int/, "aom_highbd${bd}obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd${bd}obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
+ specialize "aom_highbd${bd}obmc_variance${w}x${h}", qw/sse4_1/;
+ specialize "aom_highbd${bd}obmc_sub_pixel_variance${w}x${h}";
+ }
+ }
+ }
+}
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
+
+add_proto qw/uint32_t aom_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
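
Each sub_pixel_avg kernel above chains three steps: bilinear interpolation of src at an eighth-pel (xoffset, yoffset), a rounded average with the compound predictor, and a plain variance against ref. A sketch for the 8x8 case, with bilinear_filter_2d and variance8x8 as hypothetical stand-ins for the static helpers in variance.c:

    uint32_t sub_pixel_avg_variance8x8_sketch(
        const uint8_t *src, int src_stride, int xoffset, int yoffset,
        const uint8_t *ref, int ref_stride, uint32_t *sse,
        const uint8_t *second_pred) {
      uint8_t temp[8 * 8], avg[8 * 8];
      /* hypothetical helper: eighth-pel bilinear filter in x, then y */
      bilinear_filter_2d(src, src_stride, xoffset, yoffset, temp, 8, 8, 8);
      for (int i = 0; i < 8 * 8; ++i) /* round-to-nearest compound average */
        avg[i] = (uint8_t)((temp[i] + second_pred[i] + 1) >> 1);
      return variance8x8(avg, 8, ref, ref_stride, sse); /* hypothetical */
    }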
+
+#
+# Specialty Subpixel
+#
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_variance_halfpixvar16x16_h sse2 media/;
+
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_variance_halfpixvar16x16_v sse2 media/;
+
+add_proto qw/uint32_t aom_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_variance_halfpixvar16x16_hv sse2 media/;
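
The halfpix kernels are fixed-offset fast paths for motion search: modulo rounding details, they match the generic sub-pixel kernel evaluated at the half-pel point, i.e. offset 4 in eighth-pel units. Sketch of the equivalent generic calls (src, ref, stride assumed in scope):

    static void halfpel_equivalents(const uint8_t *src, const uint8_t *ref,
                                    int stride) {
      uint32_t sse;
      aom_sub_pixel_variance16x16(src, stride, 4, 0, ref, stride, &sse); /* _h  */
      aom_sub_pixel_variance16x16(src, stride, 0, 4, ref, stride, &sse); /* _v  */
      aom_sub_pixel_variance16x16(src, stride, 4, 4, ref, stride, &sse); /* _hv */
    }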
+
+#
+# Comp Avg
+#
+add_proto qw/void aom_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
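
aom_comp_avg_pred forms the compound predictor by a rounded per-pixel average; a sketch consistent with the C reference (note comp_pred is written densely, at width stride):

    void comp_avg_pred_sketch(uint8_t *comp_pred, const uint8_t *pred,
                              int width, int height, const uint8_t *ref,
                              int ref_stride) {
      for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j)
          comp_pred[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }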
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance64x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance64x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance32x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance32x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance32x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance16x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance16x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance8x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance8x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+
+ add_proto qw/unsigned int aom_highbd_10_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance64x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance64x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance32x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance32x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance32x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance16x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance16x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance8x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance8x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+
+ add_proto qw/unsigned int aom_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance64x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance64x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance32x64 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance32x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance32x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance16x32 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance16x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance8x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance8x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
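
One pattern worth noting in the _8/_10/_12 triplets above: all take uint8_t pointers that actually carry uint16_t samples (the high-bitdepth pointer convention), and the _10/_12 variants scale sum and sse back toward an 8-bit range so distortion stays comparable across bit depths. A 10-bit sketch, with the rounding shifts (2 for sum, 4 for sse; 4/8 at 12 bits) assumed from the C reference:

    static unsigned int highbd_10_variance_sketch(const uint16_t *src,
                                                  int src_stride,
                                                  const uint16_t *ref,
                                                  int ref_stride, int w, int h,
                                                  unsigned int *sse) {
      int64_t sum_long = 0;
      uint64_t sse_long = 0;
      for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
          const int diff = src[i * src_stride + j] - ref[i * ref_stride + j];
          sum_long += diff;
          sse_long += (uint64_t)((int64_t)diff * diff);
        }
      }
      const int sum = (int)((sum_long + 2) >> 2);  /* rounded >> 2 */
      *sse = (unsigned int)((sse_long + 8) >> 4);  /* rounded >> 4 */
      return *sse - (unsigned int)(((int64_t)sum * sum) / (w * h));
    }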
+
+ add_proto qw/void aom_highbd_8_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void aom_highbd_8_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+ add_proto qw/void aom_highbd_10_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void aom_highbd_10_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+
+ add_proto qw/void aom_highbd_12_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void aom_highbd_12_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
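
The get*var helpers expose the two raw moments instead of a finished variance, letting callers aggregate several blocks before normalizing. Deriving variance from them (src, ref and strides assumed in scope; >> 6 because 8*8 = 64):

    unsigned int sse;
    int sum;
    aom_highbd_8_get8x8var(src, src_stride, ref, ref_stride, &sse, &sum);
    const unsigned int var = sse - (unsigned int)(((int64_t)sum * sum) >> 6);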
+
+ add_proto qw/unsigned int aom_highbd_8_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_mse16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_8_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_8_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_8_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_mse8x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_mse16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_10_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_10_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_10_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_mse8x8 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_mse16x16 sse2/;
+
+ add_proto qw/unsigned int aom_highbd_12_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_12_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int aom_highbd_12_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_mse8x8 sse2/;
+
+ add_proto qw/void aom_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
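
Note the asymmetric signature: comp_pred is a real uint16_t buffer, while pred8/ref8 are uint16_t data passed through uint8_t* per the high-bitdepth convention. A sketch of the unwrapping, assuming the usual CONVERT_TO_SHORTPTR macro from aom_dsp_common.h:

    void highbd_comp_avg_pred_sketch(uint16_t *comp_pred, const uint8_t *pred8,
                                     int width, int height, const uint8_t *ref8,
                                     int ref_stride) {
      const uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
      const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
      for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j)
          comp_pred[j] = (uint16_t)((pred[j] + ref[j] + 1) >> 1);
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }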
+
+ #
+ # Subpixel Variance
+ #
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_12_sub_pixel_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_10_sub_pixel_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ specialize qw/aom_highbd_8_sub_pixel_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_12_sub_pixel_avg_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t aom_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_10_sub_pixel_avg_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t aom_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance64x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance64x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance32x64 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance32x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance32x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance16x32 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance16x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance16x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance8x16 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance8x8 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ specialize qw/aom_highbd_8_sub_pixel_avg_variance8x4 sse2/;
+
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+
+} # CONFIG_AOM_HIGHBITDEPTH
+
+} # CONFIG_ENCODERS
+
+1;
diff --git a/aom_dsp/vpx_filter.h b/aom_dsp/aom_filter.h
similarity index 88%
rename from aom_dsp/vpx_filter.h
rename to aom_dsp/aom_filter.h
index d977bd3..0a71817 100644
--- a/aom_dsp/vpx_filter.h
+++ b/aom_dsp/aom_filter.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_VPX_FILTER_H_
-#define VPX_DSP_VPX_FILTER_H_
+#ifndef AOM_DSP_AOM_FILTER_H_
+#define AOM_DSP_AOM_FILTER_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -39,4 +39,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_VPX_FILTER_H_
+#endif // AOM_DSP_AOM_FILTER_H_
diff --git a/aom_dsp/arm/vpx_convolve8_avg_neon.c b/aom_dsp/arm/aom_convolve8_avg_neon.c
similarity index 98%
rename from aom_dsp/arm/vpx_convolve8_avg_neon.c
rename to aom_dsp/arm/aom_convolve8_avg_neon.c
index c6b1831..701d5b9 100644
--- a/aom_dsp/arm/vpx_convolve8_avg_neon.c
+++ b/aom_dsp/arm/aom_convolve8_avg_neon.c
@@ -11,9 +11,9 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
@@ -38,7 +38,7 @@
return qdst;
}
-void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, // unused
@@ -218,7 +218,7 @@
return;
}
-void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, // unused
int x_step_q4, // unused
diff --git a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
similarity index 93%
rename from aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm
rename to aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
index e279d57..a408d9d 100644
--- a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm
@@ -14,11 +14,11 @@
; w%4 == 0
; h%4 == 0
; taps == 8
- ; VP9_FILTER_WEIGHT == 128
- ; VP9_FILTER_SHIFT == 7
+ ; AV1_FILTER_WEIGHT == 128
+ ; AV1_FILTER_SHIFT == 7
- EXPORT |vpx_convolve8_avg_horiz_neon|
- EXPORT |vpx_convolve8_avg_vert_neon|
+ EXPORT |aom_convolve8_avg_horiz_neon|
+ EXPORT |aom_convolve8_avg_vert_neon|
ARM
REQUIRE8
PRESERVE8
@@ -49,7 +49,7 @@
; sp[]int w
; sp[]int h
-|vpx_convolve8_avg_horiz_neon| PROC
+|aom_convolve8_avg_horiz_neon| PROC
push {r4-r10, lr}
sub r0, r0, #3 ; adjust for taps
@@ -72,7 +72,7 @@
mov r10, r6 ; w loop counter
-vpx_convolve8_avg_loop_horiz_v
+aom_convolve8_avg_loop_horiz_v
vld1.8 {d24}, [r0], r1
vld1.8 {d25}, [r0], r1
vld1.8 {d26}, [r0], r1
@@ -95,7 +95,7 @@
add r0, r0, #3
-vpx_convolve8_avg_loop_horiz
+aom_convolve8_avg_loop_horiz
add r5, r0, #64
vld1.32 {d28[]}, [r0], r1
@@ -164,20 +164,20 @@
vmov q9, q13
subs r6, r6, #4 ; w -= 4
- bgt vpx_convolve8_avg_loop_horiz
+ bgt aom_convolve8_avg_loop_horiz
; outer loop
mov r6, r10 ; restore w counter
add r0, r0, r9 ; src += src_stride * 4 - w
add r2, r2, r12 ; dst += dst_stride * 4 - w
subs r7, r7, #4 ; h -= 4
- bgt vpx_convolve8_avg_loop_horiz_v
+ bgt aom_convolve8_avg_loop_horiz_v
pop {r4-r10, pc}
ENDP
-|vpx_convolve8_avg_vert_neon| PROC
+|aom_convolve8_avg_vert_neon| PROC
push {r4-r8, lr}
; adjust for taps
@@ -193,7 +193,7 @@
lsl r1, r1, #1
lsl r3, r3, #1
-vpx_convolve8_avg_loop_vert_h
+aom_convolve8_avg_loop_vert_h
mov r4, r0
add r7, r0, r1, asr #1
mov r5, r2
@@ -213,7 +213,7 @@
vmovl.u8 q10, d20
vmovl.u8 q11, d22
-vpx_convolve8_avg_loop_vert
+aom_convolve8_avg_loop_vert
; always process a 4x4 block at a time
vld1.u32 {d24[0]}, [r7], r1
vld1.u32 {d26[0]}, [r4], r1
@@ -278,13 +278,13 @@
vmov d22, d25
subs r12, r12, #4 ; h -= 4
- bgt vpx_convolve8_avg_loop_vert
+ bgt aom_convolve8_avg_loop_vert
; outer loop
add r0, r0, #4
add r2, r2, #4
subs r6, r6, #4 ; w -= 4
- bgt vpx_convolve8_avg_loop_vert_h
+ bgt aom_convolve8_avg_loop_vert_h
pop {r4-r8, pc}
diff --git a/aom_dsp/arm/vpx_convolve8_neon.c b/aom_dsp/arm/aom_convolve8_neon.c
similarity index 98%
rename from aom_dsp/arm/vpx_convolve8_neon.c
rename to aom_dsp/arm/aom_convolve8_neon.c
index b84be93..3e5aaf0 100644
--- a/aom_dsp/arm/vpx_convolve8_neon.c
+++ b/aom_dsp/arm/aom_convolve8_neon.c
@@ -11,9 +11,9 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
@@ -38,7 +38,7 @@
return qdst;
}
-void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, // unused
@@ -204,7 +204,7 @@
return;
}
-void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, // unused
int x_step_q4, // unused
diff --git a/aom_dsp/arm/vpx_convolve8_neon_asm.asm b/aom_dsp/arm/aom_convolve8_neon_asm.asm
similarity index 93%
rename from aom_dsp/arm/vpx_convolve8_neon_asm.asm
rename to aom_dsp/arm/aom_convolve8_neon_asm.asm
index 2d0f2ae..800876f 100644
--- a/aom_dsp/arm/vpx_convolve8_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve8_neon_asm.asm
@@ -14,11 +14,11 @@
; w%4 == 0
; h%4 == 0
; taps == 8
- ; VP9_FILTER_WEIGHT == 128
- ; VP9_FILTER_SHIFT == 7
+ ; AV1_FILTER_WEIGHT == 128
+ ; AV1_FILTER_SHIFT == 7
- EXPORT |vpx_convolve8_horiz_neon|
- EXPORT |vpx_convolve8_vert_neon|
+ EXPORT |aom_convolve8_horiz_neon|
+ EXPORT |aom_convolve8_vert_neon|
ARM
REQUIRE8
PRESERVE8
@@ -49,7 +49,7 @@
; sp[]int w
; sp[]int h
-|vpx_convolve8_horiz_neon| PROC
+|aom_convolve8_horiz_neon| PROC
push {r4-r10, lr}
sub r0, r0, #3 ; adjust for taps
@@ -72,7 +72,7 @@
mov r10, r6 ; w loop counter
-vpx_convolve8_loop_horiz_v
+aom_convolve8_loop_horiz_v
vld1.8 {d24}, [r0], r1
vld1.8 {d25}, [r0], r1
vld1.8 {d26}, [r0], r1
@@ -95,7 +95,7 @@
add r0, r0, #3
-vpx_convolve8_loop_horiz
+aom_convolve8_loop_horiz
add r5, r0, #64
vld1.32 {d28[]}, [r0], r1
@@ -153,20 +153,20 @@
vmov q9, q13
subs r6, r6, #4 ; w -= 4
- bgt vpx_convolve8_loop_horiz
+ bgt aom_convolve8_loop_horiz
; outer loop
mov r6, r10 ; restore w counter
add r0, r0, r9 ; src += src_stride * 4 - w
add r2, r2, r12 ; dst += dst_stride * 4 - w
subs r7, r7, #4 ; h -= 4
- bgt vpx_convolve8_loop_horiz_v
+ bgt aom_convolve8_loop_horiz_v
pop {r4-r10, pc}
ENDP
-|vpx_convolve8_vert_neon| PROC
+|aom_convolve8_vert_neon| PROC
push {r4-r8, lr}
; adjust for taps
@@ -182,7 +182,7 @@
lsl r1, r1, #1
lsl r3, r3, #1
-vpx_convolve8_loop_vert_h
+aom_convolve8_loop_vert_h
mov r4, r0
add r7, r0, r1, asr #1
mov r5, r2
@@ -202,7 +202,7 @@
vmovl.u8 q10, d20
vmovl.u8 q11, d22
-vpx_convolve8_loop_vert
+aom_convolve8_loop_vert
; always process a 4x4 block at a time
vld1.u32 {d24[0]}, [r7], r1
vld1.u32 {d26[0]}, [r4], r1
@@ -256,13 +256,13 @@
vmov d22, d25
subs r12, r12, #4 ; h -= 4
- bgt vpx_convolve8_loop_vert
+ bgt aom_convolve8_loop_vert
; outer loop
add r0, r0, #4
add r2, r2, #4
subs r6, r6, #4 ; w -= 4
- bgt vpx_convolve8_loop_vert_h
+ bgt aom_convolve8_loop_vert_h
pop {r4-r8, pc}
diff --git a/aom_dsp/arm/vpx_convolve_avg_neon.c b/aom_dsp/arm/aom_convolve_avg_neon.c
similarity index 97%
rename from aom_dsp/arm/vpx_convolve_avg_neon.c
rename to aom_dsp/arm/aom_convolve_avg_neon.c
index a04d384..cfdfed9 100644
--- a/aom_dsp/arm/vpx_convolve_avg_neon.c
+++ b/aom_dsp/arm/aom_convolve_avg_neon.c
@@ -10,10 +10,10 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
-void vpx_convolve_avg_neon(const uint8_t *src, // r0
+void aom_convolve_avg_neon(const uint8_t *src, // r0
ptrdiff_t src_stride, // r1
uint8_t *dst, // r2
ptrdiff_t dst_stride, // r3
diff --git a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm
similarity index 98%
rename from aom_dsp/arm/vpx_convolve_avg_neon_asm.asm
rename to aom_dsp/arm/aom_convolve_avg_neon_asm.asm
index 97e6189..2177756 100644
--- a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm
@@ -8,14 +8,14 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_convolve_avg_neon|
+ EXPORT |aom_convolve_avg_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-|vpx_convolve_avg_neon| PROC
+|aom_convolve_avg_neon| PROC
push {r4-r6, lr}
ldrd r4, r5, [sp, #32]
mov r6, r2
diff --git a/aom_dsp/arm/vpx_convolve_copy_neon.c b/aom_dsp/arm/aom_convolve_copy_neon.c
similarity index 95%
rename from aom_dsp/arm/vpx_convolve_copy_neon.c
rename to aom_dsp/arm/aom_convolve_copy_neon.c
index 8000eb7..bb8a55c 100644
--- a/aom_dsp/arm/vpx_convolve_copy_neon.c
+++ b/aom_dsp/arm/aom_convolve_copy_neon.c
@@ -10,10 +10,10 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
-void vpx_convolve_copy_neon(const uint8_t *src, // r0
+void aom_convolve_copy_neon(const uint8_t *src, // r0
ptrdiff_t src_stride, // r1
uint8_t *dst, // r2
ptrdiff_t dst_stride, // r3
diff --git a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm
similarity index 97%
rename from aom_dsp/arm/vpx_convolve_copy_neon_asm.asm
rename to aom_dsp/arm/aom_convolve_copy_neon_asm.asm
index 89164ad..2d60bee3 100644
--- a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm
+++ b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm
@@ -8,14 +8,14 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_convolve_copy_neon|
+ EXPORT |aom_convolve_copy_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-|vpx_convolve_copy_neon| PROC
+|aom_convolve_copy_neon| PROC
push {r4-r5, lr}
ldrd r4, r5, [sp, #28]
diff --git a/aom_dsp/arm/vpx_convolve_neon.c b/aom_dsp/arm/aom_convolve_neon.c
similarity index 83%
rename from aom_dsp/arm/vpx_convolve_neon.c
rename to aom_dsp/arm/aom_convolve_neon.c
index 297b64b..11bff2a 100644
--- a/aom_dsp/arm/vpx_convolve_neon.c
+++ b/aom_dsp/arm/aom_convolve_neon.c
@@ -10,11 +10,11 @@
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
-void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
@@ -34,16 +34,16 @@
 * the temp buffer, which has lots of extra room and is subsequently discarded;
 * this is safe, if somewhat less than ideal.
*/
- vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+ aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
x_step_q4, filter_y, y_step_q4, w,
intermediate_height);
/* Step into the temp buffer 3 lines to get the actual frame data */
- vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+ aom_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}
-void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -57,9 +57,9 @@
/* This implementation has the same issues as above. In addition, we only want
* to average the values after both passes.
*/
- vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+ aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
x_step_q4, filter_y, y_step_q4, w,
intermediate_height);
- vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}
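
For context on the buffer arithmetic in this hunk: the 8-tap vertical filter needs 3 rows above and 4 rows below each output row, so the horizontal pass renders h + 7 rows into a 64-wide scratch buffer and the vertical pass starts 3 rows in. A sketch of the geometry (the temp size matches the widest 64x64 block):

    uint8_t temp[64 * (64 + 7)];            /* scratch for the widest block */
    const int intermediate_height = h + 7;  /* 3 rows above + 4 rows below  */
    /* pass 1: src - 3 * src_stride .. src + (h + 3) * src_stride -> temp   */
    /* pass 2: reads from temp + 64 * 3, the row aligned with dst row 0     */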
diff --git a/aom_dsp/arm/avg_neon.c b/aom_dsp/arm/avg_neon.c
index ff9cbb9..1463c17 100644
--- a/aom_dsp/arm/avg_neon.c
+++ b/aom_dsp/arm/avg_neon.c
@@ -11,10 +11,10 @@
#include <arm_neon.h>
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
const uint32x4_t a = vpaddlq_u16(v_16x8);
@@ -24,7 +24,7 @@
return vget_lane_u32(c, 0);
}
-unsigned int vpx_avg_4x4_neon(const uint8_t *s, int p) {
+unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) {
uint16x8_t v_sum;
uint32x2_t v_s0 = vdup_n_u32(0);
uint32x2_t v_s1 = vdup_n_u32(0);
@@ -36,7 +36,7 @@
return (horizontal_add_u16x8(v_sum) + 8) >> 4;
}
-unsigned int vpx_avg_8x8_neon(const uint8_t *s, int p) {
+unsigned int aom_avg_8x8_neon(const uint8_t *s, int p) {
uint8x8_t v_s0 = vld1_u8(s);
const uint8x8_t v_s1 = vld1_u8(s + p);
uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
@@ -64,7 +64,7 @@
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
-int vpx_satd_neon(const int16_t *coeff, int length) {
+int aom_satd_neon(const int16_t *coeff, int length) {
const int16x4_t zero = vdup_n_s16(0);
int32x4_t accum = vdupq_n_s32(0);
@@ -89,7 +89,7 @@
}
}
-void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
+void aom_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
const int ref_stride, const int height) {
int i;
uint16x8_t vec_sum_lo = vdupq_n_u16(0);
@@ -142,7 +142,7 @@
vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
}
-int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) {
+int16_t aom_int_pro_col_neon(uint8_t const *ref, const int width) {
int i;
uint16x8_t vec_sum = vdupq_n_u16(0);
@@ -158,7 +158,7 @@
// ref, src = [0, 510] - max diff = 16-bits
// bwl = {2, 3, 4}, width = {16, 32, 64}
-int vpx_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
+int aom_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
int width = 4 << bwl;
int32x4_t sse = vdupq_n_s32(0);
int16x8_t total = vdupq_n_s16(0);
@@ -198,7 +198,7 @@
}
}
-void vpx_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, int *min, int *max) {
// Load and concatenate.
const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));
diff --git a/aom_dsp/arm/bilinear_filter_media.asm b/aom_dsp/arm/bilinear_filter_media.asm
index f3f9754..fbbef25 100644
--- a/aom_dsp/arm/bilinear_filter_media.asm
+++ b/aom_dsp/arm/bilinear_filter_media.asm
@@ -9,8 +9,8 @@
;
- EXPORT |vpx_filter_block2d_bil_first_pass_media|
- EXPORT |vpx_filter_block2d_bil_second_pass_media|
+ EXPORT |aom_filter_block2d_bil_first_pass_media|
+ EXPORT |aom_filter_block2d_bil_second_pass_media|
AREA |.text|, CODE, READONLY ; name this block of code
@@ -20,13 +20,13 @@
; r2 unsigned int src_pitch,
; r3 unsigned int height,
; stack unsigned int width,
-; stack const short *vpx_filter
+; stack const short *aom_filter
;-------------------------------------
; The output is transposed and stored in the output array to make it easy for second pass filtering.
-|vpx_filter_block2d_bil_first_pass_media| PROC
+|aom_filter_block2d_bil_first_pass_media| PROC
stmdb sp!, {r4 - r11, lr}
- ldr r11, [sp, #40] ; vpx_filter address
+ ldr r11, [sp, #40] ; aom_filter address
ldr r4, [sp, #36] ; width
mov r12, r3 ; outer-loop counter
@@ -134,7 +134,7 @@
ldmia sp!, {r4 - r11, pc}
- ENDP ; |vpx_filter_block2d_bil_first_pass_media|
+ ENDP ; |aom_filter_block2d_bil_first_pass_media|
;---------------------------------
@@ -143,12 +143,12 @@
; r2 int dst_pitch,
; r3 unsigned int height,
; stack unsigned int width,
-; stack const short *vpx_filter
+; stack const short *aom_filter
;---------------------------------
-|vpx_filter_block2d_bil_second_pass_media| PROC
+|aom_filter_block2d_bil_second_pass_media| PROC
stmdb sp!, {r4 - r11, lr}
- ldr r11, [sp, #40] ; vpx_filter address
+ ldr r11, [sp, #40] ; aom_filter address
ldr r4, [sp, #36] ; width
ldr r5, [r11] ; load up filter coefficients
@@ -232,6 +232,6 @@
bne bil_height_loop_null_2nd
ldmia sp!, {r4 - r11, pc}
- ENDP ; |vpx_filter_block2d_second_pass_media|
+ ENDP ; |aom_filter_block2d_bil_second_pass_media|
END
diff --git a/aom_dsp/arm/fwd_txfm_neon.c b/aom_dsp/arm/fwd_txfm_neon.c
index 4763cdb..92fe3d8 100644
--- a/aom_dsp/arm/fwd_txfm_neon.c
+++ b/aom_dsp/arm/fwd_txfm_neon.c
@@ -10,10 +10,10 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
-void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
+void aom_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
int i;
// stage 1
int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);
@@ -170,7 +170,7 @@
}
} // for
{
- // from vpx_dct_sse2.c
+ // from aom_dct_sse2.c
// Post-condition (division by two)
// division of two 16 bits signed numbers using shifts
// n / 2 = (n - (n >> 15)) >> 1
@@ -202,7 +202,7 @@
}
}
-void vpx_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) {
+void aom_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) {
int r;
int16x8_t sum = vld1q_s16(&input[0]);
for (r = 1; r < 8; ++r) {
diff --git a/aom_dsp/arm/hadamard_neon.c b/aom_dsp/arm/hadamard_neon.c
index 46b2755..af955f0 100644
--- a/aom_dsp/arm/hadamard_neon.c
+++ b/aom_dsp/arm/hadamard_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2,
int16x8_t *a3, int16x8_t *a4, int16x8_t *a5,
@@ -130,7 +130,7 @@
*a7 = b3.val[1];
}
-void vpx_hadamard_8x8_neon(const int16_t *src_diff, int src_stride,
+void aom_hadamard_8x8_neon(const int16_t *src_diff, int src_stride,
int16_t *coeff) {
int16x8_t a0 = vld1q_s16(src_diff);
int16x8_t a1 = vld1q_s16(src_diff + src_stride);
@@ -159,19 +159,19 @@
vst1q_s16(coeff + 56, a7);
}
-void vpx_hadamard_16x16_neon(const int16_t *src_diff, int src_stride,
+void aom_hadamard_16x16_neon(const int16_t *src_diff, int src_stride,
int16_t *coeff) {
int i;
/* Rearrange 16x16 to 8x32 and remove stride.
* Top left first. */
- vpx_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
+ aom_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
/* Top right. */
- vpx_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
+ aom_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
/* Bottom left. */
- vpx_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
+ aom_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
/* Bottom right. */
- vpx_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);
+ aom_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);
for (i = 0; i < 64; i += 8) {
const int16x8_t a0 = vld1q_s16(coeff + 0);
diff --git a/aom_dsp/arm/idct16x16_1_add_neon.asm b/aom_dsp/arm/idct16x16_1_add_neon.asm
index dc459e2..e07614f 100644
--- a/aom_dsp/arm/idct16x16_1_add_neon.asm
+++ b/aom_dsp/arm/idct16x16_1_add_neon.asm
@@ -8,21 +8,21 @@
;
- EXPORT |vpx_idct16x16_1_add_neon|
+ EXPORT |aom_idct16x16_1_add_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest,
; int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct16x16_1_add_neon| PROC
+|aom_idct16x16_1_add_neon| PROC
ldrsh r0, [r0]
; generate cospi_16_64 = 11585
@@ -193,6 +193,6 @@
vst1.64 {d31}, [r12], r2
bx lr
- ENDP ; |vpx_idct16x16_1_add_neon|
+ ENDP ; |aom_idct16x16_1_add_neon|
END
diff --git a/aom_dsp/arm/idct16x16_1_add_neon.c b/aom_dsp/arm/idct16x16_1_add_neon.c
index a37e53c..2bdb333 100644
--- a/aom_dsp/arm/idct16x16_1_add_neon.c
+++ b/aom_dsp/arm/idct16x16_1_add_neon.c
@@ -13,7 +13,7 @@
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
-void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8x8_t d2u8, d3u8, d30u8, d31u8;
uint64x1_t d2u64, d3u64, d4u64, d5u64;
uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
diff --git a/aom_dsp/arm/idct16x16_add_neon.asm b/aom_dsp/arm/idct16x16_add_neon.asm
index 22a0c95..e22ab3e 100644
--- a/aom_dsp/arm/idct16x16_add_neon.asm
+++ b/aom_dsp/arm/idct16x16_add_neon.asm
@@ -8,10 +8,10 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_idct16x16_256_add_neon_pass1|
- EXPORT |vpx_idct16x16_256_add_neon_pass2|
- EXPORT |vpx_idct16x16_10_add_neon_pass1|
- EXPORT |vpx_idct16x16_10_add_neon_pass2|
+ EXPORT |aom_idct16x16_256_add_neon_pass1|
+ EXPORT |aom_idct16x16_256_add_neon_pass2|
+ EXPORT |aom_idct16x16_10_add_neon_pass1|
+ EXPORT |aom_idct16x16_10_add_neon_pass2|
ARM
REQUIRE8
PRESERVE8
@@ -36,7 +36,7 @@
MEND
AREA Block, CODE, READONLY ; name this block of code
-;void |vpx_idct16x16_256_add_neon_pass1|(int16_t *input,
+;void |aom_idct16x16_256_add_neon_pass1|(int16_t *input,
; int16_t *output, int output_stride)
;
; r0 int16_t input
@@ -46,7 +46,7 @@
; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
; registers and use them as buffer during calculation.
-|vpx_idct16x16_256_add_neon_pass1| PROC
+|aom_idct16x16_256_add_neon_pass1| PROC
; TODO(hkuang): Find a better way to load the elements.
; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
@@ -273,9 +273,9 @@
vst1.64 {d31}, [r1], r2
bx lr
- ENDP ; |vpx_idct16x16_256_add_neon_pass1|
+ ENDP ; |aom_idct16x16_256_add_neon_pass1|
-;void vpx_idct16x16_256_add_neon_pass2(int16_t *src,
+;void aom_idct16x16_256_add_neon_pass2(int16_t *src,
; int16_t *output,
; int16_t *pass1Output,
; int16_t skip_adding,
@@ -292,7 +292,7 @@
; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
; registers and use them as buffers during the calculation.
-|vpx_idct16x16_256_add_neon_pass2| PROC
+|aom_idct16x16_256_add_neon_pass2| PROC
push {r3-r9}
; TODO(hkuang): Find a better way to load the elements.
@@ -784,9 +784,9 @@
end_idct16x16_pass2
pop {r3-r9}
bx lr
- ENDP ; |vpx_idct16x16_256_add_neon_pass2|
+ ENDP ; |aom_idct16x16_256_add_neon_pass2|
-;void |vpx_idct16x16_10_add_neon_pass1|(int16_t *input,
+;void |aom_idct16x16_10_add_neon_pass1|(int16_t *input,
; int16_t *output, int output_stride)
;
; r0 int16_t input
@@ -796,7 +796,7 @@
; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
; registers and use them as buffers during the calculation.
-|vpx_idct16x16_10_add_neon_pass1| PROC
+|aom_idct16x16_10_add_neon_pass1| PROC
; TODO(hkuang): Find a better way to load the elements.
; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
@@ -905,9 +905,9 @@
vst1.64 {d31}, [r1], r2
bx lr
- ENDP ; |vpx_idct16x16_10_add_neon_pass1|
+ ENDP ; |aom_idct16x16_10_add_neon_pass1|
-;void vpx_idct16x16_10_add_neon_pass2(int16_t *src,
+;void aom_idct16x16_10_add_neon_pass2(int16_t *src,
; int16_t *output,
; int16_t *pass1Output,
; int16_t skip_adding,
@@ -924,7 +924,7 @@
; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
; registers and use them as buffers during the calculation.
-|vpx_idct16x16_10_add_neon_pass2| PROC
+|aom_idct16x16_10_add_neon_pass2| PROC
push {r3-r9}
; TODO(hkuang): Find a better way to load the elements.
@@ -1175,5 +1175,5 @@
end_idct10_16x16_pass2
pop {r3-r9}
bx lr
- ENDP ; |vpx_idct16x16_10_add_neon_pass2|
+ ENDP ; |aom_idct16x16_10_add_neon_pass2|
END
diff --git a/aom_dsp/arm/idct16x16_add_neon.c b/aom_dsp/arm/idct16x16_add_neon.c
index 2bb92c6..268c2ce 100644
--- a/aom_dsp/arm/idct16x16_add_neon.c
+++ b/aom_dsp/arm/idct16x16_add_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
@@ -77,7 +77,7 @@
return;
}
-void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
+void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
int output_stride) {
int16x4_t d0s16, d1s16, d2s16, d3s16;
int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
@@ -313,7 +313,7 @@
return;
}
-void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
+void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
int16_t *pass1Output, int16_t skip_adding,
uint8_t *dest, int dest_stride) {
uint8_t *d;
@@ -862,7 +862,7 @@
return;
}
-void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
+void aom_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
int output_stride) {
int16x4_t d4s16;
int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
@@ -998,7 +998,7 @@
return;
}
-void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
+void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
int16_t *pass1Output, int16_t skip_adding,
uint8_t *dest, int dest_stride) {
int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
diff --git a/aom_dsp/arm/idct16x16_neon.c b/aom_dsp/arm/idct16x16_neon.c
index e205056..653603a 100644
--- a/aom_dsp/arm/idct16x16_neon.c
+++ b/aom_dsp/arm/idct16x16_neon.c
@@ -8,26 +8,26 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
+void aom_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
int output_stride);
-void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
+void aom_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
int16_t *pass1Output, int16_t skip_adding,
uint8_t *dest, int dest_stride);
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
+void aom_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
int output_stride);
-void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
+void aom_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
int16_t *pass1Output, int16_t skip_adding,
uint8_t *dest, int dest_stride);
#if HAVE_NEON_ASM
/* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */
-extern void vpx_push_neon(int64_t *store);
-extern void vpx_pop_neon(int64_t *store);
+extern void aom_push_neon(int64_t *store);
+extern void aom_pop_neon(int64_t *store);
#endif // HAVE_NEON_ASM
-void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
int dest_stride) {
#if HAVE_NEON_ASM
int64_t store_reg[8];
@@ -37,63 +37,63 @@
#if HAVE_NEON_ASM
// save d8-d15 register values.
- vpx_push_neon(store_reg);
+ aom_push_neon(store_reg);
#endif
/* Parallel idct on the upper 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
// which will be saved into row_idct_output.
- vpx_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+ aom_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
dest, dest_stride);
/* Parallel idct on the lower 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
// which will be saved into row_idct_output.
- vpx_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8,
+ aom_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8,
pass1_output, 0, dest, dest_stride);
/* Parallel idct on the left 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
// Then add the result to the destination data.
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+ aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
pass1_output, 1, dest, dest_stride);
/* Parallel idct on the right 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
// Then add the result to the destination data.
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+ aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
row_idct_output + 8, pass1_output, 1,
dest + 8, dest_stride);
#if HAVE_NEON_ASM
// restore d8-d15 register values.
- vpx_pop_neon(store_reg);
+ aom_pop_neon(store_reg);
#endif
return;
}
-void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
int dest_stride) {
#if HAVE_NEON_ASM
int64_t store_reg[8];
@@ -103,18 +103,18 @@
#if HAVE_NEON_ASM
// save d8-d15 register values.
- vpx_push_neon(store_reg);
+ aom_push_neon(store_reg);
#endif
/* Parallel idct on the upper 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
+ aom_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
// which will be saved into row_idct_output.
- vpx_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+ aom_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
dest, dest_stride);
/* Skip Parallel idct on the lower 8 rows as they are all 0s */
@@ -122,29 +122,29 @@
/* Parallel idct on the left 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
// Then add the result to the destination data.
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+ aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
pass1_output, 1, dest, dest_stride);
/* Parallel idct on the right 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+ aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
// Then add the result to the destination data.
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+ aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
row_idct_output + 8, pass1_output, 1,
dest + 8, dest_stride);
#if HAVE_NEON_ASM
// restore d8-d15 register values.
- vpx_pop_neon(store_reg);
+ aom_pop_neon(store_reg);
#endif
return;
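[Editor's note] Both drivers in this file are the standard separable 2-D inverse transform: one 1-D idct16 pass over the rows into an intermediate buffer, then a second 1-D pass over the columns with a final round-and-add into the destination. A scalar sketch of that structure; idct16_1d stands in for the library's 1-D kernel and is an assumption here:

#include <stdint.h>

/* Assumed 1-D 16-point inverse DCT kernel. */
void idct16_1d(const int16_t *input, int16_t *output);

void idct16x16_add_sketch(const int16_t *input, uint8_t *dest, int stride) {
  int16_t out[16 * 16];
  int16_t temp_in[16], temp_out[16];
  int i, j;
  /* Pass 1: transform each row into the intermediate buffer. */
  for (i = 0; i < 16; ++i) idct16_1d(input + i * 16, out + i * 16);
  /* Pass 2: transform each column, then round and add into dest. */
  for (i = 0; i < 16; ++i) {
    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
    idct16_1d(temp_in, temp_out);
    for (j = 0; j < 16; ++j) {
      const int v = dest[j * stride + i] + ((temp_out[j] + 32) >> 6);
      dest[j * stride + i] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
  }
}

The pass1/pass2 split in the NEON code is this same computation with the 1-D butterfly cut into its even and odd halves, so each half's working set fits in q8-q15.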
diff --git a/aom_dsp/arm/idct32x32_1_add_neon.asm b/aom_dsp/arm/idct32x32_1_add_neon.asm
index 96d276b..9b31287 100644
--- a/aom_dsp/arm/idct32x32_1_add_neon.asm
+++ b/aom_dsp/arm/idct32x32_1_add_neon.asm
@@ -7,7 +7,7 @@
; file in the root of the source tree.
;
- EXPORT |vpx_idct32x32_1_add_neon|
+ EXPORT |aom_idct32x32_1_add_neon|
ARM
REQUIRE8
PRESERVE8
@@ -64,14 +64,14 @@
vst1.8 {q15},[$dst], $stride
MEND
-;void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
; int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride
-|vpx_idct32x32_1_add_neon| PROC
+|aom_idct32x32_1_add_neon| PROC
push {lr}
pld [r1]
add r3, r1, #16 ; r3 dest + 16 for second loop
@@ -140,5 +140,5 @@
bne diff_positive_32_32_loop
pop {pc}
- ENDP ; |vpx_idct32x32_1_add_neon|
+ ENDP ; |aom_idct32x32_1_add_neon|
END
diff --git a/aom_dsp/arm/idct32x32_1_add_neon.c b/aom_dsp/arm/idct32x32_1_add_neon.c
index 35bfc66..531ffd8 100644
--- a/aom_dsp/arm/idct32x32_1_add_neon.c
+++ b/aom_dsp/arm/idct32x32_1_add_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
@@ -93,7 +93,7 @@
return;
}
-void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
int i, j, dest_stride8;
uint8_t *d;
diff --git a/aom_dsp/arm/idct32x32_add_neon.asm b/aom_dsp/arm/idct32x32_add_neon.asm
index 7483ee7..10de482 100644
--- a/aom_dsp/arm/idct32x32_add_neon.asm
+++ b/aom_dsp/arm/idct32x32_add_neon.asm
@@ -43,7 +43,7 @@
cospi_31_64 EQU 804
- EXPORT |vpx_idct32x32_1024_add_neon|
+ EXPORT |aom_idct32x32_1024_add_neon|
ARM
REQUIRE8
PRESERVE8
@@ -288,7 +288,7 @@
MEND
; --------------------------------------------------------------------------
-;void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+;void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
;
; r0 int16_t *input,
; r1 uint8_t *dest,
@@ -303,7 +303,7 @@
; r9 dest + 15 * dest_stride, descending (14, 13, 12, ...)
; r10 dest + 16 * dest_stride, ascending (17, 18, 19, ...)
-|vpx_idct32x32_1024_add_neon| PROC
+|aom_idct32x32_1024_add_neon| PROC
; This function does one pass of idct32x32 transform.
;
; This is done by transposing the input and then doing a 1d transform on
@@ -1295,5 +1295,5 @@
vpop {d8-d15}
pop {r4-r11}
bx lr
- ENDP ; |vpx_idct32x32_1024_add_neon|
+ ENDP ; |aom_idct32x32_1024_add_neon|
END
diff --git a/aom_dsp/arm/idct32x32_add_neon.c b/aom_dsp/arm/idct32x32_add_neon.c
index 644155c..e1a561b 100644
--- a/aom_dsp/arm/idct32x32_add_neon.c
+++ b/aom_dsp/arm/idct32x32_add_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
#define LOAD_FROM_TRANSPOSED(prev, first, second) \
@@ -427,7 +427,7 @@
return;
}
-void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
+void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
int i, idct32_pass_loop;
int16_t trans_buf[32 * 8];
int16_t pass1[32 * 32];
diff --git a/aom_dsp/arm/idct4x4_1_add_neon.asm b/aom_dsp/arm/idct4x4_1_add_neon.asm
index adab715..1457527 100644
--- a/aom_dsp/arm/idct4x4_1_add_neon.asm
+++ b/aom_dsp/arm/idct4x4_1_add_neon.asm
@@ -8,21 +8,21 @@
;
- EXPORT |vpx_idct4x4_1_add_neon|
+ EXPORT |aom_idct4x4_1_add_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest,
; int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct4x4_1_add_neon| PROC
+|aom_idct4x4_1_add_neon| PROC
ldrsh r0, [r0]
; generate cospi_16_64 = 11585
@@ -63,6 +63,6 @@
vst1.32 {d7[1]}, [r12]
bx lr
- ENDP ; |vpx_idct4x4_1_add_neon|
+ ENDP ; |aom_idct4x4_1_add_neon|
END
diff --git a/aom_dsp/arm/idct4x4_1_add_neon.c b/aom_dsp/arm/idct4x4_1_add_neon.c
index 0a2e827..23399fd 100644
--- a/aom_dsp/arm/idct4x4_1_add_neon.c
+++ b/aom_dsp/arm/idct4x4_1_add_neon.c
@@ -13,7 +13,7 @@
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
-void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8x8_t d6u8;
uint32x2_t d2u32 = vdup_n_u32(0);
uint16x8_t q8u16;
diff --git a/aom_dsp/arm/idct4x4_add_neon.asm b/aom_dsp/arm/idct4x4_add_neon.asm
index 877fbd6..d240f33 100644
--- a/aom_dsp/arm/idct4x4_add_neon.asm
+++ b/aom_dsp/arm/idct4x4_add_neon.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_idct4x4_16_add_neon|
+ EXPORT |aom_idct4x4_16_add_neon|
ARM
REQUIRE8
PRESERVE8
@@ -16,13 +16,13 @@
AREA ||.text||, CODE, READONLY, ALIGN=2
AREA Block, CODE, READONLY ; name this block of code
-;void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct4x4_16_add_neon| PROC
+|aom_idct4x4_16_add_neon| PROC
; The 2D transform is done with two passes which are actually pretty
; similar. We first transform the rows. This is done by transposing
@@ -185,6 +185,6 @@
vst1.32 {d26[1]}, [r1], r2
vst1.32 {d26[0]}, [r1] ; no post-increment
bx lr
- ENDP ; |vpx_idct4x4_16_add_neon|
+ ENDP ; |aom_idct4x4_16_add_neon|
END
diff --git a/aom_dsp/arm/idct4x4_add_neon.c b/aom_dsp/arm/idct4x4_add_neon.c
index 3826269..5668beb 100644
--- a/aom_dsp/arm/idct4x4_add_neon.c
+++ b/aom_dsp/arm/idct4x4_add_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8x8_t d26u8, d27u8;
uint32x2_t d26u32, d27u32;
uint16x8_t q8u16, q9u16;
diff --git a/aom_dsp/arm/idct8x8_1_add_neon.asm b/aom_dsp/arm/idct8x8_1_add_neon.asm
index dbbff36..d2b410d 100644
--- a/aom_dsp/arm/idct8x8_1_add_neon.asm
+++ b/aom_dsp/arm/idct8x8_1_add_neon.asm
@@ -8,21 +8,21 @@
;
- EXPORT |vpx_idct8x8_1_add_neon|
+ EXPORT |aom_idct8x8_1_add_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
+;void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
; int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct8x8_1_add_neon| PROC
+|aom_idct8x8_1_add_neon| PROC
ldrsh r0, [r0]
; generate cospi_16_64 = 11585
@@ -83,6 +83,6 @@
vst1.64 {d31}, [r12], r2
bx lr
- ENDP ; |vpx_idct8x8_1_add_neon|
+ ENDP ; |aom_idct8x8_1_add_neon|
END
diff --git a/aom_dsp/arm/idct8x8_1_add_neon.c b/aom_dsp/arm/idct8x8_1_add_neon.c
index bda5998..393341a 100644
--- a/aom_dsp/arm/idct8x8_1_add_neon.c
+++ b/aom_dsp/arm/idct8x8_1_add_neon.c
@@ -13,7 +13,7 @@
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"
-void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8x8_t d2u8, d3u8, d30u8, d31u8;
uint64x1_t d2u64, d3u64, d4u64, d5u64;
uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
diff --git a/aom_dsp/arm/idct8x8_add_neon.asm b/aom_dsp/arm/idct8x8_add_neon.asm
index 6ab59b4..a03c83d 100644
--- a/aom_dsp/arm/idct8x8_add_neon.asm
+++ b/aom_dsp/arm/idct8x8_add_neon.asm
@@ -8,8 +8,8 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_idct8x8_64_add_neon|
- EXPORT |vpx_idct8x8_12_add_neon|
+ EXPORT |aom_idct8x8_64_add_neon|
+ EXPORT |aom_idct8x8_12_add_neon|
ARM
REQUIRE8
PRESERVE8
@@ -198,13 +198,13 @@
MEND
AREA Block, CODE, READONLY ; name this block of code
-;void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct8x8_64_add_neon| PROC
+|aom_idct8x8_64_add_neon| PROC
push {r4-r9}
vpush {d8-d15}
vld1.s16 {q8,q9}, [r0]!
@@ -308,15 +308,15 @@
vpop {d8-d15}
pop {r4-r9}
bx lr
- ENDP ; |vpx_idct8x8_64_add_neon|
+ ENDP ; |aom_idct8x8_64_add_neon|
-;void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
;
; r0 int16_t input
; r1 uint8_t *dest
; r2 int dest_stride)
-|vpx_idct8x8_12_add_neon| PROC
+|aom_idct8x8_12_add_neon| PROC
push {r4-r9}
vpush {d8-d15}
vld1.s16 {q8,q9}, [r0]!
@@ -514,6 +514,6 @@
vpop {d8-d15}
pop {r4-r9}
bx lr
- ENDP ; |vpx_idct8x8_12_add_neon|
+ ENDP ; |aom_idct8x8_12_add_neon|
END
diff --git a/aom_dsp/arm/idct8x8_add_neon.c b/aom_dsp/arm/idct8x8_add_neon.c
index 124c317..bd01aab 100644
--- a/aom_dsp/arm/idct8x8_add_neon.c
+++ b/aom_dsp/arm/idct8x8_add_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
@@ -228,7 +228,7 @@
return;
}
-void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8_t *d1, *d2;
uint8x8_t d0u8, d1u8, d2u8, d3u8;
uint64x1_t d0u64, d1u64, d2u64, d3u64;
@@ -330,7 +330,7 @@
return;
}
-void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
uint8_t *d1, *d2;
uint8x8_t d0u8, d1u8, d2u8, d3u8;
int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16;
diff --git a/aom_dsp/arm/intrapred_neon.c b/aom_dsp/arm/intrapred_neon.c
index 3166a4e..f2a0476 100644
--- a/aom_dsp/arm/intrapred_neon.c
+++ b/aom_dsp/arm/intrapred_neon.c
@@ -10,9 +10,9 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
//------------------------------------------------------------------------------
// DC 4x4
@@ -58,24 +58,24 @@
}
}
-void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
dc_4x4(dst, stride, above, left, 1, 1);
}
-void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
dc_4x4(dst, stride, NULL, left, 0, 1);
}
-void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
dc_4x4(dst, stride, above, NULL, 1, 0);
}
-void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -128,24 +128,24 @@
}
}
-void vpx_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
dc_8x8(dst, stride, above, left, 1, 1);
}
-void vpx_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
dc_8x8(dst, stride, NULL, left, 0, 1);
}
-void vpx_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
dc_8x8(dst, stride, above, NULL, 1, 0);
}
-void vpx_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -201,26 +201,26 @@
}
}
-void vpx_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
dc_16x16(dst, stride, above, left, 1, 1);
}
-void vpx_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
dc_16x16(dst, stride, NULL, left, 0, 1);
}
-void vpx_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)left;
dc_16x16(dst, stride, above, NULL, 1, 0);
}
-void vpx_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
@@ -284,26 +284,26 @@
}
}
-void vpx_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
dc_32x32(dst, stride, above, left, 1, 1);
}
-void vpx_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
dc_32x32(dst, stride, NULL, left, 0, 1);
}
-void vpx_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)left;
dc_32x32(dst, stride, above, NULL, 1, 0);
}
-void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
@@ -313,7 +313,7 @@
// -----------------------------------------------------------------------------
-void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(above)); // top row
const uint64x1_t A1 = vshr_n_u64(A0, 8);
@@ -336,7 +336,7 @@
dst[3 * stride + 3] = above[7];
}
-void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
static const uint8_t shuffle1[8] = { 1, 2, 3, 4, 5, 6, 7, 7 };
static const uint8_t shuffle2[8] = { 2, 3, 4, 5, 6, 7, 7, 7 };
@@ -356,7 +356,7 @@
vst1_u8(dst + i * stride, row);
}
-void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t A0 = vld1q_u8(above); // top row
const uint8x16_t above_right = vld1q_dup_u8(above + 15);
@@ -375,7 +375,7 @@
// -----------------------------------------------------------------------------
-void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
@@ -405,7 +405,7 @@
#if !HAVE_NEON_ASM
-void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int i;
uint32x2_t d0u32 = vdup_n_u32(0);
@@ -416,7 +416,7 @@
vst1_lane_u32((uint32_t *)dst, d0u32, 0);
}
-void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int i;
uint8x8_t d0u8 = vdup_n_u8(0);
@@ -426,7 +426,7 @@
for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8);
}
-void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int i;
uint8x16_t q0u8 = vdupq_n_u8(0);
@@ -436,7 +436,7 @@
for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8);
}
-void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int i;
uint8x16_t q0u8 = vdupq_n_u8(0);
@@ -451,7 +451,7 @@
}
}
-void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t d0u8 = vdup_n_u8(0);
uint32x2_t d1u32 = vdup_n_u32(0);
@@ -472,7 +472,7 @@
vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
}
-void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t d0u8 = vdup_n_u8(0);
uint64x1_t d1u64 = vdup_n_u64(0);
@@ -505,7 +505,7 @@
vst1_u8(dst, d0u8);
}
-void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int j;
uint8x8_t d2u8 = vdup_n_u8(0);
@@ -543,7 +543,7 @@
}
}
-void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int j, k;
uint8x8_t d2u8 = vdup_n_u8(0);
@@ -591,7 +591,7 @@
}
}
-void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int i;
uint16x8_t q1u16, q3u16;
@@ -611,7 +611,7 @@
}
}
-void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int j;
uint16x8_t q0u16, q3u16, q10u16;
@@ -653,7 +653,7 @@
}
}
-void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int j, k;
uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
@@ -716,7 +716,7 @@
}
}
-void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int j, k;
uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
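[Editor's note] All the dc_* wrappers in this file funnel into one helper per block size. A scalar sketch of that shared logic; dc_sketch and its rounding are modeled on the C reference, not taken from this file:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Average the available neighbors; fall back to 128 when neither the
 * above row nor the left column is used (the dc_128 variants). */
static void dc_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                      const uint8_t *above, const uint8_t *left,
                      int do_above, int do_left) {
  int i, sum = 0, count = 0, dc = 0x80;
  if (do_above) {
    for (i = 0; i < bs; ++i) sum += above[i];
    count += bs;
  }
  if (do_left) {
    for (i = 0; i < bs; ++i) sum += left[i];
    count += bs;
  }
  if (count) dc = (sum + (count >> 1)) / count;
  for (i = 0; i < bs; ++i, dst += stride) memset(dst, dc, bs);
}

aom_dc_top_predictor_8x8_neon, for example, corresponds to dc_sketch(dst, stride, 8, above, NULL, 1, 0), and likewise for the left and 128 variants.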
diff --git a/aom_dsp/arm/intrapred_neon_asm.asm b/aom_dsp/arm/intrapred_neon_asm.asm
index 115790d..6014a09 100644
--- a/aom_dsp/arm/intrapred_neon_asm.asm
+++ b/aom_dsp/arm/intrapred_neon_asm.asm
@@ -8,25 +8,25 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_v_predictor_4x4_neon|
- EXPORT |vpx_v_predictor_8x8_neon|
- EXPORT |vpx_v_predictor_16x16_neon|
- EXPORT |vpx_v_predictor_32x32_neon|
- EXPORT |vpx_h_predictor_4x4_neon|
- EXPORT |vpx_h_predictor_8x8_neon|
- EXPORT |vpx_h_predictor_16x16_neon|
- EXPORT |vpx_h_predictor_32x32_neon|
- EXPORT |vpx_tm_predictor_4x4_neon|
- EXPORT |vpx_tm_predictor_8x8_neon|
- EXPORT |vpx_tm_predictor_16x16_neon|
- EXPORT |vpx_tm_predictor_32x32_neon|
+ EXPORT |aom_v_predictor_4x4_neon|
+ EXPORT |aom_v_predictor_8x8_neon|
+ EXPORT |aom_v_predictor_16x16_neon|
+ EXPORT |aom_v_predictor_32x32_neon|
+ EXPORT |aom_h_predictor_4x4_neon|
+ EXPORT |aom_h_predictor_8x8_neon|
+ EXPORT |aom_h_predictor_16x16_neon|
+ EXPORT |aom_h_predictor_32x32_neon|
+ EXPORT |aom_tm_predictor_4x4_neon|
+ EXPORT |aom_tm_predictor_8x8_neon|
+ EXPORT |aom_tm_predictor_16x16_neon|
+ EXPORT |aom_tm_predictor_32x32_neon|
ARM
REQUIRE8
PRESERVE8
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -34,16 +34,16 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_v_predictor_4x4_neon| PROC
+|aom_v_predictor_4x4_neon| PROC
vld1.32 {d0[0]}, [r2]
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
bx lr
- ENDP ; |vpx_v_predictor_4x4_neon|
+ ENDP ; |aom_v_predictor_4x4_neon|
-;void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -51,7 +51,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_v_predictor_8x8_neon| PROC
+|aom_v_predictor_8x8_neon| PROC
vld1.8 {d0}, [r2]
vst1.8 {d0}, [r0], r1
vst1.8 {d0}, [r0], r1
@@ -62,9 +62,9 @@
vst1.8 {d0}, [r0], r1
vst1.8 {d0}, [r0], r1
bx lr
- ENDP ; |vpx_v_predictor_8x8_neon|
+ ENDP ; |aom_v_predictor_8x8_neon|
-;void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -72,7 +72,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_v_predictor_16x16_neon| PROC
+|aom_v_predictor_16x16_neon| PROC
vld1.8 {q0}, [r2]
vst1.8 {q0}, [r0], r1
vst1.8 {q0}, [r0], r1
@@ -91,9 +91,9 @@
vst1.8 {q0}, [r0], r1
vst1.8 {q0}, [r0], r1
bx lr
- ENDP ; |vpx_v_predictor_16x16_neon|
+ ENDP ; |aom_v_predictor_16x16_neon|
-;void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -101,7 +101,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_v_predictor_32x32_neon| PROC
+|aom_v_predictor_32x32_neon| PROC
vld1.8 {q0, q1}, [r2]
mov r2, #2
loop_v
@@ -124,9 +124,9 @@
subs r2, r2, #1
bgt loop_v
bx lr
- ENDP ; |vpx_v_predictor_32x32_neon|
+ ENDP ; |aom_v_predictor_32x32_neon|
-;void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -134,7 +134,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_h_predictor_4x4_neon| PROC
+|aom_h_predictor_4x4_neon| PROC
vld1.32 {d1[0]}, [r3]
vdup.8 d0, d1[0]
vst1.32 {d0[0]}, [r0], r1
@@ -145,9 +145,9 @@
vdup.8 d0, d1[3]
vst1.32 {d0[0]}, [r0], r1
bx lr
- ENDP ; |vpx_h_predictor_4x4_neon|
+ ENDP ; |aom_h_predictor_4x4_neon|
-;void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -155,7 +155,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_h_predictor_8x8_neon| PROC
+|aom_h_predictor_8x8_neon| PROC
vld1.64 {d1}, [r3]
vdup.8 d0, d1[0]
vst1.64 {d0}, [r0], r1
@@ -174,9 +174,9 @@
vdup.8 d0, d1[7]
vst1.64 {d0}, [r0], r1
bx lr
- ENDP ; |vpx_h_predictor_8x8_neon|
+ ENDP ; |aom_h_predictor_8x8_neon|
-;void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -184,7 +184,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_h_predictor_16x16_neon| PROC
+|aom_h_predictor_16x16_neon| PROC
vld1.8 {q1}, [r3]
vdup.8 q0, d2[0]
vst1.8 {q0}, [r0], r1
@@ -219,9 +219,9 @@
vdup.8 q0, d3[7]
vst1.8 {q0}, [r0], r1
bx lr
- ENDP ; |vpx_h_predictor_16x16_neon|
+ ENDP ; |aom_h_predictor_16x16_neon|
-;void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -229,7 +229,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_h_predictor_32x32_neon| PROC
+|aom_h_predictor_32x32_neon| PROC
sub r1, r1, #16
mov r2, #2
loop_h
@@ -285,9 +285,9 @@
subs r2, r2, #1
bgt loop_h
bx lr
- ENDP ; |vpx_h_predictor_32x32_neon|
+ ENDP ; |aom_h_predictor_32x32_neon|
-;void vpx_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -295,7 +295,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_tm_predictor_4x4_neon| PROC
+|aom_tm_predictor_4x4_neon| PROC
; Load ytop_left = above[-1];
sub r12, r2, #1
vld1.u8 {d0[]}, [r12]
@@ -331,9 +331,9 @@
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d1[0]}, [r0], r1
bx lr
- ENDP ; |vpx_tm_predictor_4x4_neon|
+ ENDP ; |aom_tm_predictor_4x4_neon|
-;void vpx_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -341,7 +341,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_tm_predictor_8x8_neon| PROC
+|aom_tm_predictor_8x8_neon| PROC
; Load ytop_left = above[-1];
sub r12, r2, #1
vld1.8 {d0[]}, [r12]
@@ -403,9 +403,9 @@
vst1.64 {d3}, [r0], r1
bx lr
- ENDP ; |vpx_tm_predictor_8x8_neon|
+ ENDP ; |aom_tm_predictor_8x8_neon|
-;void vpx_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -413,7 +413,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_tm_predictor_16x16_neon| PROC
+|aom_tm_predictor_16x16_neon| PROC
; Load ytop_left = above[-1];
sub r12, r2, #1
vld1.8 {d0[]}, [r12]
@@ -496,9 +496,9 @@
bgt loop_16x16_neon
bx lr
- ENDP ; |vpx_tm_predictor_16x16_neon|
+ ENDP ; |aom_tm_predictor_16x16_neon|
-;void vpx_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
+;void aom_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
; const uint8_t *above,
; const uint8_t *left)
; r0 uint8_t *dst
@@ -506,7 +506,7 @@
; r2 const uint8_t *above
; r3 const uint8_t *left
-|vpx_tm_predictor_32x32_neon| PROC
+|aom_tm_predictor_32x32_neon| PROC
; Load ytop_left = above[-1];
sub r12, r2, #1
vld1.8 {d0[]}, [r12]
@@ -625,6 +625,6 @@
bgt loop_32x32_neon
bx lr
- ENDP ; |vpx_tm_predictor_32x32_neon|
+ ENDP ; |aom_tm_predictor_32x32_neon|
END
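[Editor's note] The tm ("TrueMotion") kernels compute, per pixel, clip(left[r] + above[c] - above[-1]); the "Load ytop_left = above[-1]" at the top of each PROC is that corner term. A scalar equivalent, a sketch with illustrative names:

#include <stddef.h>
#include <stdint.h>

static void tm_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int r, c;
  const int ytop_left = above[-1];
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c) {
      const int v = left[r] + above[c] - ytop_left;
      dst[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
    dst += stride;
  }
}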
diff --git a/aom_dsp/arm/loopfilter_16_neon.asm b/aom_dsp/arm/loopfilter_16_neon.asm
index 5a8fdd6..1f2fc41 100644
--- a/aom_dsp/arm/loopfilter_16_neon.asm
+++ b/aom_dsp/arm/loopfilter_16_neon.asm
@@ -8,12 +8,12 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_lpf_horizontal_4_dual_neon|
+ EXPORT |aom_lpf_horizontal_4_dual_neon|
ARM
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
+;void aom_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
; const uint8_t *blimit0,
; const uint8_t *limit0,
; const uint8_t *thresh0,
@@ -29,7 +29,7 @@
; sp+8 const uint8_t *limit1,
; sp+12 const uint8_t *thresh1,
-|vpx_lpf_horizontal_4_dual_neon| PROC
+|aom_lpf_horizontal_4_dual_neon| PROC
push {lr}
ldr r12, [sp, #4] ; load thresh0
@@ -66,7 +66,7 @@
sub r2, r2, r1, lsl #1
sub r3, r3, r1, lsl #1
- bl vpx_loop_filter_neon_16
+ bl aom_loop_filter_neon_16
vst1.u8 {q5}, [r2@64], r1 ; store op1
vst1.u8 {q6}, [r3@64], r1 ; store op0
@@ -76,9 +76,9 @@
vpop {d8-d15} ; restore neon registers
pop {pc}
- ENDP ; |vpx_lpf_horizontal_4_dual_neon|
+ ENDP ; |aom_lpf_horizontal_4_dual_neon|
-; void vpx_loop_filter_neon_16();
+; void aom_loop_filter_neon_16();
; This is a helper function for the loopfilters. The individual functions do the
; necessary load, transpose (if necessary) and store. This function uses
; registers d8-d15, so the calling function must save those registers.
@@ -101,7 +101,7 @@
; q6 op0
; q7 oq0
; q8 oq1
-|vpx_loop_filter_neon_16| PROC
+|aom_loop_filter_neon_16| PROC
; filter_mask
vabd.u8 q11, q3, q4 ; m1 = abs(p3 - p2)
@@ -194,6 +194,6 @@
veor q8, q12, q10 ; *oq1 = u^0x80
bx lr
- ENDP ; |vpx_loop_filter_neon_16|
+ ENDP ; |aom_loop_filter_neon_16|
END
diff --git a/aom_dsp/arm/loopfilter_16_neon.c b/aom_dsp/arm/loopfilter_16_neon.c
index 70087f9..a6bc70d 100644
--- a/aom_dsp/arm/loopfilter_16_neon.c
+++ b/aom_dsp/arm/loopfilter_16_neon.c
@@ -10,9 +10,9 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
static INLINE void loop_filter_neon_16(uint8x16_t qblimit, // blimit
uint8x16_t qlimit, // limit
@@ -122,7 +122,7 @@
return;
}
-void vpx_lpf_horizontal_4_dual_neon(
+void aom_lpf_horizontal_4_dual_neon(
uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
const uint8_t *limit1, const uint8_t *thresh1) {
diff --git a/aom_dsp/arm/loopfilter_4_neon.asm b/aom_dsp/arm/loopfilter_4_neon.asm
index 9371158..78be4b8 100644
--- a/aom_dsp/arm/loopfilter_4_neon.asm
+++ b/aom_dsp/arm/loopfilter_4_neon.asm
@@ -8,16 +8,16 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_lpf_horizontal_4_neon|
- EXPORT |vpx_lpf_vertical_4_neon|
+ EXPORT |aom_lpf_horizontal_4_neon|
+ EXPORT |aom_lpf_vertical_4_neon|
ARM
AREA ||.text||, CODE, READONLY, ALIGN=2
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on 8 iterations at a time. The vp8 loop filter
; works on 16 iterations at a time.
;
-; void vpx_lpf_horizontal_4_neon(uint8_t *s,
+; void aom_lpf_horizontal_4_neon(uint8_t *s,
; int p /* pitch */,
; const uint8_t *blimit,
; const uint8_t *limit,
@@ -28,7 +28,7 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh,
-|vpx_lpf_horizontal_4_neon| PROC
+|aom_lpf_horizontal_4_neon| PROC
push {lr}
vld1.8 {d0[]}, [r2] ; duplicate *blimit
@@ -53,7 +53,7 @@
sub r2, r2, r1, lsl #1
sub r3, r3, r1, lsl #1
- bl vpx_loop_filter_neon
+ bl aom_loop_filter_neon
vst1.u8 {d4}, [r2@64], r1 ; store op1
vst1.u8 {d5}, [r3@64], r1 ; store op0
@@ -61,12 +61,12 @@
vst1.u8 {d7}, [r3@64], r1 ; store oq1
pop {pc}
- ENDP ; |vpx_lpf_horizontal_4_neon|
+ ENDP ; |aom_lpf_horizontal_4_neon|
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on 8 iterations at a time. The vp8 loop filter
; works on 16 iterations at a time.
;
-; void vpx_lpf_vertical_4_neon(uint8_t *s,
+; void aom_lpf_vertical_4_neon(uint8_t *s,
; int p /* pitch */,
; const uint8_t *blimit,
; const uint8_t *limit,
@@ -77,7 +77,7 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh,
-|vpx_lpf_vertical_4_neon| PROC
+|aom_lpf_vertical_4_neon| PROC
push {lr}
vld1.8 {d0[]}, [r2] ; duplicate *blimit
@@ -113,7 +113,7 @@
vtrn.8 d7, d16
vtrn.8 d17, d18
- bl vpx_loop_filter_neon
+ bl aom_loop_filter_neon
sub r0, r0, #2
@@ -128,9 +128,9 @@
vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
pop {pc}
- ENDP ; |vpx_lpf_vertical_4_neon|
+ ENDP ; |aom_lpf_vertical_4_neon|
-; void vpx_loop_filter_neon();
+; void aom_loop_filter_neon();
; This is a helper function for the loopfilters. The individual functions do the
; necessary load, transpose (if necessary) and store. The function does not use
; registers d8-d15.
@@ -154,7 +154,7 @@
; d5 op0
; d6 oq0
; d7 oq1
-|vpx_loop_filter_neon| PROC
+|aom_loop_filter_neon| PROC
; filter_mask
vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
@@ -244,6 +244,6 @@
veor d7, d20, d18 ; *oq1 = u^0x80
bx lr
- ENDP ; |vpx_loop_filter_neon|
+ ENDP ; |aom_loop_filter_neon|
END
diff --git a/aom_dsp/arm/loopfilter_4_neon.c b/aom_dsp/arm/loopfilter_4_neon.c
index 1c1e80e..74e13bd 100644
--- a/aom_dsp/arm/loopfilter_4_neon.c
+++ b/aom_dsp/arm/loopfilter_4_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
static INLINE void loop_filter_neon(uint8x8_t dblimit, // flimit
uint8x8_t dlimit, // limit
@@ -107,7 +107,7 @@
return;
}
-void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
uint8_t *s, *psrc;
@@ -153,7 +153,7 @@
return;
}
-void vpx_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i, pitch8;
uint8_t *s;
diff --git a/aom_dsp/arm/loopfilter_8_neon.asm b/aom_dsp/arm/loopfilter_8_neon.asm
index a2f20e1..4f6ede2 100644
--- a/aom_dsp/arm/loopfilter_8_neon.asm
+++ b/aom_dsp/arm/loopfilter_8_neon.asm
@@ -8,16 +8,16 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_lpf_horizontal_8_neon|
- EXPORT |vpx_lpf_vertical_8_neon|
+ EXPORT |aom_lpf_horizontal_8_neon|
+ EXPORT |aom_lpf_vertical_8_neon|
ARM
AREA ||.text||, CODE, READONLY, ALIGN=2
-; Currently vpx only works on iterations 8 at a time. The vp8 loop filter
+; Currently aom only works on 8 iterations at a time. The vp8 loop filter
; works on 16 iterations at a time.
;
-; void vpx_lpf_horizontal_8_neon(uint8_t *s, int p,
+; void aom_lpf_horizontal_8_neon(uint8_t *s, int p,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -26,7 +26,7 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh,
-|vpx_lpf_horizontal_8_neon| PROC
+|aom_lpf_horizontal_8_neon| PROC
push {r4-r5, lr}
vld1.8 {d0[]}, [r2] ; duplicate *blimit
@@ -51,7 +51,7 @@
sub r3, r3, r1, lsl #1
sub r2, r2, r1, lsl #2
- bl vpx_mbloop_filter_neon
+ bl aom_mbloop_filter_neon
vst1.u8 {d0}, [r2@64], r1 ; store op2
vst1.u8 {d1}, [r3@64], r1 ; store op1
@@ -62,9 +62,9 @@
pop {r4-r5, pc}
- ENDP ; |vpx_lpf_horizontal_8_neon|
+ ENDP ; |aom_lpf_horizontal_8_neon|
-; void vpx_lpf_vertical_8_neon(uint8_t *s,
+; void aom_lpf_vertical_8_neon(uint8_t *s,
; int pitch,
; const uint8_t *blimit,
; const uint8_t *limit,
@@ -75,7 +75,7 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh,
-|vpx_lpf_vertical_8_neon| PROC
+|aom_lpf_vertical_8_neon| PROC
push {r4-r5, lr}
vld1.8 {d0[]}, [r2] ; duplicate *blimit
@@ -114,7 +114,7 @@
sub r2, r0, #3
add r3, r0, #1
- bl vpx_mbloop_filter_neon
+ bl aom_mbloop_filter_neon
;store op2, op1, op0, oq0
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
@@ -137,9 +137,9 @@
vst2.8 {d4[7], d5[7]}, [r3]
pop {r4-r5, pc}
- ENDP ; |vpx_lpf_vertical_8_neon|
+ ENDP ; |aom_lpf_vertical_8_neon|
-; void vpx_mbloop_filter_neon();
+; void aom_mbloop_filter_neon();
; This is a helper function for the loopfilters. The individual functions do the
; necessary load, transpose (if necessary) and store. The function does not use
; registers d8-d15.
@@ -165,7 +165,7 @@
; d3 oq0
; d4 oq1
; d5 oq2
-|vpx_mbloop_filter_neon| PROC
+|aom_mbloop_filter_neon| PROC
; filter_mask
vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
@@ -420,6 +420,6 @@
bx lr
- ENDP ; |vpx_mbloop_filter_neon|
+ ENDP ; |aom_mbloop_filter_neon|
END
diff --git a/aom_dsp/arm/loopfilter_8_neon.c b/aom_dsp/arm/loopfilter_8_neon.c
index 854196f..54c1d22 100644
--- a/aom_dsp/arm/loopfilter_8_neon.c
+++ b/aom_dsp/arm/loopfilter_8_neon.c
@@ -10,7 +10,7 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
static INLINE void mbloop_filter_neon(uint8x8_t dblimit, // mblimit
uint8x8_t dlimit, // limit
@@ -259,7 +259,7 @@
return;
}
-void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
uint8_t *s, *psrc;
@@ -311,7 +311,7 @@
return;
}
-void vpx_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
uint8_t *s;
diff --git a/aom_dsp/arm/loopfilter_mb_neon.asm b/aom_dsp/arm/loopfilter_mb_neon.asm
index d5da7a8..cb20a28 100644
--- a/aom_dsp/arm/loopfilter_mb_neon.asm
+++ b/aom_dsp/arm/loopfilter_mb_neon.asm
@@ -8,9 +8,9 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_lpf_horizontal_edge_8_neon|
- EXPORT |vpx_lpf_horizontal_edge_16_neon|
- EXPORT |vpx_lpf_vertical_16_neon|
+ EXPORT |aom_lpf_horizontal_edge_8_neon|
+ EXPORT |aom_lpf_horizontal_edge_16_neon|
+ EXPORT |aom_lpf_vertical_16_neon|
ARM
AREA ||.text||, CODE, READONLY, ALIGN=2
@@ -55,7 +55,7 @@
vld1.u8 {d14}, [r8@64], r1 ; q6
vld1.u8 {d15}, [r8@64], r1 ; q7
- bl vpx_wide_mbfilter_neon
+ bl aom_wide_mbfilter_neon
tst r7, #1
beq h_mbfilter
@@ -118,7 +118,7 @@
ENDP ; |mb_lpf_horizontal_edge|
-; void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch,
+; void aom_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -127,12 +127,12 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh
-|vpx_lpf_horizontal_edge_8_neon| PROC
+|aom_lpf_horizontal_edge_8_neon| PROC
mov r12, #1
b mb_lpf_horizontal_edge
- ENDP ; |vpx_lpf_horizontal_edge_8_neon|
+ ENDP ; |aom_lpf_horizontal_edge_8_neon|
-; void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch,
+; void aom_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -141,12 +141,12 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh
-|vpx_lpf_horizontal_edge_16_neon| PROC
+|aom_lpf_horizontal_edge_16_neon| PROC
mov r12, #2
b mb_lpf_horizontal_edge
- ENDP ; |vpx_lpf_horizontal_edge_16_neon|
+ ENDP ; |aom_lpf_horizontal_edge_16_neon|
-; void vpx_lpf_vertical_16_neon(uint8_t *s, int p,
+; void aom_lpf_vertical_16_neon(uint8_t *s, int p,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -155,7 +155,7 @@
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh,
-|vpx_lpf_vertical_16_neon| PROC
+|aom_lpf_vertical_16_neon| PROC
push {r4-r8, lr}
vpush {d8-d15}
ldr r4, [sp, #88] ; load thresh
@@ -205,7 +205,7 @@
vtrn.8 d12, d13
vtrn.8 d14, d15
- bl vpx_wide_mbfilter_neon
+ bl aom_wide_mbfilter_neon
tst r7, #1
beq v_mbfilter
@@ -308,9 +308,9 @@
vpop {d8-d15}
pop {r4-r8, pc}
- ENDP ; |vpx_lpf_vertical_16_neon|
+ ENDP ; |aom_lpf_vertical_16_neon|
-; void vpx_wide_mbfilter_neon();
+; void aom_wide_mbfilter_neon();
; This is a helper function for the loopfilters. The individual functions do the
; necessary load, transpose (if necessary) and store.
;
@@ -334,7 +334,7 @@
; d13 q5
; d14 q6
; d15 q7
-|vpx_wide_mbfilter_neon| PROC
+|aom_wide_mbfilter_neon| PROC
mov r7, #0
; filter_mask
@@ -630,6 +630,6 @@
vbif d3, d14, d17 ; oq6 |= q6 & ~(f2 & f & m)
bx lr
- ENDP ; |vpx_wide_mbfilter_neon|
+ ENDP ; |aom_wide_mbfilter_neon|
END
diff --git a/aom_dsp/arm/loopfilter_neon.c b/aom_dsp/arm/loopfilter_neon.c
index 04c163a..da28e27 100644
--- a/aom_dsp/arm/loopfilter_neon.c
+++ b/aom_dsp/arm/loopfilter_neon.c
@@ -10,39 +10,39 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
-void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0);
- vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1);
+ aom_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0);
+ aom_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1);
}
#if HAVE_NEON_ASM
-void vpx_lpf_horizontal_8_dual_neon(
+void aom_lpf_horizontal_8_dual_neon(
uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
const uint8_t *limit1, const uint8_t *thresh1) {
- vpx_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1);
+ aom_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0);
+ aom_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0);
- vpx_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1);
+ aom_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0);
+ aom_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh) {
- vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
- vpx_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
+ aom_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
+ aom_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
}
#endif // HAVE_NEON_ASM
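[Editor's note] All of the loop-filter helpers start from the same per-column filter mask that the asm comments spell out (m1 = abs(p3 - p2), m2 = abs(p2 - p1), ...). A scalar version of that mask, a sketch modeled on the C reference; the reference returns an all-ones byte mask rather than the boolean used here:

#include <stdlib.h>

/* p3..p0 are the pixels before the edge, q0..q3 after it. Returns
 * nonzero when the edge is smooth enough to filter. */
static int filter_mask_sketch(int limit, int blimit, int p3, int p2, int p1,
                              int p0, int q0, int q1, int q2, int q3) {
  int mask = 0;
  mask |= abs(p3 - p2) > limit;
  mask |= abs(p2 - p1) > limit;
  mask |= abs(p1 - p0) > limit;
  mask |= abs(q1 - q0) > limit;
  mask |= abs(q2 - q1) > limit;
  mask |= abs(q3 - q2) > limit;
  mask |= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit;
  return !mask;
}

The _dual wrappers above simply apply the single-edge filter twice with independent blimit/limit/thresh sets, offset by 8 columns or 8 * pitch rows.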
diff --git a/aom_dsp/arm/sad4d_neon.c b/aom_dsp/arm/sad4d_neon.c
index 11f13be..e94029e 100644
--- a/aom_dsp/arm/sad4d_neon.c
+++ b/aom_dsp/arm/sad4d_neon.c
@@ -10,9 +10,9 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
const uint16x8_t vec_hi) {
@@ -78,7 +78,7 @@
vget_high_u8(vec_ref_16));
}
-void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad64x64x4d_neon(const uint8_t *src, int src_stride,
const uint8_t *const ref[4], int ref_stride,
uint32_t *res) {
int i;
@@ -124,7 +124,7 @@
res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
}
-void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad32x32x4d_neon(const uint8_t *src, int src_stride,
const uint8_t *const ref[4], int ref_stride,
uint32_t *res) {
int i;
@@ -168,7 +168,7 @@
res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
}
-void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+void aom_sad16x16x4d_neon(const uint8_t *src, int src_stride,
const uint8_t *const ref[4], int ref_stride,
uint32_t *res) {
int i;
diff --git a/aom_dsp/arm/sad_media.asm b/aom_dsp/arm/sad_media.asm
index aed1d3a..9d815a2 100644
--- a/aom_dsp/arm/sad_media.asm
+++ b/aom_dsp/arm/sad_media.asm
@@ -9,7 +9,7 @@
;
- EXPORT |vpx_sad16x16_media|
+ EXPORT |aom_sad16x16_media|
ARM
REQUIRE8
@@ -21,7 +21,7 @@
; r1 int src_stride
; r2 const unsigned char *ref_ptr
; r3 int ref_stride
-|vpx_sad16x16_media| PROC
+|aom_sad16x16_media| PROC
stmfd sp!, {r4-r12, lr}
pld [r0, r1, lsl #0]
diff --git a/aom_dsp/arm/sad_neon.c b/aom_dsp/arm/sad_neon.c
index 19fa109..274b6d3 100644
--- a/aom_dsp/arm/sad_neon.c
+++ b/aom_dsp/arm/sad_neon.c
@@ -10,11 +10,11 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
-unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad8x16_neon(unsigned char *src_ptr, int src_stride,
unsigned char *ref_ptr, int ref_stride) {
uint8x8_t d0, d8;
uint16x8_t q12;
@@ -45,7 +45,7 @@
return vget_lane_u32(d5, 0);
}
-unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad4x4_neon(unsigned char *src_ptr, int src_stride,
unsigned char *ref_ptr, int ref_stride) {
uint8x8_t d0, d8;
uint16x8_t q12;
@@ -73,7 +73,7 @@
return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
}
-unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride,
+unsigned int aom_sad16x8_neon(unsigned char *src_ptr, int src_stride,
unsigned char *ref_ptr, int ref_stride) {
uint8x16_t q0, q4;
uint16x8_t q12, q13;
@@ -127,7 +127,7 @@
return vget_lane_u32(c, 0);
}
-unsigned int vpx_sad64x64_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad64x64_neon(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride) {
int i;
uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -163,7 +163,7 @@
return horizontal_long_add_16x8(vec_accum_lo, vec_accum_hi);
}
-unsigned int vpx_sad32x32_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad32x32_neon(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride) {
int i;
uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -188,7 +188,7 @@
return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
}
-unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad16x16_neon(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride) {
int i;
uint16x8_t vec_accum_lo = vdupq_n_u16(0);
@@ -207,7 +207,7 @@
return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
}
-unsigned int vpx_sad8x8_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sad8x8_neon(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride) {
int i;
uint16x8_t vec_accum = vdupq_n_u16(0);
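[Editor's note] Each of these kernels computes the same quantity; a scalar reference, a sketch with the block dimensions passed explicitly for brevity:

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad_sketch(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               int width, int height) {
  unsigned int sad = 0;
  int i, j;
  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) sad += (unsigned int)abs(src[j] - ref[j]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}

The x4d variants in sad4d_neon.c evaluate this against four ref blocks in one pass, so each source row is loaded only once.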
diff --git a/aom_dsp/arm/save_reg_neon.asm b/aom_dsp/arm/save_reg_neon.asm
index c9ca108..b802792 100644
--- a/aom_dsp/arm/save_reg_neon.asm
+++ b/aom_dsp/arm/save_reg_neon.asm
@@ -9,8 +9,8 @@
;
- EXPORT |vpx_push_neon|
- EXPORT |vpx_pop_neon|
+ EXPORT |aom_push_neon|
+ EXPORT |aom_pop_neon|
ARM
REQUIRE8
@@ -18,14 +18,14 @@
AREA ||.text||, CODE, READONLY, ALIGN=2
-|vpx_push_neon| PROC
+|aom_push_neon| PROC
vst1.i64 {d8, d9, d10, d11}, [r0]!
vst1.i64 {d12, d13, d14, d15}, [r0]!
bx lr
ENDP
-|vpx_pop_neon| PROC
+|aom_pop_neon| PROC
vld1.i64 {d8, d9, d10, d11}, [r0]!
vld1.i64 {d12, d13, d14, d15}, [r0]!
bx lr
diff --git a/aom_dsp/arm/subpel_variance_media.c b/aom_dsp/arm/subpel_variance_media.c
index 69b1b33..2704f5a 100644
--- a/aom_dsp/arm/subpel_variance_media.c
+++ b/aom_dsp/arm/subpel_variance_media.c
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#if HAVE_MEDIA
@@ -19,15 +19,15 @@
{ 64, 64 }, { 48, 80 },
{ 32, 96 }, { 16, 112 } };
-extern void vpx_filter_block2d_bil_first_pass_media(
+extern void aom_filter_block2d_bil_first_pass_media(
const uint8_t *src_ptr, uint16_t *dst_ptr, uint32_t src_pitch,
uint32_t height, uint32_t width, const int16_t *filter);
-extern void vpx_filter_block2d_bil_second_pass_media(
+extern void aom_filter_block2d_bil_second_pass_media(
const uint16_t *src_ptr, uint8_t *dst_ptr, int32_t src_pitch,
uint32_t height, uint32_t width, const int16_t *filter);
-unsigned int vpx_sub_pixel_variance8x8_media(
+unsigned int aom_sub_pixel_variance8x8_media(
const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
uint16_t first_pass[10 * 8];
@@ -37,16 +37,16 @@
HFilter = bilinear_filters_media[xoffset];
VFilter = bilinear_filters_media[yoffset];
- vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
+ aom_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
src_pixels_per_line, 9, 8, HFilter);
- vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
+ aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
VFilter);
- return vpx_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
+ return aom_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
sse);
}
-unsigned int vpx_sub_pixel_variance16x16_media(
+unsigned int aom_sub_pixel_variance16x16_media(
const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
uint16_t first_pass[36 * 16];
@@ -55,24 +55,24 @@
unsigned int var;
if (xoffset == 4 && yoffset == 0) {
- var = vpx_variance_halfpixvar16x16_h_media(
+ var = aom_variance_halfpixvar16x16_h_media(
src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
} else if (xoffset == 0 && yoffset == 4) {
- var = vpx_variance_halfpixvar16x16_v_media(
+ var = aom_variance_halfpixvar16x16_v_media(
src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
} else if (xoffset == 4 && yoffset == 4) {
- var = vpx_variance_halfpixvar16x16_hv_media(
+ var = aom_variance_halfpixvar16x16_hv_media(
src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
} else {
HFilter = bilinear_filters_media[xoffset];
VFilter = bilinear_filters_media[yoffset];
- vpx_filter_block2d_bil_first_pass_media(
+ aom_filter_block2d_bil_first_pass_media(
src_ptr, first_pass, src_pixels_per_line, 17, 16, HFilter);
- vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
+ aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
16, VFilter);
- var = vpx_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
+ var = aom_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
sse);
}
return var;
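
The sub-pixel variance routines above all follow the same two-pass scheme: a horizontal bilinear pass into an intermediate uint16_t buffer with one extra row (9 rows for the 8x8 case, 17 for 16x16, matching the pass sizes above), then a vertical bilinear pass, then a plain variance against the reference. A sketch of the first pass, assuming the usual 7-bit filter precision where each 2-tap filter sums to 128:

    #include <stdint.h>

    /* First-pass shape of the bilinear filtering above: each output is a
       2-tap weighted average of horizontal neighbours. Sketch under the
       assumption filter[0] + filter[1] == 128; the shipped kernels are
       the assembly/NEON routines this file wraps. */
    static void bil_first_pass_sketch(const uint8_t *src, uint16_t *dst,
                                      uint32_t src_pitch, uint32_t height,
                                      uint32_t width, const int16_t *filter) {
      for (uint32_t i = 0; i < height; ++i) {
        for (uint32_t j = 0; j < width; ++j)
          dst[j] = (uint16_t)((src[j] * filter[0] + src[j + 1] * filter[1] + 64) >> 7);
        src += src_pitch;
        dst += width;
      }
    }

The half-pixel special cases (xoffset or yoffset equal to 4) skip this machinery entirely and call the dedicated halfpixvar kernels.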
diff --git a/aom_dsp/arm/subpel_variance_neon.c b/aom_dsp/arm/subpel_variance_neon.c
index caa3f4a..f04235d 100644
--- a/aom_dsp/arm/subpel_variance_neon.c
+++ b/aom_dsp/arm/subpel_variance_neon.c
@@ -9,11 +9,11 @@
*/
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
#include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/variance.h"
@@ -73,7 +73,7 @@
}
}
-unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
int xoffset, int yoffset,
const uint8_t *dst, int dst_stride,
unsigned int *sse) {
@@ -84,10 +84,10 @@
bilinear_filters[xoffset]);
var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, 8,
bilinear_filters[yoffset]);
- return vpx_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
+ return aom_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
}
-unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance16x16_neon(const uint8_t *src,
int src_stride, int xoffset,
int yoffset, const uint8_t *dst,
int dst_stride,
@@ -99,10 +99,10 @@
bilinear_filters[xoffset]);
var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16,
bilinear_filters[yoffset]);
- return vpx_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
+ return aom_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
}
-unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance32x32_neon(const uint8_t *src,
int src_stride, int xoffset,
int yoffset, const uint8_t *dst,
int dst_stride,
@@ -114,10 +114,10 @@
bilinear_filters[xoffset]);
var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32,
bilinear_filters[yoffset]);
- return vpx_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
+ return aom_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
}
-unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src,
+unsigned int aom_sub_pixel_variance64x64_neon(const uint8_t *src,
int src_stride, int xoffset,
int yoffset, const uint8_t *dst,
int dst_stride,
@@ -129,5 +129,5 @@
bilinear_filters[xoffset]);
var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64,
bilinear_filters[yoffset]);
- return vpx_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
+ return aom_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
}
diff --git a/aom_dsp/arm/subtract_neon.c b/aom_dsp/arm/subtract_neon.c
index ab7157c..27b37f3 100644
--- a/aom_dsp/arm/subtract_neon.c
+++ b/aom_dsp/arm/subtract_neon.c
@@ -10,10 +10,10 @@
#include <arm_neon.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
-void vpx_subtract_block_neon(int rows, int cols, int16_t *diff,
+void aom_subtract_block_neon(int rows, int cols, int16_t *diff,
ptrdiff_t diff_stride, const uint8_t *src,
ptrdiff_t src_stride, const uint8_t *pred,
ptrdiff_t pred_stride) {
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
index dab845a..52214f7 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
@@ -9,7 +9,7 @@
;
- EXPORT |vpx_variance_halfpixvar16x16_h_media|
+ EXPORT |aom_variance_halfpixvar16x16_h_media|
ARM
REQUIRE8
@@ -22,7 +22,7 @@
; r2 unsigned char *ref_ptr
; r3 int recon_stride
; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_h_media| PROC
+|aom_variance_halfpixvar16x16_h_media| PROC
stmfd sp!, {r4-r12, lr}
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
index 01953b7..a3f60fc 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
@@ -9,7 +9,7 @@
;
- EXPORT |vpx_variance_halfpixvar16x16_hv_media|
+ EXPORT |aom_variance_halfpixvar16x16_hv_media|
ARM
REQUIRE8
@@ -22,7 +22,7 @@
; r2 unsigned char *ref_ptr
; r3 int recon_stride
; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_hv_media| PROC
+|aom_variance_halfpixvar16x16_hv_media| PROC
stmfd sp!, {r4-r12, lr}
diff --git a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
index 0d17acb..b8071be 100644
--- a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
+++ b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
@@ -9,7 +9,7 @@
;
- EXPORT |vpx_variance_halfpixvar16x16_v_media|
+ EXPORT |aom_variance_halfpixvar16x16_v_media|
ARM
REQUIRE8
@@ -22,7 +22,7 @@
; r2 unsigned char *ref_ptr
; r3 int recon_stride
; stack unsigned int *sse
-|vpx_variance_halfpixvar16x16_v_media| PROC
+|aom_variance_halfpixvar16x16_v_media| PROC
stmfd sp!, {r4-r12, lr}
diff --git a/aom_dsp/arm/variance_media.asm b/aom_dsp/arm/variance_media.asm
index f7f9e14..8a21fdc 100644
--- a/aom_dsp/arm/variance_media.asm
+++ b/aom_dsp/arm/variance_media.asm
@@ -9,9 +9,9 @@
;
- EXPORT |vpx_variance16x16_media|
- EXPORT |vpx_variance8x8_media|
- EXPORT |vpx_mse16x16_media|
+ EXPORT |aom_variance16x16_media|
+ EXPORT |aom_variance8x8_media|
+ EXPORT |aom_mse16x16_media|
ARM
REQUIRE8
@@ -24,7 +24,7 @@
; r2 unsigned char *ref_ptr
; r3 int recon_stride
; stack unsigned int *sse
-|vpx_variance16x16_media| PROC
+|aom_variance16x16_media| PROC
stmfd sp!, {r4-r12, lr}
@@ -157,7 +157,7 @@
; r2 unsigned char *ref_ptr
; r3 int recon_stride
; stack unsigned int *sse
-|vpx_variance8x8_media| PROC
+|aom_variance8x8_media| PROC
push {r4-r10, lr}
@@ -241,10 +241,10 @@
; r3 int recon_stride
; stack unsigned int *sse
;
-;note: Based on vpx_variance16x16_media. In this function, sum is never used.
+;note: Based on aom_variance16x16_media. In this function, sum is never used.
; So, we can remove that part of the calculation.
-|vpx_mse16x16_media| PROC
+|aom_mse16x16_media| PROC
push {r4-r9, lr}
diff --git a/aom_dsp/arm/variance_neon.c b/aom_dsp/arm/variance_neon.c
index fcf6e45..1fbf470 100644
--- a/aom_dsp/arm/variance_neon.c
+++ b/aom_dsp/arm/variance_neon.c
@@ -10,10 +10,10 @@
#include <arm_neon.h>
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
@@ -60,17 +60,17 @@
*sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
}
-void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, unsigned int *sse, int *sum) {
variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum);
}
-void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+void aom_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, unsigned int *sse, int *sum) {
variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum);
}
-unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance8x8_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
@@ -78,7 +78,7 @@
return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / 8 * 8
}
-unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance16x16_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
@@ -86,7 +86,7 @@
return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / 16 * 16
}
-unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance32x32_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
@@ -94,7 +94,7 @@
return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / 32 * 32
}
-unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance32x64_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum1, sum2;
@@ -107,7 +107,7 @@
return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64
}
-unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance64x32_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum1, sum2;
@@ -120,7 +120,7 @@
return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64
}
-unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
+unsigned int aom_variance64x64_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum1, sum2;
@@ -144,7 +144,7 @@
return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / 64 * 64
}
-unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr,
+unsigned int aom_variance16x8_neon(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride, unsigned int *sse) {
@@ -220,7 +220,7 @@
return vget_lane_u32(d0u32, 0);
}
-unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr,
+unsigned int aom_variance8x16_neon(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride, unsigned int *sse) {
@@ -282,7 +282,7 @@
return vget_lane_u32(d0u32, 0);
}
-unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
+unsigned int aom_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
const unsigned char *ref_ptr, int recon_stride,
unsigned int *sse) {
int i;
@@ -345,7 +345,7 @@
return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}
-unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
+unsigned int aom_get4x4sse_cs_neon(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride) {
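
The recurring `*sse - ((sum * sum) >> k)` pattern above is the usual variance identity. For an N-pixel block with per-pixel differences $d_i = s_i - r_i$:

$$ \mathrm{var} \;=\; \sum_i d_i^2 \;-\; \frac{\left(\sum_i d_i\right)^2}{N}, \qquad N = w \cdot h, $$

so the shift counts in the comments are just $\log_2 N$: 6 for 8x8, 8 for 16x16, 10 for 32x32, 11 for 32x64 and 64x32, 12 for 64x64. The mse16x16 and get4x4sse_cs variants return only the first term, which is why their sums are never used.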
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index d3e4578..bf0bb5b 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -9,10 +9,10 @@
*/
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
-unsigned int vpx_avg_8x8_c(const uint8_t *src, int stride) {
+unsigned int aom_avg_8x8_c(const uint8_t *src, int stride) {
int i, j;
int sum = 0;
for (i = 0; i < 8; ++i, src += stride)
@@ -22,7 +22,7 @@
return ROUND_POWER_OF_TWO(sum, 6);
}
-unsigned int vpx_avg_4x4_c(const uint8_t *src, int stride) {
+unsigned int aom_avg_4x4_c(const uint8_t *src, int stride) {
int i, j;
int sum = 0;
for (i = 0; i < 4; ++i, src += stride)
@@ -66,7 +66,7 @@
// The order of the Hadamard output coefficients is not important. For
// optimization purposes the final transpose may be skipped.
-void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
+void aom_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
int16_t *coeff) {
int idx;
int16_t buffer[64];
@@ -89,14 +89,14 @@
}
// In place 16x16 2D Hadamard transform
-void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
+void aom_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
int16_t *coeff) {
int idx;
for (idx = 0; idx < 4; ++idx) {
// src_diff: 9 bit, dynamic range [-255, 255]
const int16_t *src_ptr =
src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
- vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
+ aom_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
}
// coeff: 15 bit, dynamic range [-16320, 16320]
@@ -122,7 +122,7 @@
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
-int vpx_satd_c(const int16_t *coeff, int length) {
+int aom_satd_c(const int16_t *coeff, int length) {
int i;
int satd = 0;
for (i = 0; i < length; ++i) satd += abs(coeff[i]);
@@ -133,7 +133,7 @@
// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
-void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
+void aom_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
const int ref_stride, const int height) {
int idx;
const int norm_factor = height >> 1;
@@ -149,7 +149,7 @@
}
// width: value range {16, 32, 64}.
-int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
+int16_t aom_int_pro_col_c(const uint8_t *ref, const int width) {
int idx;
int16_t sum = 0;
// sum: 14 bit, dynamic range [0, 16320]
@@ -160,7 +160,7 @@
// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
-int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
+int aom_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
int i;
int width = 4 << bwl;
int sse = 0, mean = 0, var;
@@ -176,7 +176,7 @@
return var;
}
-void vpx_minmax_8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref,
+void aom_minmax_8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref,
int ref_stride, int *min, int *max) {
int i, j;
*min = 255;
@@ -190,8 +190,8 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t *src, int stride) {
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int aom_highbd_avg_8x8_c(const uint8_t *src, int stride) {
int i, j;
int sum = 0;
const uint16_t *s = CONVERT_TO_SHORTPTR(src);
@@ -202,7 +202,7 @@
return ROUND_POWER_OF_TWO(sum, 6);
}
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t *src, int stride) {
+unsigned int aom_highbd_avg_4x4_c(const uint8_t *src, int stride) {
int i, j;
int sum = 0;
const uint16_t *s = CONVERT_TO_SHORTPTR(src);
@@ -213,7 +213,7 @@
return ROUND_POWER_OF_TWO(sum, 4);
}
-void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
+void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
int dp, int *min, int *max) {
int i, j;
const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
@@ -228,4 +228,4 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
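
The rounding macro used throughout this file has the shape `ROUND_POWER_OF_TWO(v, n) == (v + (1 << (n - 1))) >> n` (round-to-nearest; the exact expansion in the included headers is assumed here). A self-contained check of the aom_avg_8x8_c arithmetic:

    #include <assert.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    int main(void) {
      /* 64 pixels summing to 8160 average to 127.5; the +32 bias rounds
         the >> 6 divide-by-64 up to 128 instead of truncating to 127. */
      assert(ROUND_POWER_OF_TWO(8160, 6) == 128);
      /* aom_avg_4x4_c divides its 16-pixel sum by 16 the same way. */
      assert(ROUND_POWER_OF_TWO(1000, 4) == 63);  /* 62.5 rounds to 63 */
      return 0;
    }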
diff --git a/aom_dsp/bitreader.c b/aom_dsp/bitreader.c
index 0942005..60c546d 100644
--- a/aom_dsp/bitreader.c
+++ b/aom_dsp/bitreader.c
@@ -9,17 +9,17 @@
*/
#include <stdlib.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/bitreader.h"
#include "aom_dsp/prob.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_util/endian_inl.h"
-int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
- vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
+int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size,
+ aom_decrypt_cb decrypt_cb, void *decrypt_state) {
if (size && !buffer) {
return 1;
} else {
@@ -30,12 +30,12 @@
r->range = 255;
r->decrypt_cb = decrypt_cb;
r->decrypt_state = decrypt_state;
- vpx_reader_fill(r);
- return vpx_read_bit(r) != 0; // marker bit
+ aom_reader_fill(r);
+ return aom_read_bit(r) != 0; // marker bit
}
}
-void vpx_reader_fill(vpx_reader *r) {
+void aom_reader_fill(aom_reader *r) {
const uint8_t *const buffer_end = r->buffer_end;
const uint8_t *buffer = r->buffer;
const uint8_t *buffer_start = buffer;
@@ -46,7 +46,7 @@
int shift = BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT);
if (r->decrypt_cb) {
- size_t n = VPXMIN(sizeof(r->clear_buffer), bytes_left);
+ size_t n = AOMMIN(sizeof(r->clear_buffer), bytes_left);
r->decrypt_cb(r->decrypt_state, buffer, r->clear_buffer, (int)n);
buffer = r->clear_buffer;
buffer_start = r->clear_buffer;
@@ -90,7 +90,7 @@
r->count = count;
}
-const uint8_t *vpx_reader_find_end(vpx_reader *r) {
+const uint8_t *aom_reader_find_end(aom_reader *r) {
// Find the end of the coded buffer
while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) {
r->count -= CHAR_BIT;
diff --git a/aom_dsp/bitreader.h b/aom_dsp/bitreader.h
index d211511..402461d 100644
--- a/aom_dsp/bitreader.h
+++ b/aom_dsp/bitreader.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_BITREADER_H_
-#define VPX_DSP_BITREADER_H_
+#ifndef AOM_DSP_BITREADER_H_
+#define AOM_DSP_BITREADER_H_
#include <limits.h>
#include <stddef.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#if CONFIG_BITSTREAM_DEBUG
#include <assert.h>
@@ -22,8 +22,8 @@
#endif // CONFIG_BITSTREAM_DEBUG
#include "aom_ports/mem.h"
-#include "aom/vp8dx.h"
-#include "aom/vpx_integer.h"
+#include "aom/aomdx.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/prob.h"
#include "aom_util/debug_util.h"
@@ -47,19 +47,19 @@
int count;
const uint8_t *buffer_end;
const uint8_t *buffer;
- vpx_decrypt_cb decrypt_cb;
+ aom_decrypt_cb decrypt_cb;
void *decrypt_state;
uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
-} vpx_reader;
+} aom_reader;
-int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
- vpx_decrypt_cb decrypt_cb, void *decrypt_state);
+int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size,
+ aom_decrypt_cb decrypt_cb, void *decrypt_state);
-void vpx_reader_fill(vpx_reader *r);
+void aom_reader_fill(aom_reader *r);
-const uint8_t *vpx_reader_find_end(vpx_reader *r);
+const uint8_t *aom_reader_find_end(aom_reader *r);
-static INLINE int vpx_reader_has_error(vpx_reader *r) {
+static INLINE int aom_reader_has_error(aom_reader *r) {
// Check if we have reached the end of the buffer.
//
// Variable 'count' stores the number of bits in the 'value' buffer, minus
@@ -77,7 +77,7 @@
return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS;
}
-static INLINE int vpx_read(vpx_reader *r, int prob) {
+static INLINE int aom_read(aom_reader *r, int prob) {
unsigned int bit = 0;
BD_VALUE value;
BD_VALUE bigsplit;
@@ -85,7 +85,7 @@
unsigned int range;
unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT;
- if (r->count < 0) vpx_reader_fill(r);
+ if (r->count < 0) aom_reader_fill(r);
value = r->value;
count = r->count;
@@ -101,7 +101,7 @@
}
{
- register int shift = vpx_norm[range];
+ register int shift = aom_norm[range];
range <<= shift;
value <<= shift;
count -= shift;
@@ -129,23 +129,23 @@
return bit;
}
-static INLINE int vpx_read_bit(vpx_reader *r) {
- return vpx_read(r, 128); // vpx_prob_half
+static INLINE int aom_read_bit(aom_reader *r) {
+ return aom_read(r, 128); // aom_prob_half
}
-static INLINE int vpx_read_literal(vpx_reader *r, int bits) {
+static INLINE int aom_read_literal(aom_reader *r, int bits) {
int literal = 0, bit;
- for (bit = bits - 1; bit >= 0; bit--) literal |= vpx_read_bit(r) << bit;
+ for (bit = bits - 1; bit >= 0; bit--) literal |= aom_read_bit(r) << bit;
return literal;
}
-static INLINE int vpx_read_tree(vpx_reader *r, const vpx_tree_index *tree,
- const vpx_prob *probs) {
- vpx_tree_index i = 0;
+static INLINE int aom_read_tree(aom_reader *r, const aom_tree_index *tree,
+ const aom_prob *probs) {
+ aom_tree_index i = 0;
- while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) continue;
+ while ((i = tree[i + aom_read(r, probs[i >> 1])]) > 0) continue;
return -i;
}
@@ -154,4 +154,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_BITREADER_H_
+#endif // AOM_DSP_BITREADER_H_
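
Taken together, the renamed reader API is used on the decode side roughly as follows; a minimal sketch using only the functions declared above (buffer contents and error handling elided):

    #include "aom_dsp/bitreader.h"

    /* Decode-side usage sketch of the renamed API. aom_reader_init
       returns nonzero on failure (or a set marker bit), and
       aom_read_literal assembles `bits` MSB-first calls to aom_read_bit. */
    static int read_three_bits(const uint8_t *buf, size_t size) {
      aom_reader r;
      if (aom_reader_init(&r, buf, size, NULL, NULL)) return -1;
      return aom_read_literal(&r, 3);
    }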
diff --git a/aom_dsp/bitreader_buffer.c b/aom_dsp/bitreader_buffer.c
index bf88119..cf505e8 100644
--- a/aom_dsp/bitreader_buffer.c
+++ b/aom_dsp/bitreader_buffer.c
@@ -7,14 +7,14 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "./bitreader_buffer.h"
-size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb) {
+size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb) {
return (rb->bit_offset + 7) >> 3;
}
-int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) {
+int aom_rb_read_bit(struct aom_read_bit_buffer *rb) {
const size_t off = rb->bit_offset;
const size_t p = off >> 3;
const int q = 7 - (int)(off & 0x7);
@@ -28,19 +28,19 @@
}
}
-int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits) {
+int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits) {
int value = 0, bit;
- for (bit = bits - 1; bit >= 0; bit--) value |= vpx_rb_read_bit(rb) << bit;
+ for (bit = bits - 1; bit >= 0; bit--) value |= aom_rb_read_bit(rb) << bit;
return value;
}
-int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
- const int value = vpx_rb_read_literal(rb, bits);
- return vpx_rb_read_bit(rb) ? -value : value;
+int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits) {
+ const int value = aom_rb_read_literal(rb, bits);
+ return aom_rb_read_bit(rb) ? -value : value;
}
-int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
+int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits) {
const int nbits = sizeof(unsigned) * 8 - bits - 1;
- const unsigned value = (unsigned)vpx_rb_read_literal(rb, bits + 1) << nbits;
+ const unsigned value = (unsigned)aom_rb_read_literal(rb, bits + 1) << nbits;
return ((int)value) >> nbits;
}
diff --git a/aom_dsp/bitreader_buffer.h b/aom_dsp/bitreader_buffer.h
index 5e557ea..2f68664 100644
--- a/aom_dsp/bitreader_buffer.h
+++ b/aom_dsp/bitreader_buffer.h
@@ -8,40 +8,40 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_BITREADER_BUFFER_H_
-#define VPX_DSP_BITREADER_BUFFER_H_
+#ifndef AOM_DSP_BITREADER_BUFFER_H_
+#define AOM_DSP_BITREADER_BUFFER_H_
#include <limits.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
#endif
-typedef void (*vpx_rb_error_handler)(void *data);
+typedef void (*aom_rb_error_handler)(void *data);
-struct vpx_read_bit_buffer {
+struct aom_read_bit_buffer {
const uint8_t *bit_buffer;
const uint8_t *bit_buffer_end;
size_t bit_offset;
void *error_handler_data;
- vpx_rb_error_handler error_handler;
+ aom_rb_error_handler error_handler;
};
-size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb);
+size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb);
-int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb);
+int aom_rb_read_bit(struct aom_read_bit_buffer *rb);
-int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits);
-int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits);
-int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits);
+int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_BITREADER_BUFFER_H_
+#endif // AOM_DSP_BITREADER_BUFFER_H_
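
aom_rb_read_inv_signed_literal above relies on the shift-up/arithmetic-shift-down idiom to sign-extend a (bits + 1)-wide field. A worked check of that idiom (assuming the usual arithmetic right shift for signed ints, which C leaves implementation-defined):

    #include <assert.h>

    int main(void) {
      const int bits = 4;
      const int nbits = (int)(sizeof(unsigned) * 8) - bits - 1;  /* 27 for 32-bit int */
      const unsigned raw = 0x1D;  /* 5-bit field 0b11101 == -3 in two's complement */
      const int value = (int)(raw << nbits) >> nbits;
      assert(value == -3);
      return 0;
    }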
diff --git a/aom_dsp/bitwriter.c b/aom_dsp/bitwriter.c
index 0abe351..9009a44 100644
--- a/aom_dsp/bitwriter.c
+++ b/aom_dsp/bitwriter.c
@@ -12,23 +12,23 @@
#include "./bitwriter.h"
-void vpx_start_encode(vpx_writer *br, uint8_t *source) {
+void aom_start_encode(aom_writer *br, uint8_t *source) {
br->lowvalue = 0;
br->range = 255;
br->count = -24;
br->buffer = source;
br->pos = 0;
- vpx_write_bit(br, 0);
+ aom_write_bit(br, 0);
}
-void vpx_stop_encode(vpx_writer *br) {
+void aom_stop_encode(aom_writer *br) {
int i;
#if CONFIG_BITSTREAM_DEBUG
bitstream_queue_set_skip_write(1);
#endif // CONFIG_BITSTREAM_DEBUG
- for (i = 0; i < 32; i++) vpx_write_bit(br, 0);
+ for (i = 0; i < 32; i++) aom_write_bit(br, 0);
#if CONFIG_BITSTREAM_DEBUG
bitstream_queue_set_skip_write(0);
diff --git a/aom_dsp/bitwriter.h b/aom_dsp/bitwriter.h
index 5b3634a..c6bc99b 100644
--- a/aom_dsp/bitwriter.h
+++ b/aom_dsp/bitwriter.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_BITWRITER_H_
-#define VPX_DSP_BITWRITER_H_
+#ifndef AOM_DSP_BITWRITER_H_
+#define AOM_DSP_BITWRITER_H_
#include "aom_ports/mem.h"
#include "aom_dsp/prob.h"
@@ -19,18 +19,18 @@
extern "C" {
#endif
-typedef struct vpx_writer {
+typedef struct aom_writer {
unsigned int lowvalue;
unsigned int range;
int count;
unsigned int pos;
uint8_t *buffer;
-} vpx_writer;
+} aom_writer;
-void vpx_start_encode(vpx_writer *bc, uint8_t *buffer);
-void vpx_stop_encode(vpx_writer *bc);
+void aom_start_encode(aom_writer *bc, uint8_t *buffer);
+void aom_stop_encode(aom_writer *bc);
-static INLINE void vpx_write(vpx_writer *br, int bit, int probability) {
+static INLINE void aom_write(aom_writer *br, int bit, int probability) {
unsigned int split;
int count = br->count;
unsigned int range = br->range;
@@ -50,7 +50,7 @@
range = br->range - split;
}
- shift = vpx_norm[range];
+ shift = aom_norm[range];
range <<= shift;
count += shift;
@@ -82,20 +82,20 @@
br->range = range;
}
-static INLINE void vpx_write_bit(vpx_writer *w, int bit) {
- vpx_write(w, bit, 128); // vpx_prob_half
+static INLINE void aom_write_bit(aom_writer *w, int bit) {
+ aom_write(w, bit, 128); // aom_prob_half
}
-static INLINE void vpx_write_literal(vpx_writer *w, int data, int bits) {
+static INLINE void aom_write_literal(aom_writer *w, int data, int bits) {
int bit;
- for (bit = bits - 1; bit >= 0; bit--) vpx_write_bit(w, 1 & (data >> bit));
+ for (bit = bits - 1; bit >= 0; bit--) aom_write_bit(w, 1 & (data >> bit));
}
-#define vpx_write_prob(w, v) vpx_write_literal((w), (v), 8)
+#define aom_write_prob(w, v) aom_write_literal((w), (v), 8)
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_BITWRITER_H_
+#endif // AOM_DSP_BITWRITER_H_
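
The writer mirrors the reader: aom_start_encode primes the range coder and emits the marker bit that aom_reader_init later consumes, and aom_stop_encode flushes with 32 zero bits. A minimal encode-side sketch using only the functions declared above:

    #include "aom_dsp/bitwriter.h"

    /* Encode-side counterpart of the earlier reader sketch. w.pos holds
       the number of bytes written after the flush. */
    static unsigned int write_three_bits(uint8_t *buf, int value) {
      aom_writer w;
      aom_start_encode(&w, buf);
      aom_write_literal(&w, value, 3);
      aom_stop_encode(&w);
      return w.pos;
    }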
diff --git a/aom_dsp/bitwriter_buffer.c b/aom_dsp/bitwriter_buffer.c
index 0638622..3f9a875 100644
--- a/aom_dsp/bitwriter_buffer.c
+++ b/aom_dsp/bitwriter_buffer.c
@@ -11,14 +11,14 @@
#include <limits.h>
#include <stdlib.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "./bitwriter_buffer.h"
-size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb) {
+size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb) {
return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0);
}
-void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) {
+void aom_wb_write_bit(struct aom_write_bit_buffer *wb, int bit) {
const int off = (int)wb->bit_offset;
const int p = off / CHAR_BIT;
const int q = CHAR_BIT - 1 - off % CHAR_BIT;
@@ -31,12 +31,12 @@
wb->bit_offset = off + 1;
}
-void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits) {
+void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits) {
int bit;
- for (bit = bits - 1; bit >= 0; bit--) vpx_wb_write_bit(wb, (data >> bit) & 1);
+ for (bit = bits - 1; bit >= 0; bit--) aom_wb_write_bit(wb, (data >> bit) & 1);
}
-void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data,
+void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data,
int bits) {
- vpx_wb_write_literal(wb, data, bits + 1);
+ aom_wb_write_literal(wb, data, bits + 1);
}
diff --git a/aom_dsp/bitwriter_buffer.h b/aom_dsp/bitwriter_buffer.h
index 2406abd..4ca942d 100644
--- a/aom_dsp/bitwriter_buffer.h
+++ b/aom_dsp/bitwriter_buffer.h
@@ -8,31 +8,31 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_BITWRITER_BUFFER_H_
-#define VPX_DSP_BITWRITER_BUFFER_H_
+#ifndef AOM_DSP_BITWRITER_BUFFER_H_
+#define AOM_DSP_BITWRITER_BUFFER_H_
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct vpx_write_bit_buffer {
+struct aom_write_bit_buffer {
uint8_t *bit_buffer;
size_t bit_offset;
};
-size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb);
+size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb);
-void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit);
+void aom_wb_write_bit(struct aom_write_bit_buffer *wb, int bit);
-void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits);
+void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits);
-void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data,
+void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data,
int bits);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_BITWRITER_BUFFER_H_
+#endif // AOM_DSP_BITWRITER_BUFFER_H_
diff --git a/aom_dsp/blend.h b/aom_dsp/blend.h
index e43149d..2dcaa1f 100644
--- a/aom_dsp/blend.h
+++ b/aom_dsp/blend.h
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_BLEND_H_
-#define VPX_DSP_BLEND_H_
+#ifndef AOM_DSP_BLEND_H_
+#define AOM_DSP_BLEND_H_
#include "aom_ports/mem.h"
// Various blending functions and macros.
-// See also the vpx_blend_* functions in vpx_dsp_rtcd.h
+// See also the aom_blend_* functions in aom_dsp_rtcd.h
// Alpha blending with alpha values from the range [0, 64], where 64
// means use the first input and 0 means use the second input.
-#define VPX_BLEND_A64_ROUND_BITS 6
-#define VPX_BLEND_A64_MAX_ALPHA (1 << VPX_BLEND_A64_ROUND_BITS) // 64
+#define AOM_BLEND_A64_ROUND_BITS 6
+#define AOM_BLEND_A64_MAX_ALPHA (1 << AOM_BLEND_A64_ROUND_BITS) // 64
-#define VPX_BLEND_A64(a, v0, v1) \
- ROUND_POWER_OF_TWO((a) * (v0) + (VPX_BLEND_A64_MAX_ALPHA - (a)) * (v1), \
- VPX_BLEND_A64_ROUND_BITS)
+#define AOM_BLEND_A64(a, v0, v1) \
+ ROUND_POWER_OF_TWO((a) * (v0) + (AOM_BLEND_A64_MAX_ALPHA - (a)) * (v1), \
+ AOM_BLEND_A64_ROUND_BITS)
// Alpha blending with alpha values from the range [0, 256], where 256
// means use the first input and 0 means use the second input.
-#define VPX_BLEND_A256_ROUND_BITS 8
-#define VPX_BLEND_A256_MAX_ALPHA (1 << VPX_BLEND_A256_ROUND_BITS) // 256
+#define AOM_BLEND_A256_ROUND_BITS 8
+#define AOM_BLEND_A256_MAX_ALPHA (1 << AOM_BLEND_A256_ROUND_BITS) // 256
-#define VPX_BLEND_A256(a, v0, v1) \
- ROUND_POWER_OF_TWO((a) * (v0) + (VPX_BLEND_A256_MAX_ALPHA - (a)) * (v1), \
- VPX_BLEND_A256_ROUND_BITS)
+#define AOM_BLEND_A256(a, v0, v1) \
+ ROUND_POWER_OF_TWO((a) * (v0) + (AOM_BLEND_A256_MAX_ALPHA - (a)) * (v1), \
+ AOM_BLEND_A256_ROUND_BITS)
// Blending by averaging.
-#define VPX_BLEND_AVG(v0, v1) ROUND_POWER_OF_TWO((v0) + (v1), 1)
+#define AOM_BLEND_AVG(v0, v1) ROUND_POWER_OF_TWO((v0) + (v1), 1)
-#endif // VPX_DSP_BLEND_H_
+#endif // AOM_DSP_BLEND_H_
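
The A64 macro is a fixed-point lerp: with alpha a in [0, 64], the two weights a and 64 - a sum to the rounding denominator. A self-contained check, restating the macros above (ROUND_POWER_OF_TWO expansion assumed):

    #include <assert.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    #define AOM_BLEND_A64_ROUND_BITS 6
    #define AOM_BLEND_A64_MAX_ALPHA (1 << AOM_BLEND_A64_ROUND_BITS)  // 64
    #define AOM_BLEND_A64(a, v0, v1)                                          \
      ROUND_POWER_OF_TWO((a) * (v0) + (AOM_BLEND_A64_MAX_ALPHA - (a)) * (v1), \
                         AOM_BLEND_A64_ROUND_BITS)

    int main(void) {
      assert(AOM_BLEND_A64(64, 200, 40) == 200);  /* alpha 64: all first input */
      assert(AOM_BLEND_A64(0, 200, 40) == 40);    /* alpha 0: all second input */
      assert(AOM_BLEND_A64(16, 200, 40) == 80);   /* (16*200 + 48*40 + 32) >> 6 */
      return 0;
    }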
diff --git a/aom_dsp/blend_a64_hmask.c b/aom_dsp/blend_a64_hmask.c
index cce5d88..4bb7ae6 100644
--- a/aom_dsp/blend_a64_hmask.c
+++ b/aom_dsp/blend_a64_hmask.c
@@ -10,14 +10,14 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-void vpx_blend_a64_hmask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_hmask_c(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
@@ -33,14 +33,14 @@
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- dst[i * dst_stride + j] = VPX_BLEND_A64(
+ dst[i * dst_stride + j] = AOM_BLEND_A64(
mask[j], src0[i * src0_stride + j], src1[i * src1_stride + j]);
}
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_hmask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_hmask_c(uint8_t *dst_8, uint32_t dst_stride,
const uint8_t *src0_8, uint32_t src0_stride,
const uint8_t *src1_8, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
@@ -61,9 +61,9 @@
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- dst[i * dst_stride + j] = VPX_BLEND_A64(
+ dst[i * dst_stride + j] = AOM_BLEND_A64(
mask[j], src0[i * src0_stride + j], src1[i * src1_stride + j]);
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/blend_a64_mask.c b/aom_dsp/blend_a64_mask.c
index 939e729..bb7a088 100644
--- a/aom_dsp/blend_a64_mask.c
+++ b/aom_dsp/blend_a64_mask.c
@@ -10,18 +10,18 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#include "aom_dsp/blend.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
// Blending with alpha mask. Mask values come from the range [0, 64],
-// as described for VPX_BLEND_A64 in aom_dsp/blned.h. src0 or src1 can
+// as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can
// be the same as dst, or dst can be different from both sources.
-void vpx_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h,
@@ -40,7 +40,7 @@
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
const int m = mask[i * mask_stride + j];
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
@@ -53,33 +53,33 @@
mask[(2 * i) * mask_stride + (2 * j + 1)] +
mask[(2 * i + 1) * mask_stride + (2 * j + 1)],
2);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
} else if (subw == 1 && subh == 0) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int m = VPX_BLEND_AVG(mask[i * mask_stride + (2 * j)],
+ const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)],
mask[i * mask_stride + (2 * j + 1)]);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
} else {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int m = VPX_BLEND_AVG(mask[(2 * i) * mask_stride + j],
+ const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j],
mask[(2 * i + 1) * mask_stride + j]);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
const uint8_t *src0_8, uint32_t src0_stride,
const uint8_t *src1_8, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride,
@@ -103,7 +103,7 @@
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
const int m = mask[i * mask_stride + j];
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
@@ -116,28 +116,28 @@
mask[(2 * i) * mask_stride + (2 * j + 1)] +
mask[(2 * i + 1) * mask_stride + (2 * j + 1)],
2);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
} else if (subw == 1 && subh == 0) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int m = VPX_BLEND_AVG(mask[i * mask_stride + (2 * j)],
+ const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)],
mask[i * mask_stride + (2 * j + 1)]);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
} else {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int m = VPX_BLEND_AVG(mask[(2 * i) * mask_stride + j],
+ const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j],
mask[(2 * i + 1) * mask_stride + j]);
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/blend_a64_vmask.c b/aom_dsp/blend_a64_vmask.c
index b22dcd1..05745dc 100644
--- a/aom_dsp/blend_a64_vmask.c
+++ b/aom_dsp/blend_a64_vmask.c
@@ -10,14 +10,14 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-void vpx_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
@@ -34,14 +34,14 @@
for (i = 0; i < h; ++i) {
const int m = mask[i];
for (j = 0; j < w; ++j) {
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_vmask_c(uint8_t *dst_8, uint32_t dst_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_vmask_c(uint8_t *dst_8, uint32_t dst_stride,
const uint8_t *src0_8, uint32_t src0_stride,
const uint8_t *src1_8, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
@@ -63,9 +63,9 @@
for (i = 0; i < h; ++i) {
const int m = mask[i];
for (j = 0; j < w; ++j) {
- dst[i * dst_stride + j] = VPX_BLEND_A64(m, src0[i * src0_stride + j],
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
src1[i * src1_stride + j]);
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/deblock.c b/aom_dsp/deblock.c
index 66995c1..2027054 100644
--- a/aom_dsp/deblock.c
+++ b/aom_dsp/deblock.c
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
-const int16_t vpx_rv[] = {
+const int16_t aom_rv[] = {
8, 5, 2, 2, 8, 12, 4, 9, 8, 3, 0, 3, 9, 0, 0, 0, 8, 3, 14,
4, 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, 8, 6, 10, 0, 0, 8, 9, 0,
3, 14, 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, 1, 2, 3, 14, 13, 1, 8,
@@ -37,7 +37,7 @@
9, 10, 13,
};
-void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
+void aom_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
unsigned char *dst_ptr,
int src_pixels_per_line,
int dst_pixels_per_line, int cols,
@@ -109,7 +109,7 @@
}
}
-void vpx_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows,
+void aom_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows,
int cols, int flimit) {
int r, c, i;
@@ -153,10 +153,10 @@
}
}
-void vpx_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols,
+void aom_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols,
int flimit) {
int r, c, i;
- const int16_t *rv3 = &vpx_rv[63 & rand()];
+ const int16_t *rv3 = &aom_rv[63 & rand()];
for (c = 0; c < cols; c++) {
unsigned char *s = &dst[c];
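
The `63 & rand()` above selects a pseudo-random start offset in [0, 63] into the aom_rv dither table; masking with 63 is the power-of-two modulus trick. A quick check of the equivalence (for the non-negative values rand() returns):

    #include <assert.h>
    #include <stdlib.h>

    int main(void) {
      for (int i = 0; i < 1000; ++i) {
        const int r = rand();
        assert((63 & r) == (r % 64));  /* mask == modulus for powers of two */
      }
      return 0;
    }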
diff --git a/aom_dsp/fastssim.c b/aom_dsp/fastssim.c
index 1bdec95..d9236c0 100644
--- a/aom_dsp/fastssim.c
+++ b/aom_dsp/fastssim.c
@@ -14,8 +14,8 @@
#include <math.h>
#include <stdlib.h>
#include <string.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/ssim.h"
#include "aom_ports/system_state.h"
@@ -24,7 +24,7 @@
#define SSIM_C1 (255 * 255 * 0.01 * 0.01)
#define SSIM_C2 (255 * 255 * 0.03 * 0.03)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define SSIM_C1_10 (1023 * 1023 * 0.01 * 0.01)
#define SSIM_C1_12 (4095 * 4095 * 0.01 * 0.01)
#define SSIM_C2_10 (1023 * 1023 * 0.03 * 0.03)
@@ -197,7 +197,7 @@
int i;
int j;
double ssim_c1 = SSIM_C1;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (bit_depth == 10) ssim_c1 = SSIM_C1_10;
if (bit_depth == 12) ssim_c1 = SSIM_C1_12;
#else
@@ -321,7 +321,7 @@
int i;
int j;
double ssim_c2 = SSIM_C2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (bit_depth == 10) ssim_c2 = SSIM_C2_10;
if (bit_depth == 12) ssim_c2 = SSIM_C2_12;
#else
@@ -465,13 +465,13 @@
return ret;
}
-double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *ssim_y,
double *ssim_u, double *ssim_v, uint32_t bd,
uint32_t in_bd) {
double ssimv;
uint32_t bd_shift = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
assert(bd >= in_bd);
bd_shift = bd - in_bd;
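
The constants above are the standard SSIM stabilizers, $C_1 = (0.01 L)^2$ and $C_2 = (0.03 L)^2$, with dynamic range $L$ of 255 (or 1023 and 4095 for the 10- and 12-bit highbitdepth builds). They enter the usual SSIM definition (standard formula, not restated in this patch):

$$ \mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)} $$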
diff --git a/aom_dsp/fwd_txfm.c b/aom_dsp/fwd_txfm.c
index aecaa93..16db08c 100644
--- a/aom_dsp/fwd_txfm.c
+++ b/aom_dsp/fwd_txfm.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/fwd_txfm.h"
-void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@
}
}
-void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 4; ++r)
@@ -86,7 +86,7 @@
output[0] = sum << 1;
}
-void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
+void aom_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
int i, j;
tran_low_t intermediate[64];
int pass;
@@ -171,7 +171,7 @@
}
}
-void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
tran_low_t sum = 0;
for (r = 0; r < 8; ++r)
@@ -180,7 +180,7 @@
output[0] = sum;
}
-void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
@@ -360,7 +360,7 @@
}
}
-void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
int sum = 0;
for (r = 0; r < 16; ++r)
@@ -382,7 +382,7 @@
return rv;
}
-void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
tran_high_t step[32];
// Stage 1
step[0] = input[0] + input[(32 - 1)];
@@ -705,7 +705,7 @@
output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
}
-void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void aom_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -713,7 +713,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vpx_fdct32(temp_in, temp_out, 0);
+ aom_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
@@ -722,7 +722,7 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vpx_fdct32(temp_in, temp_out, 0);
+ aom_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -732,7 +732,7 @@
// Note that although dct_32_round is used in the dct32 computation flow,
// this 2D fdct32x32 for the rate-distortion optimization loop operates
// within 16-bit precision.
-void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void aom_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
int i, j;
tran_high_t output[32 * 32];
@@ -740,11 +740,11 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
- vpx_fdct32(temp_in, temp_out, 0);
+ aom_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
// TODO(cd): see quality impact of only doing
// output[j * 32 + i] = (temp_out[j] + 1) >> 2;
- // PS: also change code in aom_dsp/x86/vpx_dct_sse2.c
+ // PS: also change code in aom_dsp/x86/aom_dct_sse2.c
output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
@@ -752,12 +752,12 @@
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
- vpx_fdct32(temp_in, temp_out, 1);
+ aom_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
int r, c;
int sum = 0;
for (r = 0; r < 32; ++r)
@@ -766,43 +766,43 @@
output[0] = (tran_low_t)(sum >> 3);
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct4x4_c(input, output, stride);
+ aom_fdct4x4_c(input, output, stride);
}
-void vpx_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void aom_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
int stride) {
- vpx_fdct8x8_c(input, final_output, stride);
+ aom_fdct8x8_c(input, final_output, stride);
}
-void vpx_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+void aom_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
int stride) {
- vpx_fdct8x8_1_c(input, final_output, stride);
+ aom_fdct8x8_1_c(input, final_output, stride);
}
-void vpx_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+void aom_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct16x16_c(input, output, stride);
+ aom_fdct16x16_c(input, output, stride);
}
-void vpx_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void aom_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct16x16_1_c(input, output, stride);
+ aom_fdct16x16_1_c(input, output, stride);
}
-void vpx_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
- vpx_fdct32x32_c(input, out, stride);
+void aom_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+ aom_fdct32x32_c(input, out, stride);
}
-void vpx_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+void aom_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
int stride) {
- vpx_fdct32x32_rd_c(input, out, stride);
+ aom_fdct32x32_rd_c(input, out, stride);
}
-void vpx_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+void aom_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
int stride) {
- vpx_fdct32x32_1_c(input, out, stride);
+ aom_fdct32x32_1_c(input, out, stride);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
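
The forward transforms in this file share one separable structure, as the fdct4x4/fdct16x16 comments describe: a 1D pass over the columns, then the same 1D pass over the rows of the intermediate result. A structural sketch with the 1D butterfly stubbed out (illustration only; the real per-pass rounding and scaling are omitted):

    #include <stdint.h>

    #define N 8

    /* Placeholder for the 1D stage; the real kernels put the DCT
       butterflies from this file here. Identity keeps the sketch runnable. */
    static void fwd1d(const int32_t in[N], int32_t out[N]) {
      for (int j = 0; j < N; ++j) out[j] = in[j];
    }

    static void fdct2d_sketch(const int16_t *input, int stride, int32_t *out) {
      int32_t tmp[N * N], vec_in[N], vec_out[N];
      for (int i = 0; i < N; ++i) {  /* pass 1: columns */
        for (int j = 0; j < N; ++j) vec_in[j] = input[j * stride + i];
        fwd1d(vec_in, vec_out);
        for (int j = 0; j < N; ++j) tmp[j * N + i] = vec_out[j];
      }
      for (int i = 0; i < N; ++i)    /* pass 2: rows */
        fwd1d(&tmp[i * N], &out[i * N]);
    }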
diff --git a/aom_dsp/fwd_txfm.h b/aom_dsp/fwd_txfm.h
index b874dd4..168962a 100644
--- a/aom_dsp/fwd_txfm.h
+++ b/aom_dsp/fwd_txfm.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_FWD_TXFM_H_
-#define VPX_DSP_FWD_TXFM_H_
+#ifndef AOM_DSP_FWD_TXFM_H_
+#define AOM_DSP_FWD_TXFM_H_
#include "aom_dsp/txfm_common.h"
@@ -21,5 +21,5 @@
return rv;
}
-void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif // VPX_DSP_FWD_TXFM_H_
+void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif // AOM_DSP_FWD_TXFM_H_
diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c
index b57ba71..df4ec80 100644
--- a/aom_dsp/intrapred.c
+++ b/aom_dsp/intrapred.c
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#define DST(x, y) dst[(x) + (y)*stride]
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
@@ -289,7 +289,7 @@
}
}
-void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int H = above[-1];
const int I = left[0];
@@ -303,7 +303,7 @@
memset(dst + stride * 3, AVG3(K, L, L), 4);
}
-void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int H = above[-1];
const int I = above[0];
@@ -322,7 +322,7 @@
memcpy(dst + stride * 3, dst, 4);
}
-void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int I = left[0];
const int J = left[1];
@@ -338,7 +338,7 @@
DST(3, 2) = DST(2, 2) = DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
}
-void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int A = above[0];
const int B = above[1];
@@ -361,7 +361,7 @@
DST(3, 3) = AVG3(E, F, G); // differs from vp8
}
-void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int A = above[0];
const int B = above[1];
@@ -385,7 +385,7 @@
DST(3, 3) = AVG3(F, G, H);
}
-void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int A = above[0];
const int B = above[1];
@@ -406,7 +406,7 @@
DST(3, 3) = H; // differs from vp8
}
-void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int A = above[0];
const int B = above[1];
@@ -427,7 +427,7 @@
DST(3, 3) = AVG3(G, H, H);
}
-void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int I = left[0];
const int J = left[1];
@@ -450,7 +450,7 @@
DST(3, 1) = AVG3(B, C, D);
}
-void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int I = left[0];
const int J = left[1];
@@ -471,7 +471,7 @@
DST(3, 0) = AVG3(D, C, B);
}
-void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
+void aom_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const int I = left[0];
const int J = left[1];
@@ -495,7 +495,7 @@
DST(1, 3) = AVG3(L, K, J);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
int bs, const uint16_t *above,
const uint16_t *left, int bd) {
@@ -683,7 +683,7 @@
(void)above;
(void)bd;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, left[r], bs);
+ aom_memset16(dst, left[r], bs);
dst += stride;
}
}
@@ -710,7 +710,7 @@
(void)left;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, 128 << (bd - 8), bs);
+ aom_memset16(dst, 128 << (bd - 8), bs);
dst += stride;
}
}
@@ -726,7 +726,7 @@
expected_dc = (sum + (bs >> 1)) / bs;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, expected_dc, bs);
+ aom_memset16(dst, expected_dc, bs);
dst += stride;
}
}
@@ -742,7 +742,7 @@
expected_dc = (sum + (bs >> 1)) / bs;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, expected_dc, bs);
+ aom_memset16(dst, expected_dc, bs);
dst += stride;
}
}
@@ -762,25 +762,25 @@
expected_dc = (sum + (count >> 1)) / count;
for (r = 0; r < bs; r++) {
- vpx_memset16(dst, expected_dc, bs);
+ aom_memset16(dst, expected_dc, bs);
dst += stride;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// This serves as a wrapper function, so that all the prediction functions
// can be unified and accessed as a pointer array. Note that the boundary
// above and left are not necessarily used all the time.
#define intra_pred_sized(type, size) \
- void vpx_##type##_predictor_##size##x##size##_c( \
+ void aom_##type##_predictor_##size##x##size##_c( \
uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \
const uint8_t *left) { \
type##_predictor(dst, stride, size, above, left); \
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define intra_pred_highbd_sized(type, size) \
- void vpx_highbd_##type##_predictor_##size##x##size##_c( \
+ void aom_highbd_##type##_predictor_##size##x##size##_c( \
uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
const uint16_t *left, int bd) { \
highbd_##type##_predictor(dst, stride, size, above, left, bd); \
@@ -817,7 +817,7 @@
intra_pred_sized(type, 8) \
intra_pred_sized(type, 16) \
intra_pred_sized(type, 32)
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
intra_pred_no_4x4(d207)
intra_pred_no_4x4(d63)
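
Nearly every 4x4 predictor above is built from the AVG3 macro defined near the top of intrapred.c: a (1, 2, 1)/4 smoothing tap with a +2 rounding bias. A worked check:

    #include <assert.h>

    #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

    int main(void) {
      assert(AVG3(100, 120, 140) == 120);  /* (100 + 240 + 140 + 2) >> 2 */
      assert(AVG3(1, 1, 2) == 1);          /* 7 >> 2 truncates after the bias */
      return 0;
    }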
diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c
index 6f92bee..3bb6c09 100644
--- a/aom_dsp/inv_txfm.c
+++ b/aom_dsp/inv_txfm.c
@@ -11,10 +11,10 @@
#include <math.h>
#include <string.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/inv_txfm.h"
-void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
int i;
@@ -66,7 +66,7 @@
}
}
-void vpx_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
+void aom_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
int i;
tran_high_t a1, e1;
tran_low_t tmp[4];
@@ -112,7 +112,7 @@
output[3] = WRAPLOW(step[0] - step[3]);
}
-void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
int i, j;
@@ -136,7 +136,7 @@
}
}
-void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
int dest_stride) {
int i;
tran_high_t a1;
@@ -207,7 +207,7 @@
output[7] = WRAPLOW(step1[0] - step1[7]);
}
-void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j;
@@ -231,7 +231,7 @@
}
}
-void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -357,7 +357,7 @@
output[7] = WRAPLOW(-x1);
}
-void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j;
@@ -547,7 +547,7 @@
output[15] = WRAPLOW(step2[0] - step2[15]);
}
-void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -743,7 +743,7 @@
output[15] = WRAPLOW(-x1);
}
-void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -769,7 +769,7 @@
}
}
-void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
@@ -1148,7 +1148,7 @@
output[31] = WRAPLOW(step1[0] - step1[31]);
}
-void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
@@ -1185,7 +1185,7 @@
}
}
-void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
@@ -1211,7 +1211,7 @@
}
}
-void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
@@ -1237,7 +1237,7 @@
}
}
-void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void aom_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i, j;
tran_high_t a1;
@@ -1251,8 +1251,8 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
0.5 shifts per pixel. */
@@ -1310,7 +1310,7 @@
}
}
-void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+void aom_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
int dest_stride, int bd) {
int i;
tran_high_t a1, e1;
@@ -1343,7 +1343,7 @@
}
}
-void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step[4];
tran_high_t temp1, temp2;
(void)bd;
@@ -1364,7 +1364,7 @@
output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
}
-void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -1374,7 +1374,7 @@
// Rows
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct4_c(input, outptr, bd);
+ aom_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -1382,7 +1382,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vpx_highbd_idct4_c(temp_in, temp_out, bd);
+ aom_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1390,7 +1390,7 @@
}
}
-void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
int dest_stride, int bd) {
int i;
tran_high_t a1;
@@ -1410,7 +1410,7 @@
}
}
-void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -1428,7 +1428,7 @@
step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd);
// stage 2 & stage 3 - even half
- vpx_highbd_idct4_c(step1, step1, bd);
+ aom_highbd_idct4_c(step1, step1, bd);
// stage 2 - odd half
step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1455,7 +1455,7 @@
output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
}
-void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -1465,7 +1465,7 @@
// First transform rows.
for (i = 0; i < 8; ++i) {
- vpx_highbd_idct8_c(input, outptr, bd);
+ aom_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -1473,7 +1473,7 @@
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vpx_highbd_idct8_c(temp_in, temp_out, bd);
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1481,7 +1481,7 @@
}
}
-void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
@@ -1496,7 +1496,7 @@
}
}
-void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -1534,7 +1534,7 @@
output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd);
}
-void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[7];
@@ -1611,7 +1611,7 @@
output[7] = HIGHBD_WRAPLOW(-x1, bd);
}
-void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -1622,14 +1622,14 @@
// First transform rows.
// Only the first 4 rows have non-zero coefs.
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct8_c(input, outptr, bd);
+ aom_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
// Then transform columns.
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vpx_highbd_idct8_c(temp_in, temp_out, bd);
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1637,7 +1637,7 @@
}
}
-void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
(void)bd;
@@ -1803,7 +1803,7 @@
output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
}
-void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -1813,7 +1813,7 @@
// First transform rows.
for (i = 0; i < 16; ++i) {
- vpx_highbd_idct16_c(input, outptr, bd);
+ aom_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -1821,7 +1821,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vpx_highbd_idct16_c(temp_in, temp_out, bd);
+ aom_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1829,7 +1829,7 @@
}
}
-void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -1999,7 +1999,7 @@
output[15] = HIGHBD_WRAPLOW(-x1, bd);
}
-void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -2010,7 +2010,7 @@
// First transform rows. Since all non-zero dct coefficients are in
// the upper-left 4x4 area, we only need to calculate the first 4 rows here.
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct16_c(input, outptr, bd);
+ aom_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -2018,7 +2018,7 @@
// Then transform columns.
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vpx_highbd_idct16_c(temp_in, temp_out, bd);
+ aom_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2026,7 +2026,7 @@
}
}
-void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
@@ -2042,7 +2042,7 @@
}
}
-void vpx_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[32], step2[32];
tran_high_t temp1, temp2;
(void)bd;
@@ -2410,7 +2410,7 @@
output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
}
-void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[32 * 32];
tran_low_t *outptr = out;
@@ -2430,7 +2430,7 @@
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
if (zero_coeff[0] | zero_coeff[1])
- vpx_highbd_idct32_c(input, outptr, bd);
+ aom_highbd_idct32_c(input, outptr, bd);
else
memset(outptr, 0, sizeof(tran_low_t) * 32);
input += 32;
@@ -2440,7 +2440,7 @@
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vpx_highbd_idct32_c(temp_in, temp_out, bd);
+ aom_highbd_idct32_c(temp_in, temp_out, bd);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2448,7 +2448,7 @@
}
}
-void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
@@ -2459,14 +2459,14 @@
// Rows
// Only the upper-left 8x8 has non-zero coeffs.
for (i = 0; i < 8; ++i) {
- vpx_highbd_idct32_c(input, outptr, bd);
+ aom_highbd_idct32_c(input, outptr, bd);
input += 32;
outptr += 32;
}
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
- vpx_highbd_idct32_c(temp_in, temp_out, bd);
+ aom_highbd_idct32_c(temp_in, temp_out, bd);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -2474,7 +2474,7 @@
}
}
-void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
int a1;
@@ -2490,4 +2490,4 @@
dest += stride;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
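Every *_add_c inverse transform in this file follows the same separable
row/column scheme, which is why the rename stays mechanical. A sketch of the
pattern, modeled on aom_idct4x4_16_add_c above; idct4_c is declared in
inv_txfm.h, and clip_pixel_add is assumed here as the 8-bit counterpart of
the highbd_clip_pixel_add used by the highbitdepth paths:

  /* Separable 2-D inverse transform: 1-D pass over the rows, 1-D pass
     over the columns, then round and add into the destination block. */
  static void idct4x4_16_add_sketch(const tran_low_t *input, uint8_t *dest,
                                    int stride) {
    tran_low_t out[4 * 4], temp_in[4], temp_out[4];
    tran_low_t *outptr = out;
    int i, j;
    for (i = 0; i < 4; ++i) {  /* rows */
      idct4_c(input, outptr);
      input += 4;
      outptr += 4;
    }
    for (i = 0; i < 4; ++i) {  /* columns */
      for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
      idct4_c(temp_in, temp_out);
      for (j = 0; j < 4; ++j) {
        dest[j * stride + i] = clip_pixel_add(
            dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4));
      }
    }
  }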
diff --git a/aom_dsp/inv_txfm.h b/aom_dsp/inv_txfm.h
index 6865eaf..211ac63 100644
--- a/aom_dsp/inv_txfm.h
+++ b/aom_dsp/inv_txfm.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_INV_TXFM_H_
-#define VPX_DSP_INV_TXFM_H_
+#ifndef AOM_DSP_INV_TXFM_H_
+#define AOM_DSP_INV_TXFM_H_
#include <assert.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
#include "aom_ports/mem.h"
@@ -23,9 +23,9 @@
static INLINE tran_high_t check_range(tran_high_t input) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
- // For valid VP9 input streams, intermediate stage coefficients should always
+ // For valid AV1 input streams, intermediate stage coefficients should always
// stay within the range of a signed 16 bit integer. Coefficients can go out
- // of this range for invalid/corrupt VP9 streams. However, strictly checking
+ // of this range for invalid/corrupt AV1 streams. However, strictly checking
// this range for every intermediate coefficient can be burdensome for a decoder,
// therefore the following assertion is only enabled when configured with
// --enable-coefficient-range-checking.
@@ -40,10 +40,10 @@
return rv;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
- // For valid highbitdepth VP9 streams, intermediate stage coefficients will
+ // For valid highbitdepth AV1 streams, intermediate stage coefficients will
// stay within the ranges:
// - 8 bit: signed 16 bit integer
// - 10 bit: signed 18 bit integer
@@ -62,7 +62,7 @@
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
return rv;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#if CONFIG_EMULATE_HARDWARE
// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -83,17 +83,17 @@
// A bit depth of x uses tran_low_t with 8+x bits, so remove the top 24-x bits
#define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) \
((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else // CONFIG_EMULATE_HARDWARE
#define WRAPLOW(x) ((int32_t)check_range(x))
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EMULATE_HARDWARE
void idct4_c(const tran_low_t *input, tran_low_t *output);
@@ -104,15 +104,15 @@
void iadst8_c(const tran_low_t *input, tran_low_t *output);
void iadst16_c(const tran_low_t *input, tran_low_t *output);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
int bd) {
@@ -129,4 +129,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_INV_TXFM_H_
+#endif // AOM_DSP_INV_TXFM_H_
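Under CONFIG_EMULATE_HARDWARE, WRAPLOW wraps a 32-bit intermediate into
int16_t range with shift-based sign extension, and HIGHBD_WRAPLOW does the
same with (24 - bd) shifts to keep 8 + bd significant bits. A worked value
for the 8-bit case:

  x = 40000                    /* 0x00009C40 */
  ((int32_t)x << 16) >> 16     /* keeps the low 16 bits and sign-extends:
                                  0x9C40 -> -25536 */

So an out-of-range coefficient wraps, as 16-bit hardware would, rather than
saturating.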
diff --git a/aom_dsp/loopfilter.c b/aom_dsp/loopfilter.c
index 53d028c..1387495 100644
--- a/aom_dsp/loopfilter.c
+++ b/aom_dsp/loopfilter.c
@@ -10,16 +10,16 @@
#include <stdlib.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
static INLINE int8_t signed_char_clamp(int t) {
return (int8_t)clamp(t, -128, 127);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int16_t signed_char_clamp_high(int t, int bd) {
switch (bd) {
case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1);
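The bd = 10 case shown scales the 8-bit clamp range [-128, 127] by
1 << (bd - 8); the remaining cases presumably follow the same pattern:

  bd =  8: [-128,       127]
  bd = 10: [-128 * 4,   128 * 4 - 1]  = [-512,  511]
  bd = 12: [-128 * 16,  128 * 16 - 1] = [-2048, 2047]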
@@ -109,7 +109,7 @@
*op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
}
-void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
+void aom_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
int i;
@@ -126,15 +126,15 @@
}
}
-void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1);
+ aom_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0);
+ aom_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
@@ -150,12 +150,12 @@
}
}
-void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0);
- vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
+ aom_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0);
+ aom_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
}
static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
@@ -178,7 +178,7 @@
}
}
-void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
@@ -197,15 +197,15 @@
}
}
-void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1);
+ aom_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0);
+ aom_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
@@ -221,12 +221,12 @@
}
}
-void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0);
- vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
+ aom_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0);
+ aom_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
}
static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat,
@@ -308,12 +308,12 @@
}
}
-void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1);
}
-void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2);
}
@@ -339,17 +339,17 @@
}
}
-void vpx_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8);
}
-void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Should we apply any filter at all: 11111111 yes, 00000000 no?
static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
uint16_t p3, uint16_t p2, uint16_t p1,
@@ -440,7 +440,7 @@
*op1 = signed_char_clamp_high(ps1 + filter, bd) + (0x80 << shift);
}
-void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
+void aom_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh, int bd) {
int i;
@@ -463,15 +463,15 @@
}
}
-void vpx_highbd_lpf_horizontal_4_dual_c(
+void aom_highbd_lpf_horizontal_4_dual_c(
uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd);
+ aom_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd);
+ aom_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd);
}
-void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
@@ -488,12 +488,12 @@
}
}
-void vpx_highbd_lpf_vertical_4_dual_c(
+void aom_highbd_lpf_vertical_4_dual_c(
uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
+ aom_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, bd);
+ aom_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
bd);
}
@@ -517,7 +517,7 @@
}
}
-void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
@@ -538,15 +538,15 @@
}
}
-void vpx_highbd_lpf_horizontal_8_dual_c(
+void aom_highbd_lpf_horizontal_8_dual_c(
uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd);
+ aom_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd);
+ aom_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd);
}
-void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
@@ -564,12 +564,12 @@
}
}
-void vpx_highbd_lpf_vertical_8_dual_c(
+void aom_highbd_lpf_vertical_8_dual_c(
uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
+ aom_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, bd);
+ aom_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1,
bd);
}
@@ -673,14 +673,14 @@
}
}
-void vpx_highbd_lpf_horizontal_edge_8_c(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_8_c(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1, bd);
}
-void vpx_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
@@ -717,16 +717,16 @@
}
}
-void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd);
}
-void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
+void aom_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/mips/add_noise_msa.c b/aom_dsp/mips/add_noise_msa.c
index 366770c..fe3510d 100644
--- a/aom_dsp/mips/add_noise_msa.c
+++ b/aom_dsp/mips/add_noise_msa.c
@@ -11,7 +11,7 @@
#include <stdlib.h>
#include "./macros_msa.h"
-void vpx_plane_add_noise_msa(uint8_t *start_ptr, char *noise,
+void aom_plane_add_noise_msa(uint8_t *start_ptr, char *noise,
char blackclamp[16], char whiteclamp[16],
char bothclamp[16], uint32_t width,
uint32_t height, int32_t pitch) {
diff --git a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
index 300656c..86f3e95 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
int32_t src_stride, uint8_t *dst,
@@ -631,7 +631,7 @@
}
}
-void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -668,7 +668,7 @@
(int32_t)dst_stride, &filt_hor[3], h);
break;
default:
- vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
@@ -695,7 +695,7 @@
(int32_t)dst_stride, filt_hor, h);
break;
default:
- vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
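Every MSA entry point in these files shares one dispatch shape: a switch on
the block width picks a vectorized kernel, and any unhandled width falls back
to the renamed generic C routine. Stripped to its skeleton:

  switch (w) {
    case 4:  /* 4-wide MSA kernel */  break;
    case 8:  /* 8-wide MSA kernel */  break;
    /* 16-, 32- and 64-wide cases likewise */
    default:
      aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                                x_step_q4, filter_y, y_step_q4, w, h);
      break;
  }

The rename therefore only touches symbol names, never the control flow.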
diff --git a/aom_dsp/mips/vpx_convolve8_avg_msa.c b/aom_dsp/mips/aom_convolve8_avg_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_avg_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_msa.c
index 8037661..2e66449 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
static void common_hv_8ht_8vt_and_aver_dst_4w_msa(
const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
@@ -514,7 +514,7 @@
}
}
-void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -560,13 +560,13 @@
&filt_hor[3], &filt_ver[3], h);
break;
default:
- vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
} else if (((const int32_t *)filter_x)[0] == 0 ||
((const int32_t *)filter_y)[0] == 0) {
- vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
} else {
switch (w) {
@@ -596,7 +596,7 @@
filt_ver, h);
break;
default:
- vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
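The ((const int32_t *)filter_x)[0] == 0 test above reads the first two
16-bit taps as one 32-bit word (these MIPS builds are little-endian): when
both outer taps are zero the filter is effectively shorter, and the code can
divert to a cheaper path instead of the full 8-tap kernel. A spelled-out
equivalent, assuming that endianness:

  int outer_taps_zero = (filter_x[0] == 0 && filter_x[1] == 0);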
diff --git a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_avg_vert_msa.c
rename to aom_dsp/mips/aom_convolve8_avg_vert_msa.c
index b3c9b6b..f6e966e 100644
--- a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c
+++ b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
int32_t src_stride, uint8_t *dst,
@@ -603,7 +603,7 @@
}
}
-void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -640,7 +640,7 @@
(int32_t)dst_stride, &filt_ver[3], h);
break;
default:
- vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
@@ -668,7 +668,7 @@
(int32_t)dst_stride, filt_ver, h);
break;
default:
- vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/vpx_convolve8_horiz_msa.c b/aom_dsp/mips/aom_convolve8_horiz_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_horiz_msa.c
rename to aom_dsp/mips/aom_convolve8_horiz_msa.c
index 256abd5..7416482 100644
--- a/aom_dsp/mips/vpx_convolve8_horiz_msa.c
+++ b/aom_dsp/mips/aom_convolve8_horiz_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
@@ -619,7 +619,7 @@
}
}
-void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -656,7 +656,7 @@
&filt_hor[3], h);
break;
default:
- vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
@@ -683,7 +683,7 @@
filt_hor, h);
break;
default:
- vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/vpx_convolve8_msa.c b/aom_dsp/mips/aom_convolve8_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_msa.c
rename to aom_dsp/mips/aom_convolve8_msa.c
index 81d4f14..45761d3 100644
--- a/aom_dsp/mips/vpx_convolve8_msa.c
+++ b/aom_dsp/mips/aom_convolve8_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
const uint8_t mc_filt_mask_arr[16 * 3] = {
/* 8 width cases */
@@ -540,7 +540,7 @@
}
}
-void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int32_t x_step_q4, const int16_t *filter_y,
int32_t y_step_q4, int32_t w, int32_t h) {
@@ -585,13 +585,13 @@
&filt_ver[3], (int32_t)h);
break;
default:
- vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
break;
}
} else if (((const int32_t *)filter_x)[0] == 0 ||
((const int32_t *)filter_y)[0] == 0) {
- vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
} else {
switch (w) {
@@ -621,7 +621,7 @@
(int32_t)h);
break;
default:
- vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+ aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/vpx_convolve8_vert_msa.c b/aom_dsp/mips/aom_convolve8_vert_msa.c
similarity index 98%
rename from aom_dsp/mips/vpx_convolve8_vert_msa.c
rename to aom_dsp/mips/aom_convolve8_vert_msa.c
index 0404575..4d634c4 100644
--- a/aom_dsp/mips/vpx_convolve8_vert_msa.c
+++ b/aom_dsp/mips/aom_convolve8_vert_msa.c
@@ -9,8 +9,8 @@
*/
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/mips/vpx_convolve_msa.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/mips/aom_convolve_msa.h"
static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
@@ -626,7 +626,7 @@
}
}
-void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -663,7 +663,7 @@
&filt_ver[3], h);
break;
default:
- vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
@@ -690,7 +690,7 @@
filt_ver, h);
break;
default:
- vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/vpx_convolve_avg_msa.c b/aom_dsp/mips/aom_convolve_avg_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve_avg_msa.c
rename to aom_dsp/mips/aom_convolve_avg_msa.c
index 313223b..f6d9c09 100644
--- a/aom_dsp/mips/vpx_convolve_avg_msa.c
+++ b/aom_dsp/mips/aom_convolve_avg_msa.c
@@ -186,7 +186,7 @@
}
}
-void vpx_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int32_t filter_x_stride,
const int16_t *filter_y, int32_t filter_y_stride,
diff --git a/aom_dsp/mips/vpx_convolve_copy_msa.c b/aom_dsp/mips/aom_convolve_copy_msa.c
similarity index 99%
rename from aom_dsp/mips/vpx_convolve_copy_msa.c
rename to aom_dsp/mips/aom_convolve_copy_msa.c
index 520a706..8151609 100644
--- a/aom_dsp/mips/vpx_convolve_copy_msa.c
+++ b/aom_dsp/mips/aom_convolve_copy_msa.c
@@ -196,7 +196,7 @@
copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
}
-void vpx_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int32_t filter_x_stride,
const int16_t *filter_y, int32_t filter_y_stride,
diff --git a/aom_dsp/mips/vpx_convolve_msa.h b/aom_dsp/mips/aom_convolve_msa.h
similarity index 97%
rename from aom_dsp/mips/vpx_convolve_msa.h
rename to aom_dsp/mips/aom_convolve_msa.h
index 6b48879..fc2748f 100644
--- a/aom_dsp/mips/vpx_convolve_msa.h
+++ b/aom_dsp/mips/aom_convolve_msa.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_
-#define VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_
+#ifndef AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
+#define AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_
#include "aom_dsp/mips/macros_msa.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
extern const uint8_t mc_filt_mask_arr[16 * 3];
@@ -121,4 +121,4 @@
AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m); \
ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \
}
-#endif /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */
+#endif /* AOM_DSP_MIPS_AOM_CONVOLVE_MSA_H_ */
diff --git a/aom_dsp/mips/avg_msa.c b/aom_dsp/mips/avg_msa.c
index 5896708..0dae2ed 100644
--- a/aom_dsp/mips/avg_msa.c
+++ b/aom_dsp/mips/avg_msa.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
-uint32_t vpx_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
+uint32_t aom_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
uint32_t sum_out;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
@@ -33,7 +33,7 @@
return sum_out;
}
-uint32_t vpx_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
+uint32_t aom_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
uint32_t sum_out;
uint32_t src0, src1, src2, src3;
v16u8 vec = { 0 };
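For comparison with the vector code above, a scalar sketch of the 8x8
average: sum the 64 pixels, then divide by 64 with rounding. The +32 bias is
an assumption matching the usual aom_dsp rounding; the authoritative version
is the plain-C implementation elsewhere in aom_dsp:

  #include <stdint.h>

  static uint32_t avg_8x8_sketch(const uint8_t *src, int stride) {
    int i, j, sum = 0;
    for (i = 0; i < 8; ++i, src += stride)
      for (j = 0; j < 8; ++j) sum += src[j];
    return (uint32_t)((sum + 32) >> 6); /* round to nearest, divide by 64 */
  }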
diff --git a/aom_dsp/mips/common_dspr2.c b/aom_dsp/mips/common_dspr2.c
index 268bbcd..537a92f 100644
--- a/aom_dsp/mips/common_dspr2.c
+++ b/aom_dsp/mips/common_dspr2.c
@@ -11,20 +11,20 @@
#include "aom_dsp/mips/common_dspr2.h"
#if HAVE_DSPR2
-uint8_t vpx_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
-uint8_t *vpx_ff_cropTbl;
+uint8_t aom_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
+uint8_t *aom_ff_cropTbl;
-void vpx_dsputil_static_init(void) {
+void aom_dsputil_static_init(void) {
int i;
- for (i = 0; i < 256; i++) vpx_ff_cropTbl_a[i + CROP_WIDTH] = i;
+ for (i = 0; i < 256; i++) aom_ff_cropTbl_a[i + CROP_WIDTH] = i;
for (i = 0; i < CROP_WIDTH; i++) {
- vpx_ff_cropTbl_a[i] = 0;
- vpx_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
+ aom_ff_cropTbl_a[i] = 0;
+ aom_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
}
- vpx_ff_cropTbl = &vpx_ff_cropTbl_a[CROP_WIDTH];
+ aom_ff_cropTbl = &aom_ff_cropTbl_a[CROP_WIDTH];
}
#endif
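The table built above is a branch-free byte clamp: aom_ff_cropTbl points
CROP_WIDTH entries into the backing array, so negative indices are legal and
cm[x] saturates x to [0, 255] for any x in [-CROP_WIDTH, 256 + CROP_WIDTH).
The DSPr2 kernels below use it to clamp filter accumulators with a single
load:

  uint8_t clamped = cm[x];
  /* behaves like: x < 0 ? 0 : (x > 255 ? 255 : x) */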
diff --git a/aom_dsp/mips/common_dspr2.h b/aom_dsp/mips/common_dspr2.h
index 1da490a..2c508ad 100644
--- a/aom_dsp/mips/common_dspr2.h
+++ b/aom_dsp/mips/common_dspr2.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_COMMON_MIPS_DSPR2_H_
-#define VPX_COMMON_MIPS_DSPR2_H_
+#ifndef AOM_COMMON_MIPS_DSPR2_H_
+#define AOM_COMMON_MIPS_DSPR2_H_
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -21,7 +21,7 @@
#if HAVE_DSPR2
#define CROP_WIDTH 512
-extern uint8_t *vpx_ff_cropTbl; // From "aom_dsp/mips/intrapred4_dspr2.c"
+extern uint8_t *aom_ff_cropTbl; // From "aom_dsp/mips/intrapred4_dspr2.c"
static INLINE void prefetch_load(const unsigned char *src) {
__asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
@@ -45,4 +45,4 @@
} // extern "C"
#endif
-#endif // VPX_COMMON_MIPS_DSPR2_H_
+#endif // AOM_COMMON_MIPS_DSPR2_H_
diff --git a/aom_dsp/mips/convolve2_avg_dspr2.c b/aom_dsp/mips/convolve2_avg_dspr2.c
index b73eba2..1cbfe74 100644
--- a/aom_dsp/mips/convolve2_avg_dspr2.c
+++ b/aom_dsp/mips/convolve2_avg_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -25,7 +25,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2;
uint32_t p1, p2;
@@ -124,7 +124,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2;
uint32_t p1, p2;
@@ -217,7 +217,7 @@
}
}
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -247,7 +247,7 @@
h);
break;
default:
- vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
index 765c902..d8639b7 100644
--- a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -23,7 +23,7 @@
int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
int32_t Temp1, Temp2, Temp3, Temp4;
uint32_t vector4a = 64;
uint32_t tp1, tp2;
@@ -114,7 +114,7 @@
int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t tp1, tp2, tp3, tp4;
@@ -261,7 +261,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2, qload3;
@@ -508,7 +508,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2, qload3;
@@ -749,7 +749,7 @@
}
}
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
@@ -793,7 +793,7 @@
h);
break;
default:
- vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve2_dspr2.c b/aom_dsp/mips/convolve2_dspr2.c
index 78ee6e0..ee9da6d 100644
--- a/aom_dsp/mips/convolve2_dspr2.c
+++ b/aom_dsp/mips/convolve2_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -22,7 +22,7 @@
const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint8_t *dst_ptr;
int32_t Temp1, Temp2;
uint32_t vector4a = 64;
@@ -106,7 +106,7 @@
const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint8_t *dst_ptr;
uint32_t vector4a = 64;
int32_t Temp1, Temp2, Temp3;
@@ -242,7 +242,7 @@
int32_t c, y;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2;
@@ -607,7 +607,7 @@
int32_t c, y;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2;
@@ -987,7 +987,7 @@
}
}
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter, int w,
int h) {
uint32_t pos = 38;
diff --git a/aom_dsp/mips/convolve2_horiz_dspr2.c b/aom_dsp/mips/convolve2_horiz_dspr2.c
index 0d6ebea..275f859 100644
--- a/aom_dsp/mips/convolve2_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve2_horiz_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -22,7 +22,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
int32_t Temp1, Temp2, Temp3, Temp4;
uint32_t vector4a = 64;
uint32_t tp1, tp2;
@@ -99,7 +99,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t tp1, tp2, tp3;
@@ -223,7 +223,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2, qload3;
@@ -427,7 +427,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t Temp1, Temp2, Temp3;
uint32_t qload1, qload2, qload3;
@@ -626,7 +626,7 @@
}
}
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -672,7 +672,7 @@
(int32_t)dst_stride, filter_x, (int32_t)h);
break;
default:
- vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve2_vert_dspr2.c b/aom_dsp/mips/convolve2_vert_dspr2.c
index a9d0cbf..0af7b6a 100644
--- a/aom_dsp/mips/convolve2_vert_dspr2.c
+++ b/aom_dsp/mips/convolve2_vert_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -25,7 +25,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2;
uint32_t p1, p2;
@@ -115,7 +115,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2;
uint32_t p1, p2;
@@ -199,7 +199,7 @@
}
}
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -228,7 +228,7 @@
convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
break;
default:
- vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve8_avg_dspr2.c b/aom_dsp/mips/convolve8_avg_dspr2.c
index 8baf33a..774616a 100644
--- a/aom_dsp/mips/convolve8_avg_dspr2.c
+++ b/aom_dsp/mips/convolve8_avg_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -25,7 +25,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2, load3, load4;
uint32_t p1, p2;
@@ -181,7 +181,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2, load3, load4;
uint32_t p1, p2;
@@ -332,7 +332,7 @@
}
}
-void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -341,7 +341,7 @@
assert(((const int32_t *)filter_y)[1] != 0x800000);
if (((const int32_t *)filter_y)[0] == 0) {
- vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
} else {
uint32_t pos = 38;
@@ -367,14 +367,14 @@
h);
break;
default:
- vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
}
}
-void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -390,14 +390,14 @@
if (intermediate_height < h) intermediate_height = h;
- vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
+ aom_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
x_step_q4, filter_y, y_step_q4, w, intermediate_height);
- vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+ aom_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}
-void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride, int w,
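aom_convolve8_avg_dspr2 above is the standard two-pass composition: filter
horizontally into a 64-wide intermediate buffer starting three rows early
(the 8-tap vertical filter needs 3 rows of context above each output row),
then run the averaging vertical pass out of temp + 64 * 3. In the
non-scaling case (y_step_q4 == 16) the intermediate height works out to
h + 7; a sketch of the wiring under that assumption, with w, h <= 64:

  uint8_t temp[64 * (64 + 7)];
  aom_convolve8_horiz(src - 3 * src_stride, src_stride, temp, 64, filter_x,
                      x_step_q4, filter_y, y_step_q4, w, h + 7);
  aom_convolve8_avg_vert(temp + 3 * 64, 64, dst, dst_stride, filter_x,
                         x_step_q4, filter_y, y_step_q4, w, h);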
diff --git a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
index d732d2e..3267446 100644
--- a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_convolve.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -22,7 +22,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
int32_t vector1b, vector2b, vector3b, vector4b;
int32_t Temp1, Temp2, Temp3, Temp4;
uint32_t vector4a = 64;
@@ -139,7 +139,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
int32_t vector1b, vector2b, vector3b, vector4b;
int32_t Temp1, Temp2, Temp3;
@@ -325,7 +325,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -633,7 +633,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -936,7 +936,7 @@
}
}
-void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
@@ -945,7 +945,7 @@
assert(((const int32_t *)filter_x)[1] != 0x800000);
if (((const int32_t *)filter_x)[0] == 0) {
- vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
} else {
uint32_t pos = 38;
@@ -987,7 +987,7 @@
h);
break;
default:
- vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
+ aom_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4, w,
h);
break;
diff --git a/aom_dsp/mips/convolve8_dspr2.c b/aom_dsp/mips/convolve8_dspr2.c
index 09a9083..db2e6ef 100644
--- a/aom_dsp/mips/convolve8_dspr2.c
+++ b/aom_dsp/mips/convolve8_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -24,7 +24,7 @@
const int16_t *filter_x0,
int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint8_t *dst_ptr;
int32_t vector1b, vector2b, vector3b, vector4b;
int32_t Temp1, Temp2, Temp3, Temp4;
@@ -138,7 +138,7 @@
const int16_t *filter_x0,
int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint8_t *dst_ptr;
uint32_t vector4a = 64;
int32_t vector1b, vector2b, vector3b, vector4b;
@@ -311,7 +311,7 @@
int32_t c, y;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -789,7 +789,7 @@
int32_t c, y;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -1295,7 +1295,7 @@
}
}
-void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
@@ -1320,7 +1320,7 @@
copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
intermediate_height, w, intermediate_height);
} else if (((const int32_t *)filter_x)[0] == 0) {
- vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
+ aom_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
intermediate_height, filter_x, w, intermediate_height);
} else {
src -= (src_stride * 3 + 3);
@@ -1363,7 +1363,7 @@
if (filter_y[3] == 0x80) {
copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
} else if (((const int32_t *)filter_y)[0] == 0) {
- vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
+ aom_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
filter_y, h, w);
} else {
switch (h) {
@@ -1392,7 +1392,7 @@
}
}
-void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
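
A note on the two-pass structure visible in aom_convolve8_dspr2 above: the 2-D filter runs as a horizontal pass into a temp buffer followed by a vertical pass, and the source pointer is backed up by three rows and three columns because an 8-tap kernel reaches three samples before its center. A plain-C sketch of that layout, assuming the usual h + 7 intermediate height and a (sum + 64) >> 7 rounding convention; names and kernels here are illustrative stand-ins, not the dspr2 code:

#include <stdint.h>

enum { TAPS = 8 };

static uint8_t clamp255(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* Pass 1 filters horizontally into temp (h + 7 rows: the vertical pass
 * needs 3 rows above and 4 below each output row). Pass 2 filters temp
 * vertically into dst. */
static void convolve8_2d_sketch(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const int16_t *fx, const int16_t *fy,
                                int w, int h, uint8_t *temp /* w*(h+7) */) {
  const int ih = h + TAPS - 1;
  src -= src_stride * 3 + 3; /* back up to the first tap, as in the diff */
  for (int y = 0; y < ih; ++y)
    for (int x = 0; x < w; ++x) {
      int sum = 0;
      for (int k = 0; k < TAPS; ++k) sum += fx[k] * src[y * src_stride + x + k];
      temp[y * w + x] = clamp255((sum + 64) >> 7);
    }
  for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x) {
      int sum = 0;
      for (int k = 0; k < TAPS; ++k) sum += fy[k] * temp[(y + k) * w + x];
      dst[y * dst_stride + x] = clamp255((sum + 64) >> 7);
    }
}

int main(void) {
  uint8_t src[11 * 11], dst[4 * 4], temp[4 * 11];
  static const int16_t f[TAPS] = { 0, 0, 0, 128, 0, 0, 0, 0 }; /* identity */
  for (int i = 0; i < 11 * 11; ++i) src[i] = (uint8_t)i;
  convolve8_2d_sketch(src + 3 * 11 + 3, 11, dst, 4, f, f, 4, 4, temp);
  return 0;
}
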
diff --git a/aom_dsp/mips/convolve8_horiz_dspr2.c b/aom_dsp/mips/convolve8_horiz_dspr2.c
index 66692be..cbd9b5a 100644
--- a/aom_dsp/mips/convolve8_horiz_dspr2.c
+++ b/aom_dsp/mips/convolve8_horiz_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -22,7 +22,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
int32_t vector1b, vector2b, vector3b, vector4b;
int32_t Temp1, Temp2, Temp3, Temp4;
uint32_t vector4a = 64;
@@ -128,7 +128,7 @@
uint8_t *dst, int32_t dst_stride,
const int16_t *filter_x0, int32_t h) {
int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
int32_t vector1b, vector2b, vector3b, vector4b;
int32_t Temp1, Temp2, Temp3;
@@ -290,7 +290,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -555,7 +555,7 @@
int32_t y, c;
const uint8_t *src;
uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector_64 = 64;
int32_t filter12, filter34, filter56, filter78;
int32_t Temp1, Temp2, Temp3;
@@ -816,7 +816,7 @@
}
}
-void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -825,7 +825,7 @@
assert(((const int32_t *)filter_x)[1] != 0x800000);
if (((const int32_t *)filter_x)[0] == 0) {
- vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
} else {
uint32_t pos = 38;
@@ -868,7 +868,7 @@
(int32_t)dst_stride, filter_x, (int32_t)h);
break;
default:
- vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve8_vert_dspr2.c b/aom_dsp/mips/convolve8_vert_dspr2.c
index 1594f10..7aee3b7 100644
--- a/aom_dsp/mips/convolve8_vert_dspr2.c
+++ b/aom_dsp/mips/convolve8_vert_dspr2.c
@@ -11,10 +11,10 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/convolve_common_dspr2.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_ports/mem.h"
#if HAVE_DSPR2
@@ -25,7 +25,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2, load3, load4;
uint32_t p1, p2;
@@ -173,7 +173,7 @@
int32_t x, y;
const uint8_t *src_ptr;
uint8_t *dst_ptr;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
uint32_t vector4a = 64;
uint32_t load1, load2, load3, load4;
uint32_t p1, p2;
@@ -316,7 +316,7 @@
}
}
-void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -325,7 +325,7 @@
assert(((const int32_t *)filter_y)[1] != 0x800000);
if (((const int32_t *)filter_y)[0] == 0) {
- vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
} else {
uint32_t pos = 38;
@@ -349,7 +349,7 @@
convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
break;
default:
- vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+ aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
break;
}
diff --git a/aom_dsp/mips/convolve_common_dspr2.h b/aom_dsp/mips/convolve_common_dspr2.h
index b650019..e9dbf2d 100644
--- a/aom_dsp/mips/convolve_common_dspr2.h
+++ b/aom_dsp/mips/convolve_common_dspr2.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
-#define VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
+#ifndef AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
+#define AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/mips/common_dspr2.h"
#ifdef __cplusplus
@@ -22,29 +22,29 @@
#endif
#if HAVE_DSPR2
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
int h);
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
int h);
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter, int w,
int h);
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w,
@@ -55,4 +55,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
+#endif // AOM_DSP_MIPS_AOM_COMMON_DSPR2_H_
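
The wrappers renamed above all share the same dispatch: they test ((const int32_t *)filter_x)[0] == 0 to detect a short kernel, take the aom_convolve2_* path when it fires, and fall back to the plain C function for unusual widths. A minimal sketch of the aliasing test, assuming taps 0 and 1 pack into the first 32-bit word as on the MIPS targets this code is built for:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: reading the 8-tap int16_t filter as int32_t words checks two
 * taps per load. A zero first word means taps 0 and 1 are both zero, so the
 * cheaper 2-tap path can be taken. */
static int is_short_kernel(const int16_t *filter) {
  return ((const int32_t *)filter)[0] == 0;
}

int main(void) {
  static const int16_t eight_tap[8] = { -1, 3, -10, 35, 105, -13, 5, -2 };
  static const int16_t two_tap[8] = { 0, 0, 0, 64, 64, 0, 0, 0 };
  printf("%d %d\n", is_short_kernel(eight_tap), is_short_kernel(two_tap)); /* 0 1 */
  return 0;
}
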
diff --git a/aom_dsp/mips/deblock_msa.c b/aom_dsp/mips/deblock_msa.c
index 402d7ed..37e3e4a 100644
--- a/aom_dsp/mips/deblock_msa.c
+++ b/aom_dsp/mips/deblock_msa.c
@@ -11,9 +11,9 @@
#include <stdlib.h>
#include "./macros_msa.h"
-extern const int16_t vpx_rv[];
+extern const int16_t aom_rv[];
-#define VPX_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0, \
+#define AOM_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7, \
out8, out9, out10, out11, out12, out13, out14, \
out15) \
@@ -47,7 +47,7 @@
out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
}
-#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
+#define AOM_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
ref, out) \
{ \
v16u8 temp0, temp1; \
@@ -109,7 +109,7 @@
in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \
}
-#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
+#define AOM_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
in9, in10, in11) \
{ \
v8i16 temp0, temp1, temp2, temp3; \
@@ -159,21 +159,21 @@
LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
src = LD_UB(p_src);
LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
above2 = LD_UB(p_src + 3 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
above1 = LD_UB(p_src + 4 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
src = LD_UB(p_src + 5 * src_stride);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
below1 = LD_UB(p_src + 6 * src_stride);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
below2 = LD_UB(p_src + 7 * src_stride);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
above2 = LD_UB(p_src + 8 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
above1 = LD_UB(p_src + 9 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
p_dst, dst_stride);
@@ -187,21 +187,21 @@
LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
src = LD_UB(p_src);
LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
above2 = LD_UB(p_src + 3 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
above1 = LD_UB(p_src + 4 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
src = LD_UB(p_src + 5 * src_stride);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
below1 = LD_UB(p_src + 6 * src_stride);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
below2 = LD_UB(p_src + 7 * src_stride);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
above2 = LD_UB(p_src + 8 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
above1 = LD_UB(p_src + 9 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
out0 = __msa_copy_u_d((v2i64)inter0, 0);
out1 = __msa_copy_u_d((v2i64)inter1, 0);
out2 = __msa_copy_u_d((v2i64)inter2, 0);
@@ -223,7 +223,7 @@
for (col = 0; col < (cols / 8); ++col) {
ref = LD_UB(f);
f += 8;
- VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
+ AOM_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
inter6, inter7, inter8, inter9, inter10, inter11);
if (0 == col) {
above2 = inter2;
@@ -236,36 +236,36 @@
below1 = inter3;
below2 = inter4;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
above2 = inter5;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
above1 = inter6;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
src = inter7;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
below1 = inter8;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
below2 = inter9;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
if (col == (cols / 8 - 1)) {
above2 = inter9;
} else {
above2 = inter10;
}
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
if (col == (cols / 8 - 1)) {
above1 = inter9;
} else {
above1 = inter11;
}
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8,
inter9, inter2, inter3, inter4, inter5, inter6, inter7,
inter8, inter9);
@@ -306,37 +306,37 @@
LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
src = LD_UB(p_src);
LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
above2 = LD_UB(p_src + 3 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
above1 = LD_UB(p_src + 4 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
src = LD_UB(p_src + 5 * src_stride);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
below1 = LD_UB(p_src + 6 * src_stride);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
below2 = LD_UB(p_src + 7 * src_stride);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
above2 = LD_UB(p_src + 8 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
above1 = LD_UB(p_src + 9 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
src = LD_UB(p_src + 10 * src_stride);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
below1 = LD_UB(p_src + 11 * src_stride);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
below2 = LD_UB(p_src + 12 * src_stride);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
above2 = LD_UB(p_src + 13 * src_stride);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
above1 = LD_UB(p_src + 14 * src_stride);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
src = LD_UB(p_src + 15 * src_stride);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
below1 = LD_UB(p_src + 16 * src_stride);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
below2 = LD_UB(p_src + 17 * src_stride);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
p_dst, dst_stride);
ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14, inter15,
@@ -371,37 +371,37 @@
below1 = inter3;
below2 = inter4;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
above2 = inter5;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
above1 = inter6;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
src = inter7;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
- VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
+ AOM_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
below1 = inter8;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
- VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
+ AOM_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
below2 = inter9;
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
- VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
+ AOM_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
if (col == (cols / 8 - 1)) {
above2 = inter9;
} else {
above2 = inter10;
}
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
- VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
+ AOM_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
if (col == (cols / 8 - 1)) {
above1 = inter9;
} else {
above1 = inter11;
}
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
- VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
- VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
+ AOM_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
+ AOM_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
inter8, inter9, inter2, inter3, inter4, inter5,
inter6, inter7, inter8, inter9, inter10, inter11,
inter12, inter13, inter14, inter15, above2, above1);
@@ -435,7 +435,7 @@
}
}
-void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
+void aom_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
int32_t src_stride,
int32_t dst_stride, int32_t cols,
uint8_t *f, int32_t size) {
@@ -446,7 +446,7 @@
}
}
-void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
+void aom_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
int32_t rows, int32_t cols, int32_t flimit) {
int32_t row, col, cnt;
uint8_t *src_dup = src_ptr;
@@ -571,10 +571,10 @@
}
}
-void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
+void aom_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
int32_t cols, int32_t flimit) {
int32_t row, col, cnt, i;
- const int16_t *rv3 = &vpx_rv[63 & rand()];
+ const int16_t *rv3 = &aom_rv[63 & rand()];
v4i32 flimit_vec;
v16u8 dst7, dst8, dst_r_b, dst_l_b;
v16i8 mask;
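
In aom_mbpost_proc_down_msa above, the dither pointer is seeded with &aom_rv[63 & rand()]. Since 64 is a power of two, 63 & rand() equals rand() % 64 without a division, giving a random start offset of 0..63 into the noise table. A sketch with a stand-in table (the real aom_rv is assumed large enough that any such offset stays in bounds):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Zero-filled stand-in for aom_rv[]; sized so offset plus use stays in range. */
static const int16_t demo_rv[64 + 64] = { 0 };

int main(void) {
  const int16_t *rv3 = &demo_rv[63 & rand()]; /* offset in [0, 63] */
  printf("offset %d\n", (int)(rv3 - demo_rv));
  return 0;
}
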
diff --git a/aom_dsp/mips/fwd_dct32x32_msa.c b/aom_dsp/mips/fwd_dct32x32_msa.c
index 9dcde12..fd24501 100644
--- a/aom_dsp/mips/fwd_dct32x32_msa.c
+++ b/aom_dsp/mips/fwd_dct32x32_msa.c
@@ -670,7 +670,7 @@
fdct8x32_1d_row_transpose_store(tmp_buf, output);
}
-void vpx_fdct32x32_msa(const int16_t *input, int16_t *output,
+void aom_fdct32x32_msa(const int16_t *input, int16_t *output,
int32_t src_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
@@ -907,7 +907,7 @@
fdct8x32_1d_row_transpose_store(tmp_buf, output);
}
-void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
+void aom_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
int32_t src_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
@@ -926,7 +926,7 @@
}
}
-void vpx_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
int sum = LD_HADD(input, stride);
sum += LD_HADD(input + 8, stride);
sum += LD_HADD(input + 16, stride);
diff --git a/aom_dsp/mips/fwd_txfm_msa.c b/aom_dsp/mips/fwd_txfm_msa.c
index c95c1d0..53c0bd0 100644
--- a/aom_dsp/mips/fwd_txfm_msa.c
+++ b/aom_dsp/mips/fwd_txfm_msa.c
@@ -166,7 +166,7 @@
ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);
}
-void vpx_fdct4x4_msa(const int16_t *input, int16_t *output,
+void aom_fdct4x4_msa(const int16_t *input, int16_t *output,
int32_t src_stride) {
v8i16 in0, in1, in2, in3;
@@ -186,9 +186,9 @@
in0 += vec;
}
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
SRA_4V(in0, in1, in2, in3, 2);
@@ -196,18 +196,18 @@
ST_SH2(in0, in2, output, 8);
}
-void vpx_fdct8x8_msa(const int16_t *input, int16_t *output,
+void aom_fdct8x8_msa(const int16_t *input, int16_t *output,
int32_t src_stride) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
SLLI_4V(in0, in1, in2, in3, 2);
SLLI_4V(in4, in5, in6, in7, 2);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
@@ -215,12 +215,12 @@
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
-void vpx_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
out[0] = LD_HADD(input, stride);
out[1] = 0;
}
-void vpx_fdct16x16_msa(const int16_t *input, int16_t *output,
+void aom_fdct16x16_msa(const int16_t *input, int16_t *output,
int32_t src_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, tmp_buf[16 * 16]);
@@ -236,7 +236,7 @@
}
}
-void vpx_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
+void aom_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
int sum = LD_HADD(input, stride);
sum += LD_HADD(input + 8, stride);
sum += LD_HADD(input + 16 * 8, stride);
diff --git a/aom_dsp/mips/fwd_txfm_msa.h b/aom_dsp/mips/fwd_txfm_msa.h
index 0911c3e..5251d0d 100644
--- a/aom_dsp/mips/fwd_txfm_msa.h
+++ b/aom_dsp/mips/fwd_txfm_msa.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_FWD_TXFM_MSA_H_
-#define VPX_DSP_MIPS_FWD_TXFM_MSA_H_
+#ifndef AOM_DSP_MIPS_FWD_TXFM_MSA_H_
+#define AOM_DSP_MIPS_FWD_TXFM_MSA_H_
#include "aom_dsp/mips/txfm_macros_msa.h"
#include "aom_dsp/txfm_common.h"
@@ -30,7 +30,7 @@
HADD_SW_S32(vec_w_m); \
})
-#define VPX_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) \
+#define AOM_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \
@@ -71,7 +71,7 @@
in6, in7); \
}
-#define VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+#define AOM_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
{ \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \
@@ -377,4 +377,4 @@
void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
int32_t src_stride);
void fdct16x8_1d_row(int16_t *input, int16_t *output);
-#endif // VPX_DSP_MIPS_FWD_TXFM_MSA_H_
+#endif // AOM_DSP_MIPS_FWD_TXFM_MSA_H_
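
One detail worth noting in the header hunks: the include guard is renamed in all three places it appears. If only the #ifndef line changed, the new guard macro would never be defined and the header body would be compiled on every inclusion; the closing #endif comment is cosmetic but is kept in sync for grep-ability. The post-rename shape:

#ifndef AOM_DSP_MIPS_FWD_TXFM_MSA_H_
#define AOM_DSP_MIPS_FWD_TXFM_MSA_H_

/* declarations ... */

#endif  // AOM_DSP_MIPS_FWD_TXFM_MSA_H_
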
diff --git a/aom_dsp/mips/idct16x16_msa.c b/aom_dsp/mips/idct16x16_msa.c
index 977d794..258847e 100644
--- a/aom_dsp/mips/idct16x16_msa.c
+++ b/aom_dsp/mips/idct16x16_msa.c
@@ -10,7 +10,7 @@
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
v8i16 loc0, loc1, loc2, loc3;
v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
@@ -103,7 +103,7 @@
ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);
}
-void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 loc0, loc1, loc2, loc3;
v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
@@ -189,19 +189,19 @@
reg3 = tmp7;
SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
dst += (4 * dst_stride);
SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
dst += (4 * dst_stride);
SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
dst += (4 * dst_stride);
SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
}
-void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
@@ -210,25 +210,25 @@
/* transform rows */
for (i = 0; i < 2; ++i) {
/* process 16 * 8 block */
- vpx_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
+ aom_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
}
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+ aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
dst_stride);
}
}
-void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
uint8_t i;
DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
int16_t *out = out_arr;
/* process 16 * 8 block */
- vpx_idct16_1d_rows_msa(input, out);
+ aom_idct16_1d_rows_msa(input, out);
/* short case just considers top 4 rows as valid output */
out += 4 * 16;
@@ -254,12 +254,12 @@
/* transform columns */
for (i = 0; i < 2; ++i) {
/* process 8 * 16 block */
- vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+ aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
dst_stride);
}
}
-void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
uint8_t i;
int16_t out;
@@ -289,7 +289,7 @@
}
}
-void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
@@ -302,7 +302,7 @@
l12, l13, l14, l15);
/* ADST in horizontal */
- VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+ AV1_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
l14, l15, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,
r12, r13, r14, r15);
@@ -319,7 +319,7 @@
ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
}
-void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
@@ -343,20 +343,20 @@
r15 = LD_SH(input + 15 * 16);
/* stage 1 */
- k0 = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
- k0 = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
- k0 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
- k2 = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+ k2 = AOM_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
r1 = LD_SH(input + 1 * 16);
@@ -368,15 +368,15 @@
r13 = LD_SH(input + 13 * 16);
r14 = LD_SH(input + 14 * 16);
- k0 = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
- k0 = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
@@ -391,9 +391,9 @@
ST8x1_UB(res0, dst);
ST8x1_UB(res1, dst + 15 * dst_stride);
- k0 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
- k1 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+ k1 = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
out8 = -out8;
@@ -408,9 +408,9 @@
ST8x1_UB(res8, dst + dst_stride);
ST8x1_UB(res9, dst + 14 * dst_stride);
- k0 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
- k2 = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+ k2 = AOM_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
out4 = -out4;
SRARI_H2_SH(out4, out5, 6);
@@ -435,8 +435,8 @@
ST8x1_UB(res12, dst + 2 * dst_stride);
ST8x1_UB(res13, dst + 13 * dst_stride);
- k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
- k3 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+ k3 = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
MADD_SHORT(out6, out7, k0, k3, out6, out7);
SRARI_H2_SH(out6, out7, 6);
dst6 = LD_UB(dst + 4 * dst_stride);
@@ -459,8 +459,8 @@
ST8x1_UB(res10, dst + 6 * dst_stride);
ST8x1_UB(res11, dst + 9 * dst_stride);
- k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+ k1 = AOM_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
MADD_SHORT(h10, h11, k1, k2, out2, out3);
SRARI_H2_SH(out2, out3, 6);
dst2 = LD_UB(dst + 7 * dst_stride);
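
The k0..k3 constants built with AOM_SET_COSPI_PAIR above encode 2x2 rotations: pairing (c0, c1) with (c1, -c0) lets MADD_BF compute butterflies of the form round(a*c0 + b*c1) and round(a*c1 - b*c0). A scalar sketch of one such rotation, assuming the usual 14-bit cospi fixed-point convention (the vector macros apply this across eight lanes at once):

#include <stdint.h>
#include <stdio.h>

enum { DCT_CONST_BITS = 14 };

static int32_t round_shift(int64_t v) {
  return (int32_t)((v + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* One rotation butterfly in the style of MADD_BF with an
 * AOM_SET_COSPI_PAIR(c0, c1) / (c1, -c0) constant pair. */
static void rotate(int32_t a, int32_t b, int32_t c0, int32_t c1,
                   int32_t *out0, int32_t *out1) {
  *out0 = round_shift((int64_t)a * c0 + (int64_t)b * c1);
  *out1 = round_shift((int64_t)a * c1 - (int64_t)b * c0);
}

int main(void) {
  const int32_t cospi_16_64 = 11585; /* assumed: round(16384 / sqrt(2)) */
  int32_t u, v;
  rotate(100, 100, cospi_16_64, cospi_16_64, &u, &v);
  printf("%d %d\n", u, v); /* ~141, 0 : a 45-degree rotation */
  return 0;
}
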
diff --git a/aom_dsp/mips/idct32x32_msa.c b/aom_dsp/mips/idct32x32_msa.c
index e090c62..47fad35 100644
--- a/aom_dsp/mips/idct32x32_msa.c
+++ b/aom_dsp/mips/idct32x32_msa.c
@@ -553,11 +553,11 @@
ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
SRARI_H4_SH(m0, m2, m4, m6, 6);
- VPX_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
+ AOM_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
SRARI_H4_SH(m0, m2, m4, m6, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
+ AOM_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
m6);
/* Load 8 & Store 8 */
@@ -572,11 +572,11 @@
ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
SRARI_H4_SH(m1, m3, m5, m7, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
+ AOM_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
SRARI_H4_SH(m1, m3, m5, m7, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
+ AOM_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
m7);
/* Load 8 & Store 8 */
@@ -591,11 +591,11 @@
ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
SRARI_H4_SH(n0, n2, n4, n6, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
+ AOM_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
SRARI_H4_SH(n0, n2, n4, n6, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
+ AOM_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
n6);
/* Load 8 & Store 8 */
@@ -610,11 +610,11 @@
ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
SRARI_H4_SH(n1, n3, n5, n7, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
+ AOM_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
SRARI_H4_SH(n1, n3, n5, n7, 6);
- VPX_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
+ AOM_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
n7);
}
@@ -629,7 +629,7 @@
dst_stride);
}
-void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
@@ -649,7 +649,7 @@
}
}
-void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int32_t i;
DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
@@ -693,7 +693,7 @@
}
}
-void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int32_t i;
int16_t out;
diff --git a/aom_dsp/mips/idct4x4_msa.c b/aom_dsp/mips/idct4x4_msa.c
index 956b5f5..446f402 100644
--- a/aom_dsp/mips/idct4x4_msa.c
+++ b/aom_dsp/mips/idct4x4_msa.c
@@ -10,7 +10,7 @@
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void aom_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 in0, in1, in2, in3;
v4i32 in0_r, in1_r, in2_r, in3_r, in4_r;
@@ -47,7 +47,7 @@
ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
}
-void vpx_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int16_t a1, e1;
v8i16 in1, in0 = { 0 };
@@ -67,7 +67,7 @@
ADDBLK_ST4x4_UB(in0, in1, in1, in1, dst, dst_stride);
}
-void vpx_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 in0, in1, in2, in3;
@@ -75,16 +75,16 @@
LD4x4_SH(input, in0, in1, in2, in3);
/* rows */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* columns */
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
- VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+ AOM_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
/* rounding (add 2^3, divide by 2^4) */
SRARI_H4_SH(in0, in1, in2, in3, 4);
ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}
-void vpx_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int16_t out;
v8i16 vec;
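
aom_idct4x4_16_add_msa above shows the separable layout used throughout these files: a 1-D pass over rows, a transpose, the same 1-D pass again, then a rounding shift before the add-and-store. A plain-C sketch of that shape with a stand-in butterfly (not the real cospi-based AOM_IDCT4x4):

#include <stdint.h>

static void transpose4(int16_t m[4][4]) {
  for (int r = 0; r < 4; ++r)
    for (int c = r + 1; c < 4; ++c) {
      int16_t t = m[r][c];
      m[r][c] = m[c][r];
      m[c][r] = t;
    }
}

/* Stand-in 1-D pass; the real kernel uses cospi rotations. */
static void rows_1d(int16_t m[4][4]) {
  for (int r = 0; r < 4; ++r) {
    int16_t a = m[r][0] + m[r][2], b = m[r][0] - m[r][2];
    int16_t c = m[r][1] + m[r][3], d = m[r][1] - m[r][3];
    m[r][0] = a + c; m[r][1] = b + d; m[r][2] = b - d; m[r][3] = a - c;
  }
}

static void idct4x4_shape_demo(int16_t m[4][4]) {
  rows_1d(m);     /* transform rows */
  transpose4(m);
  rows_1d(m);     /* same pass now acts on former columns */
  transpose4(m);
  for (int r = 0; r < 4; ++r)
    for (int c = 0; c < 4; ++c)
      m[r][c] = (int16_t)((m[r][c] + 8) >> 4); /* add 2^3, divide by 2^4 */
}

int main(void) {
  int16_t block[4][4] = { { 16, 0, 0, 0 } }; /* remaining rows zeroed */
  idct4x4_shape_demo(block);
  return 0;
}
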
diff --git a/aom_dsp/mips/idct8x8_msa.c b/aom_dsp/mips/idct8x8_msa.c
index 420433f..8366ef8 100644
--- a/aom_dsp/mips/idct8x8_msa.c
+++ b/aom_dsp/mips/idct8x8_msa.c
@@ -10,7 +10,7 @@
#include "aom_dsp/mips/inv_txfm_msa.h"
-void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
@@ -21,24 +21,24 @@
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* 1D idct8x8 */
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* columns transform */
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* 1D idct8x8 */
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* final rounding (add 2^4, divide by 2^5) and shift */
SRARI_H4_SH(in0, in1, in2, in3, 5);
SRARI_H4_SH(in4, in5, in6, in7, 5);
/* add block and store 8x8 */
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
dst += (4 * dst_stride);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
-void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
@@ -51,10 +51,10 @@
/* stage1 */
ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
- k0 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
- k2 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+ k2 = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -63,10 +63,10 @@
/* stage2 */
ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
- k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
- k1 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
- k2 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
- k3 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+ k0 = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+ k1 = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+ k2 = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+ k3 = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -76,7 +76,7 @@
/* stage3 */
s0 = __msa_ilvr_h(s6, s5);
- k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+ k1 = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
@@ -86,7 +86,7 @@
in7);
TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
- VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
/* final rounding (add 2^4, divide by 2^5) and shift */
@@ -94,12 +94,12 @@
SRARI_H4_SH(in4, in5, in6, in7, 5);
/* add block and store 8x8 */
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
dst += (4 * dst_stride);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
-void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
+void aom_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride) {
int16_t out;
int32_t val;
@@ -110,7 +110,7 @@
val = ROUND_POWER_OF_TWO(out, 5);
vec = __msa_fill_h(val);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
dst += (4 * dst_stride);
- VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+ AOM_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}
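
The DC-only path in aom_idct8x8_1_add_msa collapses the whole block to one value and rounds it with ROUND_POWER_OF_TWO(out, 5), matching the "add 2^4, divide by 2^5" comments in the full paths. A sketch of that rounding shift, assuming the conventional definition of the macro:

#include <stdio.h>

/* Assumed definition: add half the divisor, then shift right. */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  printf("%d %d\n", ROUND_POWER_OF_TWO(47, 5),  /* (47 + 16) >> 5 == 1 */
         ROUND_POWER_OF_TWO(49, 5));            /* (49 + 16) >> 5 == 2 */
  return 0;
}
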
diff --git a/aom_dsp/mips/intrapred16_dspr2.c b/aom_dsp/mips/intrapred16_dspr2.c
index b9bb55c..a5c0a64 100644
--- a/aom_dsp/mips/intrapred16_dspr2.c
+++ b/aom_dsp/mips/intrapred16_dspr2.c
@@ -11,7 +11,7 @@
#include "aom_dsp/mips/common_dspr2.h"
#if HAVE_DSPR2
-void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
@@ -155,7 +155,7 @@
: [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
}
-void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t expected_dc;
int32_t average;
diff --git a/aom_dsp/mips/intrapred4_dspr2.c b/aom_dsp/mips/intrapred4_dspr2.c
index 4494bc8..c26d5d3 100644
--- a/aom_dsp/mips/intrapred4_dspr2.c
+++ b/aom_dsp/mips/intrapred4_dspr2.c
@@ -11,7 +11,7 @@
#include "aom_dsp/mips/common_dspr2.h"
#if HAVE_DSPR2
-void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t tmp1, tmp2, tmp3, tmp4;
@@ -37,7 +37,7 @@
: [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
}
-void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t expected_dc;
int32_t average;
@@ -78,7 +78,7 @@
[stride] "r"(stride));
}
-void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t abovel, abover;
int32_t left0, left1, left2, left3;
@@ -86,7 +86,7 @@
int32_t resl;
int32_t resr;
int32_t top_left;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
__asm__ __volatile__(
"ulw %[resl], (%[above]) \n\t"
diff --git a/aom_dsp/mips/intrapred8_dspr2.c b/aom_dsp/mips/intrapred8_dspr2.c
index f85209b..fe0d339 100644
--- a/aom_dsp/mips/intrapred8_dspr2.c
+++ b/aom_dsp/mips/intrapred8_dspr2.c
@@ -11,7 +11,7 @@
#include "aom_dsp/mips/common_dspr2.h"
#if HAVE_DSPR2
-void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
@@ -64,7 +64,7 @@
: [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
}
-void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t expected_dc;
int32_t average;
@@ -146,7 +146,7 @@
[stride] "r"(stride));
}
-void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
+void aom_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
int32_t abovel, abover;
int32_t abovel_1, abover_1;
@@ -154,7 +154,7 @@
int32_t res0, res1, res2, res3;
int32_t reshw;
int32_t top_left;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
__asm__ __volatile__(
"ulw %[reshw], (%[above]) \n\t"
diff --git a/aom_dsp/mips/intrapred_msa.c b/aom_dsp/mips/intrapred_msa.c
index 17dd57b..3d868cd 100644
--- a/aom_dsp/mips/intrapred_msa.c
+++ b/aom_dsp/mips/intrapred_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \
@@ -551,125 +551,125 @@
}
}
-void vpx_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_vert_4x4_msa(above, dst, y_stride);
}
-void vpx_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_vert_8x8_msa(above, dst, y_stride);
}
-void vpx_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_vert_16x16_msa(above, dst, y_stride);
}
-void vpx_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_vert_32x32_msa(above, dst, y_stride);
}
-void vpx_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_horiz_4x4_msa(left, dst, y_stride);
}
-void vpx_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_horiz_8x8_msa(left, dst, y_stride);
}
-void vpx_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_horiz_16x16_msa(left, dst, y_stride);
}
-void vpx_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_horiz_32x32_msa(left, dst, y_stride);
}
-void vpx_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_dc_4x4_msa(above, left, dst, y_stride);
}
-void vpx_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_dc_8x8_msa(above, left, dst, y_stride);
}
-void vpx_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_dc_16x16_msa(above, left, dst, y_stride);
}
-void vpx_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_dc_32x32_msa(above, left, dst, y_stride);
}
-void vpx_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_dc_tl_4x4_msa(above, dst, y_stride);
}
-void vpx_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_dc_tl_8x8_msa(above, dst, y_stride);
}
-void vpx_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_dc_tl_16x16_msa(above, dst, y_stride);
}
-void vpx_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
intra_predict_dc_tl_32x32_msa(above, dst, y_stride);
}
-void vpx_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_dc_tl_4x4_msa(left, dst, y_stride);
}
-void vpx_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
intra_predict_dc_tl_8x8_msa(left, dst, y_stride);
}
-void vpx_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
@@ -677,7 +677,7 @@
intra_predict_dc_tl_16x16_msa(left, dst, y_stride);
}
-void vpx_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above,
const uint8_t *left) {
(void)above;
@@ -685,7 +685,7 @@
intra_predict_dc_tl_32x32_msa(left, dst, y_stride);
}
-void vpx_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -693,7 +693,7 @@
intra_predict_128dc_4x4_msa(dst, y_stride);
}
-void vpx_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -701,7 +701,7 @@
intra_predict_128dc_8x8_msa(dst, y_stride);
}
-void vpx_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -709,7 +709,7 @@
intra_predict_128dc_16x16_msa(dst, y_stride);
}
-void vpx_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
(void)left;
@@ -717,22 +717,22 @@
intra_predict_128dc_32x32_msa(dst, y_stride);
}
-void vpx_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_tm_4x4_msa(above, left, dst, y_stride);
}
-void vpx_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_tm_8x8_msa(above, left, dst, y_stride);
}
-void vpx_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_tm_16x16_msa(above, left, dst, y_stride);
}
-void vpx_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
+void aom_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride,
const uint8_t *above, const uint8_t *left) {
intra_predict_tm_32x32_msa(above, left, dst, y_stride);
}
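
The intra-prediction hunks above all follow one shape: the public aom_-prefixed entry points cast away whichever edge they ignore and forward to static intra_predict_* helpers, whose neutral names are untouched by this rename. A scalar illustration of that shape (the real helpers are MSA vector code):

#include <stddef.h>
#include <stdint.h>

/* Internal helper: no library prefix, so renames like this commit leave it
 * alone. Scalar stand-in for intra_predict_vert_4x4_msa. */
static void intra_predict_vert_4x4(const uint8_t *above, uint8_t *dst,
                                   ptrdiff_t stride) {
  for (int r = 0; r < 4; ++r)
    for (int c = 0; c < 4; ++c) dst[r * stride + c] = above[c];
}

/* Public wrapper in the style of aom_v_predictor_4x4_msa: vertical
 * prediction needs only the row above, so `left` is cast to void. */
static void demo_v_predictor_4x4(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
  (void)left;
  intra_predict_vert_4x4(above, dst, y_stride);
}

int main(void) {
  uint8_t above[4] = { 1, 2, 3, 4 }, dst[16];
  demo_v_predictor_4x4(dst, 4, above, NULL);
  return 0;
}
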
diff --git a/aom_dsp/mips/inv_txfm_dspr2.h b/aom_dsp/mips/inv_txfm_dspr2.h
index 5c9a020..1642c11 100644
--- a/aom_dsp/mips/inv_txfm_dspr2.h
+++ b/aom_dsp/mips/inv_txfm_dspr2.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
-#define VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
+#ifndef AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
+#define AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_dsp/mips/common_dspr2.h"
@@ -57,10 +57,10 @@
out; \
})
-void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
int dest_stride);
-void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output);
-void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output);
+void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
int dest_stride);
void iadst4_dspr2(const int16_t *input, int16_t *output);
void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows);
@@ -76,4 +76,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_MIPS_INV_TXFM_DSPR2_H_
+#endif // AOM_DSP_MIPS_INV_TXFM_DSPR2_H_
diff --git a/aom_dsp/mips/inv_txfm_msa.h b/aom_dsp/mips/inv_txfm_msa.h
index 14d38b8..d9478c9 100644
--- a/aom_dsp/mips/inv_txfm_msa.h
+++ b/aom_dsp/mips/inv_txfm_msa.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_INV_TXFM_MSA_H_
-#define VPX_DSP_MIPS_INV_TXFM_MSA_H_
+#ifndef AOM_DSP_MIPS_INV_TXFM_MSA_H_
+#define AOM_DSP_MIPS_INV_TXFM_MSA_H_
#include "aom_dsp/mips/macros_msa.h"
#include "aom_dsp/mips/txfm_macros_msa.h"
#include "aom_dsp/txfm_common.h"
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+#define AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
{ \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
@@ -79,7 +79,7 @@
out5 = -out5; \
}
-#define VPX_SET_COSPI_PAIR(c0_h, c1_h) \
+#define AOM_SET_COSPI_PAIR(c0_h, c1_h) \
({ \
v8i16 out0_m, r0_m, r1_m; \
\
@@ -90,7 +90,7 @@
out0_m; \
})
-#define VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \
+#define AOM_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \
{ \
uint8_t *dst_m = (uint8_t *)(dst); \
v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
@@ -108,19 +108,19 @@
ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride); \
}
-#define VPX_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
+#define AOM_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 c0_m, c1_m, c2_m, c3_m; \
v8i16 step0_m, step1_m; \
v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
- c0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
- c1_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
+ c0_m = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
+ c1_m = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
step0_m = __msa_ilvr_h(in2, in0); \
DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m); \
\
- c2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
- c3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
+ c2_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
+ c3_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
step1_m = __msa_ilvr_h(in3, in1); \
DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m); \
SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
@@ -131,7 +131,7 @@
out0, out1, out2, out3); \
}
-#define VPX_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
+#define AOM_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 res0_m, res1_m, c0_m, c1_m; \
v8i16 k1_m, k2_m, k3_m, k4_m; \
@@ -181,7 +181,7 @@
PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \
}
-#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \
+#define AV1_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \
({ \
v8i16 c0_m, c1_m; \
\
@@ -192,7 +192,7 @@
})
/* multiply and add macro */
-#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \
+#define AV1_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \
out2, out3) \
{ \
v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
@@ -211,7 +211,7 @@
}
/* idct 8x8 macro */
-#define VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+#define AOM_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
{ \
v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \
@@ -220,13 +220,13 @@
v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
\
- k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5); \
- k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \
- k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \
- k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \
- VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
+ k0_m = AV1_SET_CONST_PAIR(mask_m, 0, 5); \
+ k1_m = AV1_SET_CONST_PAIR(mask_m, 1, 0); \
+ k2_m = AV1_SET_CONST_PAIR(mask_m, 6, 3); \
+ k3_m = AV1_SET_CONST_PAIR(mask_m, 3, 2); \
+ AV1_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
SUB2(in1, in3, in7, in5, res0_m, res1_m); \
- k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \
+ k0_m = AV1_SET_CONST_PAIR(mask_m, 4, 7); \
k1_m = __msa_splati_h(mask_m, 4); \
\
ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m); \
@@ -236,15 +236,15 @@
tp4_m = in1 + in3; \
PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \
tp7_m = in7 + in5; \
- k2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
- k3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
- VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
+ k3_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
+ AV1_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, out0, \
out1, out2, out3, out4, out5, out6, out7); \
}
-#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+#define AV1_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
{ \
v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \
@@ -258,13 +258,13 @@
-cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
}; \
\
- k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1); \
- k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2); \
+ k0_m = AV1_SET_CONST_PAIR(mask1_m, 0, 1); \
+ k1_m = AV1_SET_CONST_PAIR(mask1_m, 1, 2); \
ILVRL_H2_SH(in1, in0, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \
r1_m, r2_m, r3_m); \
- k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7); \
- k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1); \
+ k0_m = AV1_SET_CONST_PAIR(mask1_m, 6, 7); \
+ k1_m = AV1_SET_CONST_PAIR(mask2_m, 0, 1); \
ILVRL_H2_SH(in5, in4, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m, \
r5_m, r6_m, r7_m); \
@@ -276,13 +276,13 @@
m3_m); \
SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m); \
- k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4); \
- k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5); \
+ k0_m = AV1_SET_CONST_PAIR(mask1_m, 3, 4); \
+ k1_m = AV1_SET_CONST_PAIR(mask1_m, 4, 5); \
ILVRL_H2_SH(in3, in2, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \
r1_m, r2_m, r3_m); \
- k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3); \
- k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4); \
+ k0_m = AV1_SET_CONST_PAIR(mask2_m, 2, 3); \
+ k1_m = AV1_SET_CONST_PAIR(mask2_m, 3, 4); \
ILVRL_H2_SH(in7, in6, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m, \
r5_m, r6_m, r7_m); \
@@ -296,12 +296,12 @@
PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m); \
ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m); \
BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \
- k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6); \
- k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7); \
+ k0_m = AV1_SET_CONST_PAIR(mask2_m, 5, 6); \
+ k1_m = AV1_SET_CONST_PAIR(mask2_m, 6, 7); \
ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \
r1_m, r2_m, r3_m); \
- k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1); \
+ k1_m = AV1_SET_CONST_PAIR(mask3_m, 0, 1); \
DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, r4_m, r5_m, \
r6_m, r7_m); \
ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \
@@ -312,8 +312,8 @@
m3_m); \
SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \
PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5); \
- k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2); \
- k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3); \
+ k0_m = AV1_SET_CONST_PAIR(mask3_m, 2, 2); \
+ k1_m = AV1_SET_CONST_PAIR(mask3_m, 2, 3); \
ILVRL_H2_SH(in4, in3, in_s1, in_s0); \
DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, m0_m, \
m1_m, m2_m, m3_m); \
@@ -331,7 +331,7 @@
out7 = -in7; \
}
-#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
+#define AV1_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
r12, r13, r14, r15, out0, out1, out2, out3, out4, \
out5, out6, out7, out8, out9, out10, out11, out12, \
out13, out14, out15) \
@@ -343,38 +343,38 @@
v8i16 k0_m, k1_m, k2_m, k3_m; \
\
/* stage 1 */ \
- k0_m = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \
- k3_m = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \
+ k3_m = AOM_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \
MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, g0_m, g1_m, g2_m, g3_m); \
- k0_m = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \
- k3_m = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \
+ k3_m = AOM_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \
MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, g4_m, g5_m, g6_m, g7_m); \
- k0_m = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \
- k3_m = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \
+ k3_m = AOM_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \
MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, g8_m, g9_m, g10_m, \
g11_m); \
- k0_m = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \
- k3_m = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \
+ k3_m = AOM_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \
MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, g12_m, g13_m, g14_m, \
g15_m); \
\
/* stage 2 */ \
- k0_m = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \
- k2_m = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \
+ k2_m = AOM_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \
MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, h0_m, h1_m, h2_m, \
h3_m); \
- k0_m = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \
- k1_m = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \
+ k1_m = AOM_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \
MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, h4_m, h5_m, \
h6_m, h7_m); \
BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10); \
@@ -383,29 +383,29 @@
\
/* stage 3 */ \
BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \
- k0_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
- k1_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
- k2_m = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
+ k1_m = AOM_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
+ k2_m = AOM_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, out4, out6, out5, \
out7); \
MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, out12, out14, \
out13, out15); \
\
/* stage 4 */ \
- k0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
- k1_m = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
- k2_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
- k3_m = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \
+ k0_m = AOM_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
+ k1_m = AOM_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
+ k2_m = AOM_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
+ k3_m = AOM_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \
MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3); \
MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \
MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \
MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \
}
-void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
int32_t dst_stride);
-void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output);
-void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output);
+void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
int32_t dst_stride);
-void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output);
-#endif // VPX_DSP_MIPS_INV_TXFM_MSA_H_
+void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output);
+#endif // AOM_DSP_MIPS_INV_TXFM_MSA_H_
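The AOM_IDCT4x4 macro above is a vectorized 4-point IDCT butterfly: each AOM_SET_COSPI_PAIR packs two cosine constants into even/odd lanes, DOTP_SH2_SW forms the paired products, and SRARI applies the DCT rounding shift. A scalar model of the same arithmetic (a sketch; DCT_CONST_BITS == 14 and the cospi_*_64 constants are taken from aom_dsp/txfm_common.h, passed in here to keep the model self-contained):

#include <stdint.h>

/* Round-and-shift applied after every cospi multiply (SRARI_W4_SW above). */
static int16_t dct_round_shift(int32_t x) { return (int16_t)((x + (1 << 13)) >> 14); }

static void idct4_model(const int16_t in[4], int16_t out[4],
                        int16_t cospi_8_64, int16_t cospi_16_64,
                        int16_t cospi_24_64) {
  /* Even half: dot products against {cospi_16, cospi_16} and {cospi_16, -cospi_16}. */
  const int16_t s0 = dct_round_shift((in[0] + in[2]) * cospi_16_64);
  const int16_t s1 = dct_round_shift((in[0] - in[2]) * cospi_16_64);
  /* Odd half: dot products against {cospi_24, -cospi_8} and {cospi_8, cospi_24}. */
  const int16_t s2 = dct_round_shift(in[1] * cospi_24_64 - in[3] * cospi_8_64);
  const int16_t s3 = dct_round_shift(in[1] * cospi_8_64 + in[3] * cospi_24_64);
  /* BUTTERFLY_4 recombination. */
  out[0] = s0 + s3;
  out[1] = s1 + s2;
  out[2] = s1 - s2;
  out[3] = s0 - s3;
}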
diff --git a/aom_dsp/mips/itrans16_dspr2.c b/aom_dsp/mips/itrans16_dspr2.c
index 8d184cb..cb3659e 100644
--- a/aom_dsp/mips/itrans16_dspr2.c
+++ b/aom_dsp/mips/itrans16_dspr2.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
#include "aom_dsp/txfm_common.h"
@@ -401,17 +401,17 @@
int result1, result2, result3, result4;
const int const_2_power_13 = 8192;
uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
+ /* prefetch aom_ff_cropTbl */
+ prefetch_load(aom_ff_cropTbl);
+ prefetch_load(aom_ff_cropTbl + 32);
+ prefetch_load(aom_ff_cropTbl + 64);
+ prefetch_load(aom_ff_cropTbl + 96);
+ prefetch_load(aom_ff_cropTbl + 128);
+ prefetch_load(aom_ff_cropTbl + 160);
+ prefetch_load(aom_ff_cropTbl + 192);
+ prefetch_load(aom_ff_cropTbl + 224);
for (i = 0; i < 16; ++i) {
dest_pix = (dest + i);
@@ -868,7 +868,7 @@
}
}
-void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
uint32_t pos = 45;
@@ -883,7 +883,7 @@
idct16_cols_add_blk_dspr2(out, dest, dest_stride);
}
-void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
int16_t *outptr = out;
@@ -927,7 +927,7 @@
idct16_cols_add_blk_dspr2(out, dest, dest_stride);
}
-void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
uint32_t pos = 45;
int32_t out;
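A note on the table prefetched above: aom_ff_cropTbl is a pixel-clamp lookup, and the eight prefetch_load calls walk it in 32-byte steps so the whole 256-byte table is cached before the reconstruction loop starts indexing it. What an entry stands in for (a sketch; the exact guard-band layout lives in common_dspr2.c and is assumed here):

/* cm[v] is expected to equal clip_pixel(v) for any value a residual add can
 * produce, so dest[x] = cm[dest[x] + residual] clamps without branching. */
static unsigned char clip_pixel(int v) {
  return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
}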
diff --git a/aom_dsp/mips/itrans32_cols_dspr2.c b/aom_dsp/mips/itrans32_cols_dspr2.c
index 7997131..8918bd5 100644
--- a/aom_dsp/mips/itrans32_cols_dspr2.c
+++ b/aom_dsp/mips/itrans32_cols_dspr2.c
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
#include "aom_dsp/txfm_common.h"
#if HAVE_DSPR2
-void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
int dest_stride) {
int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6;
int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13;
@@ -35,17 +35,17 @@
int i, temp21;
uint8_t *dest_pix, *dest_pix1;
const int const_2_power_13 = 8192;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
+ /* prefetch aom_ff_cropTbl */
+ prefetch_load(aom_ff_cropTbl);
+ prefetch_load(aom_ff_cropTbl + 32);
+ prefetch_load(aom_ff_cropTbl + 64);
+ prefetch_load(aom_ff_cropTbl + 96);
+ prefetch_load(aom_ff_cropTbl + 128);
+ prefetch_load(aom_ff_cropTbl + 160);
+ prefetch_load(aom_ff_cropTbl + 192);
+ prefetch_load(aom_ff_cropTbl + 224);
for (i = 0; i < 32; ++i) {
dest_pix = dest + i;
diff --git a/aom_dsp/mips/itrans32_dspr2.c b/aom_dsp/mips/itrans32_dspr2.c
index 74248b3..5ce34d1 100644
--- a/aom_dsp/mips/itrans32_dspr2.c
+++ b/aom_dsp/mips/itrans32_dspr2.c
@@ -11,7 +11,7 @@
#include <assert.h>
#include <stdio.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
#include "aom_dsp/txfm_common.h"
@@ -835,7 +835,7 @@
}
}
-void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
int16_t *outptr = out;
@@ -850,10 +850,10 @@
idct32_rows_dspr2(input, outptr, 32);
// Columns
- vpx_idct32_cols_add_blk_dspr2(out, dest, dest_stride);
+ aom_idct32_cols_add_blk_dspr2(out, dest, dest_stride);
}
-void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
int stride) {
DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
int16_t *outptr = out;
@@ -908,10 +908,10 @@
}
// Columns
- vpx_idct32_cols_add_blk_dspr2(out, dest, stride);
+ aom_idct32_cols_add_blk_dspr2(out, dest, stride);
}
-void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
int stride) {
int r, out;
int32_t a1, absa1;
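The three entry points renamed above are specializations on how many coefficients can be nonzero: _1024 handles a full 32x32 block, _34 a block whose nonzero coefficients fit in the top-left 8x8 (the usual eob <= 34 shortcut), and _1 the DC-only case. The DC-only path collapses to one constant added to every pixel; a scalar sketch, assuming dct_round_shift and cospi_16_64 as defined in txfm_common.h:

#include <stdint.h>

static int16_t dct_round_shift(int32_t x) { return (int16_t)((x + (1 << 13)) >> 14); }
static uint8_t clip_pixel(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

/* DC-only model: push the DC coefficient through both 1-D passes, apply the
 * final >> 6 rounding, and add the constant to every pixel. */
static void idct32x32_dc_add_model(const int16_t *input, uint8_t *dest,
                                   int stride, int16_t cospi_16_64) {
  int16_t out = dct_round_shift(input[0] * cospi_16_64);  /* row pass */
  out = dct_round_shift(out * cospi_16_64);               /* column pass */
  const int a1 = (out + 32) >> 6;                         /* final rounding */
  for (int r = 0; r < 32; ++r)
    for (int c = 0; c < 32; ++c)
      dest[r * stride + c] = clip_pixel(dest[r * stride + c] + a1);
}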
diff --git a/aom_dsp/mips/itrans4_dspr2.c b/aom_dsp/mips/itrans4_dspr2.c
index d6ea667..9453e95 100644
--- a/aom_dsp/mips/itrans4_dspr2.c
+++ b/aom_dsp/mips/itrans4_dspr2.c
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
#include "aom_dsp/txfm_common.h"
#if HAVE_DSPR2
-void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
+void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
int16_t step_0, step_1, step_2, step_3;
int Temp0, Temp1, Temp2, Temp3;
const int const_2_power_13 = 8192;
@@ -95,24 +95,24 @@
}
}
-void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
+void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
int dest_stride) {
int16_t step_0, step_1, step_2, step_3;
int Temp0, Temp1, Temp2, Temp3;
const int const_2_power_13 = 8192;
int i;
uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
+ /* prefetch aom_ff_cropTbl */
+ prefetch_load(aom_ff_cropTbl);
+ prefetch_load(aom_ff_cropTbl + 32);
+ prefetch_load(aom_ff_cropTbl + 64);
+ prefetch_load(aom_ff_cropTbl + 96);
+ prefetch_load(aom_ff_cropTbl + 128);
+ prefetch_load(aom_ff_cropTbl + 160);
+ prefetch_load(aom_ff_cropTbl + 192);
+ prefetch_load(aom_ff_cropTbl + 224);
for (i = 0; i < 4; ++i) {
dest_pix = (dest + i);
@@ -215,7 +215,7 @@
}
}
-void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
int16_t *outptr = out;
@@ -227,13 +227,13 @@
: [pos] "r"(pos));
// Rows
- vpx_idct4_rows_dspr2(input, outptr);
+ aom_idct4_rows_dspr2(input, outptr);
// Columns
- vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
+ aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
}
-void vpx_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
int a1, absa1;
int r;
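As aom_idct4x4_16_add_dspr2 above shows, the 2-D inverse transform is separable: a 1-D row pass into a scratch block, then a 1-D column pass that adds the residual into the destination. The same two-pass shape recurs in the 8x8, 16x16, and 32x32 drivers in this directory. Schematically:

#include <stdint.h>

/* Prototypes as declared in aom_dsp/mips/inv_txfm_dspr2.h (see above). */
void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output);
void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                     int dest_stride);

/* Two-pass shape shared by the *_add drivers here (a sketch that omits the
 * wrdsp DSP-control setup the real driver performs). */
void idct4x4_add_model(const int16_t *input, uint8_t *dest, int stride) {
  int16_t scratch[4 * 4];
  aom_idct4_rows_dspr2(input, scratch);                    /* row pass */
  aom_idct4_columns_add_blk_dspr2(scratch, dest, stride);  /* column pass + add */
}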
diff --git a/aom_dsp/mips/itrans8_dspr2.c b/aom_dsp/mips/itrans8_dspr2.c
index 4cee3d0..c1d1141 100644
--- a/aom_dsp/mips/itrans8_dspr2.c
+++ b/aom_dsp/mips/itrans8_dspr2.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/inv_txfm_dspr2.h"
#include "aom_dsp/txfm_common.h"
@@ -199,17 +199,17 @@
int i;
const int const_2_power_13 = 8192;
uint8_t *dest_pix;
- uint8_t *cm = vpx_ff_cropTbl;
+ uint8_t *cm = aom_ff_cropTbl;
- /* prefetch vpx_ff_cropTbl */
- prefetch_load(vpx_ff_cropTbl);
- prefetch_load(vpx_ff_cropTbl + 32);
- prefetch_load(vpx_ff_cropTbl + 64);
- prefetch_load(vpx_ff_cropTbl + 96);
- prefetch_load(vpx_ff_cropTbl + 128);
- prefetch_load(vpx_ff_cropTbl + 160);
- prefetch_load(vpx_ff_cropTbl + 192);
- prefetch_load(vpx_ff_cropTbl + 224);
+ /* prefetch aom_ff_cropTbl */
+ prefetch_load(aom_ff_cropTbl);
+ prefetch_load(aom_ff_cropTbl + 32);
+ prefetch_load(aom_ff_cropTbl + 64);
+ prefetch_load(aom_ff_cropTbl + 96);
+ prefetch_load(aom_ff_cropTbl + 128);
+ prefetch_load(aom_ff_cropTbl + 160);
+ prefetch_load(aom_ff_cropTbl + 192);
+ prefetch_load(aom_ff_cropTbl + 224);
for (i = 0; i < 8; ++i) {
dest_pix = (dest + i);
@@ -438,7 +438,7 @@
}
}
-void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
int16_t *outptr = out;
@@ -454,7 +454,7 @@
idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
}
-void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
int16_t *outptr = out;
@@ -493,7 +493,7 @@
idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
}
-void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
+void aom_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
uint32_t pos = 45;
int32_t out;
diff --git a/aom_dsp/mips/loopfilter_16_msa.c b/aom_dsp/mips/loopfilter_16_msa.c
index ff7dc05..de7f754 100644
--- a/aom_dsp/mips/loopfilter_16_msa.c
+++ b/aom_dsp/mips/loopfilter_16_msa.c
@@ -11,7 +11,7 @@
#include "aom_ports/mem.h"
#include "aom_dsp/mips/loopfilter_msa.h"
-int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
+int32_t aom_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -34,8 +34,8 @@
/* mask and hev */
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
if (__msa_test_bz_v(flat)) {
ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
@@ -44,12 +44,12 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
- VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
/* convert 16 bit output data into 8 bit */
@@ -77,7 +77,7 @@
}
}
-void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
+void aom_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
v16u8 flat, flat2, filter8;
v16i8 zero = { 0 };
v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
@@ -92,7 +92,7 @@
LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0);
LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
- VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+ AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
if (__msa_test_bz_v(flat2)) {
LD_UB4(filter48, 16, p2, p1, p0, q0);
@@ -403,7 +403,7 @@
}
}
-void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr, int32_t count) {
@@ -412,11 +412,11 @@
(void)count;
- early_exit = vpx_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr,
+ early_exit = aom_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr,
limit_ptr, thresh_ptr);
if (0 == early_exit) {
- vpx_hz_lpf_t16_16w(src, pitch, filter48);
+ aom_hz_lpf_t16_16w(src, pitch, filter48);
}
}
@@ -447,8 +447,8 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
q1_out);
flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -464,7 +464,7 @@
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
/* convert 16 bit output data into 8 bit */
@@ -484,7 +484,7 @@
LD_UB4((src - 8 * pitch), pitch, p7, p6, p5, p4);
LD_UB4(src + (4 * pitch), pitch, q4, q5, q6, q7);
- VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+ AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
if (__msa_test_bz_v(flat2)) {
p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
@@ -638,19 +638,19 @@
}
}
} else {
- vpx_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr,
+ aom_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr,
thresh_ptr, count);
}
}
-void vpx_lpf_horizontal_edge_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_edge_8_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
mb_lpf_horizontal_edge(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, 1);
}
-void vpx_lpf_horizontal_edge_16_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_edge_16_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -752,7 +752,7 @@
ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
}
-int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
+int32_t aom_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
uint8_t *src_org, int32_t pitch_org,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
@@ -776,9 +776,9 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
/* flat4 */
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
/* filter4 */
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -790,7 +790,7 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
/* convert 16 bit output data into 8 bit */
@@ -819,7 +819,7 @@
}
}
-int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
+int32_t aom_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
uint8_t *filter48) {
v16i8 zero = { 0 };
v16u8 filter8, flat, flat2;
@@ -834,7 +834,7 @@
LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
- VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+ AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
if (__msa_test_bz_v(flat2)) {
v8i16 vec0, vec1, vec2, vec3, vec4;
@@ -1039,7 +1039,7 @@
}
}
-void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -1050,11 +1050,11 @@
transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16);
early_exit =
- vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
+ aom_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
pitch, b_limit_ptr, limit_ptr, thresh_ptr);
if (0 == early_exit) {
- early_exit = vpx_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
+ early_exit = aom_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
&filter48[0]);
if (0 == early_exit) {
@@ -1063,7 +1063,7 @@
}
}
-int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
+int32_t aom_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
uint8_t *src_org, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
@@ -1089,9 +1089,9 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
/* flat4 */
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
/* filter4 */
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
if (__msa_test_bz_v(flat)) {
ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -1108,11 +1108,11 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
- VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
/* convert 16 bit output data into 8 bit */
@@ -1140,7 +1140,7 @@
}
}
-int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
+int32_t aom_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
uint8_t *filter48) {
v16u8 flat, flat2, filter8;
v16i8 zero = { 0 };
@@ -1157,7 +1157,7 @@
LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
- VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+ AOM_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
if (__msa_test_bz_v(flat2)) {
v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
@@ -1461,7 +1461,7 @@
}
}
-void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -1472,11 +1472,11 @@
transpose_16x16((src - 8), pitch, &transposed_input[0], 16);
early_exit =
- vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
+ aom_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
pitch, b_limit_ptr, limit_ptr, thresh_ptr);
if (0 == early_exit) {
- early_exit = vpx_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
+ early_exit = aom_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
&filter48[0]);
if (0 == early_exit) {
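The 16-wide filters in this file share a two-stage contract, visible in the drivers above: the t4_and_t8 stage writes its filter8 intermediates into a 16x8 scratch area and returns nonzero when every pixel took the narrow path, in which case the wide t16 stage is skipped. A sketch of that contract (the real driver declares filter48 with DECLARE_ALIGNED, assumed here):

#include <stdint.h>

int32_t aom_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch,
                                 uint8_t *filter48, const uint8_t *b_limit,
                                 const uint8_t *limit, const uint8_t *thresh);
void aom_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48);

void lpf_16_model(uint8_t *src, int32_t pitch, const uint8_t *b_limit,
                  const uint8_t *limit, const uint8_t *thresh) {
  uint8_t filter48[16 * 8];  /* filter8 intermediates, reused by the t16 stage */
  if (aom_hz_lpf_t4_and_t8_16w(src, pitch, filter48, b_limit, limit, thresh))
    return;  /* early exit: no pixel needed the wide filter */
  aom_hz_lpf_t16_16w(src, pitch, filter48);
}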
diff --git a/aom_dsp/mips/loopfilter_4_msa.c b/aom_dsp/mips/loopfilter_4_msa.c
index 9411d96..6e95b53 100644
--- a/aom_dsp/mips/loopfilter_4_msa.c
+++ b/aom_dsp/mips/loopfilter_4_msa.c
@@ -10,7 +10,7 @@
#include "aom_dsp/mips/loopfilter_msa.h"
-void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -27,7 +27,7 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
@@ -36,7 +36,7 @@
SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
}
-void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit0_ptr,
const uint8_t *limit0_ptr,
const uint8_t *thresh0_ptr,
@@ -63,12 +63,12 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
mask, flat);
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
}
-void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -86,7 +86,7 @@
q3);
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
ILVRL_H2_SH(vec1, vec0, vec2, vec3);
@@ -96,7 +96,7 @@
ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
}
-void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit0_ptr,
const uint8_t *limit0_ptr,
const uint8_t *thresh0_ptr,
@@ -132,7 +132,7 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
mask, flat);
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
diff --git a/aom_dsp/mips/loopfilter_8_msa.c b/aom_dsp/mips/loopfilter_8_msa.c
index 4f745da..0208c69 100644
--- a/aom_dsp/mips/loopfilter_8_msa.c
+++ b/aom_dsp/mips/loopfilter_8_msa.c
@@ -10,7 +10,7 @@
#include "aom_dsp/mips/loopfilter_msa.h"
-void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -31,8 +31,8 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -45,7 +45,7 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
/* convert 16 bit output data into 8 bit */
@@ -78,7 +78,7 @@
}
}
-void vpx_lpf_horizontal_8_dual_msa(
+void aom_lpf_horizontal_8_dual_msa(
uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1,
const uint8_t *thresh1) {
@@ -109,20 +109,20 @@
/* mask and hev */
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
if (__msa_test_bz_v(flat)) {
ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
- VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
/* convert 16 bit output data into 8 bit */
@@ -149,7 +149,7 @@
}
}
-void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit_ptr,
const uint8_t *limit_ptr,
const uint8_t *thresh_ptr) {
@@ -175,9 +175,9 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
/* flat4 */
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
/* filter4 */
- VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -193,7 +193,7 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
/* convert 16 bit output data into 8 bit */
PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
@@ -224,7 +224,7 @@
}
}
-void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
+void aom_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit0, const uint8_t *limit0,
const uint8_t *thresh0,
const uint8_t *b_limit1, const uint8_t *limit1,
@@ -268,9 +268,9 @@
LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
mask, flat);
/* flat4 */
- VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat);
/* filter4 */
- VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+ AOM_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
if (__msa_test_bz_v(flat)) {
ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -285,14 +285,14 @@
} else {
ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
- VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+ AOM_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
/* filter8 */
- VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ AOM_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
/* convert 16 bit output data into 8 bit */
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.c b/aom_dsp/mips/loopfilter_filters_dspr2.c
index dc59838..d6f6213 100644
--- a/aom_dsp/mips/loopfilter_filters_dspr2.c
+++ b/aom_dsp/mips/loopfilter_filters_dspr2.c
@@ -10,16 +10,16 @@
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/mips/common_dspr2.h"
#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#if HAVE_DSPR2
-void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
uint8_t i;
@@ -104,7 +104,7 @@
}
}
-void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
+void aom_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
uint8_t i;
@@ -281,46 +281,46 @@
}
}
-void vpx_lpf_horizontal_4_dual_dspr2(
+void aom_lpf_horizontal_4_dual_dspr2(
uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
const uint8_t *limit1, const uint8_t *thresh1) {
- vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1);
+ aom_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0);
+ aom_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_horizontal_8_dual_dspr2(
+void aom_lpf_horizontal_8_dual_dspr2(
uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
const uint8_t *limit1, const uint8_t *thresh1) {
- vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1);
+ aom_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0);
+ aom_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0,
const uint8_t *thresh0,
const uint8_t *blimit1,
const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0);
- vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
+ aom_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0);
+ aom_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0,
const uint8_t *thresh0,
const uint8_t *blimit1,
const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0);
- vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
+ aom_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0);
+ aom_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1);
}
-void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
+void aom_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh) {
- vpx_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
- vpx_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh);
+ aom_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
+ aom_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh);
}
#endif // #if HAVE_DSPR2
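The dual wrappers above all reduce to two independent single-edge calls: horizontal duals step 8 pixels along the row, vertical duals step 8 rows down (8 * p). A generic model of that shape (a sketch; lpf_fn and lpf_dual are illustrative names, not part of the library):

#include <stdint.h>

typedef void (*lpf_fn)(unsigned char *s, int pitch, const uint8_t *blimit,
                       const uint8_t *limit, const uint8_t *thresh);

static void lpf_dual(lpf_fn f, uint8_t *s, int p, int vertical,
                     const uint8_t *blimit0, const uint8_t *limit0,
                     const uint8_t *thresh0, const uint8_t *blimit1,
                     const uint8_t *limit1, const uint8_t *thresh1) {
  f(s, p, blimit0, limit0, thresh0);
  /* second half-edge: 8 pixels right for horizontal, 8 rows down for vertical */
  f(s + (vertical ? 8 * p : 8), p, blimit1, limit1, thresh1);
}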
diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.h b/aom_dsp/mips/loopfilter_filters_dspr2.h
index 919618c..a511202 100644
--- a/aom_dsp/mips/loopfilter_filters_dspr2.h
+++ b/aom_dsp/mips/loopfilter_filters_dspr2.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
@@ -26,7 +26,7 @@
/* inputs & outputs are quad-byte vectors */
static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1,
uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) {
- int32_t vpx_filter_l, vpx_filter_r;
+ int32_t aom_filter_l, aom_filter_r;
int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
int32_t subr_r, subr_l;
uint32_t t1, t2, HWM, t3;
@@ -72,33 +72,33 @@
hev_r = hev_r & HWM;
__asm__ __volatile__(
- /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
- "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t"
- "subq_s.ph %[vpx_filter_r], %[vps1_r], %[vqs1_r] \n\t"
+ /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */
+ "subq_s.ph %[aom_filter_l], %[vps1_l], %[vqs1_l] \n\t"
+ "subq_s.ph %[aom_filter_r], %[vps1_r], %[vqs1_r] \n\t"
/* qs0 - ps0 */
"subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t"
"subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t"
- /* vpx_filter &= hev; */
- "and %[vpx_filter_l], %[vpx_filter_l], %[hev_l] \n\t"
- "and %[vpx_filter_r], %[vpx_filter_r], %[hev_r] \n\t"
+ /* aom_filter &= hev; */
+ "and %[aom_filter_l], %[aom_filter_l], %[hev_l] \n\t"
+ "and %[aom_filter_r], %[aom_filter_r], %[hev_r] \n\t"
- /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ /* aom_filter = vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
"xor %[invhev_l], %[hev_l], %[HWM] \n\t"
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
"xor %[invhev_r], %[hev_r], %[HWM] \n\t"
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
- /* vpx_filter &= mask; */
- "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t"
- "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t"
+ /* aom_filter &= mask; */
+ "and %[aom_filter_l], %[aom_filter_l], %[mask_l] \n\t"
+ "and %[aom_filter_r], %[aom_filter_r], %[mask_r] \n\t"
- : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+ : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
[subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
[invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
: [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
@@ -109,13 +109,13 @@
/* save bottom 3 bits so that we round one side +4 and the other +3 */
__asm__ __volatile__(
- /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
- "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t"
- "addq_s.ph %[Filter1_r], %[vpx_filter_r], %[t2] \n\t"
+ /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */
+ "addq_s.ph %[Filter1_l], %[aom_filter_l], %[t2] \n\t"
+ "addq_s.ph %[Filter1_r], %[aom_filter_r], %[t2] \n\t"
- /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */
- "addq_s.ph %[Filter2_l], %[vpx_filter_l], %[t1] \n\t"
- "addq_s.ph %[Filter2_r], %[vpx_filter_r], %[t1] \n\t"
+ /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */
+ "addq_s.ph %[Filter2_l], %[aom_filter_l], %[t1] \n\t"
+ "addq_s.ph %[Filter2_r], %[aom_filter_r], %[t1] \n\t"
"shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t"
"shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t"
@@ -138,22 +138,22 @@
[vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
[vqs0_r] "+r"(vqs0_r)
: [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
- [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
+ [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
__asm__ __volatile__(
- /* (vpx_filter += 1) >>= 1 */
+ /* (aom_filter += 1) >>= 1 */
"addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t"
"addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t"
- /* vpx_filter &= ~hev; */
+ /* aom_filter &= ~hev; */
"and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t"
"and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t"
- /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */
+ /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */
"addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t"
"addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t"
- /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */
+ /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */
"subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t"
"subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t"
@@ -193,7 +193,7 @@
uint32_t ps0, uint32_t qs0, uint32_t qs1,
uint32_t *p1_f0, uint32_t *p0_f0,
uint32_t *q0_f0, uint32_t *q1_f0) {
- int32_t vpx_filter_l, vpx_filter_r;
+ int32_t aom_filter_l, aom_filter_r;
int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
int32_t subr_r, subr_l;
uint32_t t1, t2, HWM, t3;
@@ -239,33 +239,33 @@
hev_r = hev_r & HWM;
__asm__ __volatile__(
- /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
- "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t"
- "subq_s.ph %[vpx_filter_r], %[vps1_r], %[vqs1_r] \n\t"
+ /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */
+ "subq_s.ph %[aom_filter_l], %[vps1_l], %[vqs1_l] \n\t"
+ "subq_s.ph %[aom_filter_r], %[vps1_r], %[vqs1_r] \n\t"
/* qs0 - ps0 */
"subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t"
"subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t"
- /* vpx_filter &= hev; */
- "and %[vpx_filter_l], %[vpx_filter_l], %[hev_l] \n\t"
- "and %[vpx_filter_r], %[vpx_filter_r], %[hev_r] \n\t"
+ /* aom_filter &= hev; */
+ "and %[aom_filter_l], %[aom_filter_l], %[hev_l] \n\t"
+ "and %[aom_filter_r], %[aom_filter_r], %[hev_r] \n\t"
- /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ /* aom_filter = vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
"xor %[invhev_l], %[hev_l], %[HWM] \n\t"
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
"xor %[invhev_r], %[hev_r], %[HWM] \n\t"
- "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t"
- "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t"
+ "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t"
- /* vpx_filter &= mask; */
- "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t"
- "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t"
+ /* aom_filter &= mask; */
+ "and %[aom_filter_l], %[aom_filter_l], %[mask_l] \n\t"
+ "and %[aom_filter_r], %[aom_filter_r], %[mask_r] \n\t"
- : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+ : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r),
[subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
[invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
: [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
@@ -276,13 +276,13 @@
/* save bottom 3 bits so that we round one side +4 and the other +3 */
__asm__ __volatile__(
- /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
- "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t"
- "addq_s.ph %[Filter1_r], %[vpx_filter_r], %[t2] \n\t"
+ /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */
+ "addq_s.ph %[Filter1_l], %[aom_filter_l], %[t2] \n\t"
+ "addq_s.ph %[Filter1_r], %[aom_filter_r], %[t2] \n\t"
- /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */
- "addq_s.ph %[Filter2_l], %[vpx_filter_l], %[t1] \n\t"
- "addq_s.ph %[Filter2_r], %[vpx_filter_r], %[t1] \n\t"
+ /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */
+ "addq_s.ph %[Filter2_l], %[aom_filter_l], %[t1] \n\t"
+ "addq_s.ph %[Filter2_r], %[aom_filter_r], %[t1] \n\t"
"shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t"
"shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t"
@@ -305,22 +305,22 @@
[vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
[vqs0_r] "+r"(vqs0_r)
: [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
- [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
+ [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r));
__asm__ __volatile__(
- /* (vpx_filter += 1) >>= 1 */
+ /* (aom_filter += 1) >>= 1 */
"addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t"
"addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t"
- /* vpx_filter &= ~hev; */
+ /* aom_filter &= ~hev; */
"and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t"
"and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t"
- /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */
+ /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */
"addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t"
"addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t"
- /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */
+ /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */
"subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t"
"subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t"
@@ -731,4 +731,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#endif // AOM_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
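The packed dspr2 assembly above computes the standard filter4 kernel on two pixels per register half; its inline comments give the scalar recurrence. A plain C model (a sketch; values are assumed to be pixels offset into the signed [-128, 127] domain, matching the saturating addq_s.ph/subq_s.ph arithmetic, with mask and hev as all-zeros/all-ones lane masks):

#include <stdint.h>

static int8_t sc(int v) { return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v); }

static void filter4_model(int8_t mask, int8_t hev, int8_t *ps1, int8_t *ps0,
                          int8_t *qs0, int8_t *qs1) {
  int8_t filter = sc(*ps1 - *qs1) & hev;          /* only high-variance edges */
  filter = sc(filter + 3 * (*qs0 - *ps0)) & mask; /* + 3 * (q0 - p0), gated   */
  const int8_t Filter1 = sc(filter + 4) >> 3;     /* round one side by 4 ...  */
  const int8_t Filter2 = sc(filter + 3) >> 3;     /* ... the other by 3       */
  *qs0 = sc(*qs0 - Filter1);
  *ps0 = sc(*ps0 + Filter2);
  filter = (int8_t)(((Filter1 + 1) >> 1) & ~hev); /* outer taps skip hev pixels */
  *ps1 = sc(*ps1 + filter);
  *qs1 = sc(*qs1 - filter);
}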
diff --git a/aom_dsp/mips/loopfilter_macros_dspr2.h b/aom_dsp/mips/loopfilter_macros_dspr2.h
index 3928263..6db867e 100644
--- a/aom_dsp/mips/loopfilter_macros_dspr2.h
+++ b/aom_dsp/mips/loopfilter_macros_dspr2.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
#ifdef __cplusplus
extern "C" {
@@ -432,4 +432,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#endif // AOM_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
diff --git a/aom_dsp/mips/loopfilter_masks_dspr2.h b/aom_dsp/mips/loopfilter_masks_dspr2.h
index 986db05..141a71a 100644
--- a/aom_dsp/mips/loopfilter_masks_dspr2.h
+++ b/aom_dsp/mips/loopfilter_masks_dspr2.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
-#define VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#ifndef AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#define AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+#include "aom_mem/aom_mem.h"
#ifdef __cplusplus
extern "C" {
@@ -352,4 +352,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#endif // AOM_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
diff --git a/aom_dsp/mips/loopfilter_mb_dspr2.c b/aom_dsp/mips/loopfilter_mb_dspr2.c
index ea30e16..a365131 100644
--- a/aom_dsp/mips/loopfilter_mb_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_dspr2.c
@@ -10,16 +10,16 @@
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/mips/common_dspr2.h"
#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#if HAVE_DSPR2
-void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
uint32_t mask;
@@ -286,7 +286,7 @@
}
}
-void vpx_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
uint8_t i;
diff --git a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
index 82a44c5..1665367 100644
--- a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c
@@ -10,13 +10,13 @@
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/mips/common_dspr2.h"
#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#if HAVE_DSPR2
static void mb_lpf_horizontal_edge(unsigned char *s, int pitch,
@@ -717,14 +717,14 @@
}
}
-void vpx_lpf_horizontal_edge_8_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_edge_8_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh) {
mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 1);
}
-void vpx_lpf_horizontal_edge_16_dspr2(unsigned char *s, int pitch,
+void aom_lpf_horizontal_edge_16_dspr2(unsigned char *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh) {
diff --git a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
index 22d7261..f313275 100644
--- a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
+++ b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c
@@ -10,16 +10,16 @@
#include <stdlib.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/mips/common_dspr2.h"
#include "aom_dsp/mips/loopfilter_filters_dspr2.h"
#include "aom_dsp/mips/loopfilter_macros_dspr2.h"
#include "aom_dsp/mips/loopfilter_masks_dspr2.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#if HAVE_DSPR2
-void vpx_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
+void aom_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
uint8_t i;
uint32_t mask, hev, flat, flat2;
diff --git a/aom_dsp/mips/loopfilter_msa.h b/aom_dsp/mips/loopfilter_msa.h
index d977f34..5aadad2 100644
--- a/aom_dsp/mips/loopfilter_msa.h
+++ b/aom_dsp/mips/loopfilter_msa.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_LOOPFILTER_MSA_H_
-#define VPX_DSP_LOOPFILTER_MSA_H_
+#ifndef AOM_DSP_LOOPFILTER_MSA_H_
+#define AOM_DSP_LOOPFILTER_MSA_H_
#include "aom_dsp/mips/macros_msa.h"
-#define VPX_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+#define AOM_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
p1_out, p0_out, q0_out, q1_out) \
{ \
v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
@@ -64,7 +64,7 @@
p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \
}
-#define VPX_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+#define AOM_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
p1_out, p0_out, q0_out, q1_out) \
{ \
v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
@@ -122,7 +122,7 @@
p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \
}
-#define VPX_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
+#define AOM_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
{ \
v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0; \
v16u8 zero_in = { 0 }; \
@@ -143,7 +143,7 @@
flat_out = flat_out & (mask); \
}
-#define VPX_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
+#define AOM_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
q6_in, q7_in, flat_in, flat2_out) \
{ \
v16u8 tmp, zero_in = { 0 }; \
@@ -173,7 +173,7 @@
flat2_out = flat2_out & flat_in; \
}
-#define VPX_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+#define AOM_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
p2_filt8_out, p1_filt8_out, p0_filt8_out, q0_filt8_out, \
q1_filt8_out, q2_filt8_out) \
{ \
@@ -247,4 +247,4 @@
mask_out = limit_in < (v16u8)mask_out; \
mask_out = __msa_xori_b(mask_out, 0xff); \
}
-#endif /* VPX_DSP_LOOPFILTER_MSA_H_ */
+#endif /* AOM_DSP_LOOPFILTER_MSA_H_ */
diff --git a/aom_dsp/mips/macros_msa.h b/aom_dsp/mips/macros_msa.h
index f21b895..d7e9ad4 100644
--- a/aom_dsp/mips/macros_msa.h
+++ b/aom_dsp/mips/macros_msa.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_MACROS_MSA_H_
-#define VPX_DSP_MIPS_MACROS_MSA_H_
+#ifndef AOM_DSP_MIPS_MACROS_MSA_H_
+#define AOM_DSP_MIPS_MACROS_MSA_H_
#include <msa.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc))
#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
@@ -2054,4 +2054,4 @@
\
tmp1_m; \
})
-#endif /* VPX_DSP_MIPS_MACROS_MSA_H_ */
+#endif /* AOM_DSP_MIPS_MACROS_MSA_H_ */
diff --git a/aom_dsp/mips/sad_msa.c b/aom_dsp/mips/sad_msa.c
index 30123d6..aeeae84 100644
--- a/aom_dsp/mips/sad_msa.c
+++ b/aom_dsp/mips/sad_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) \
@@ -1259,175 +1259,175 @@
return HADD_SW_S32(sad);
}
-#define VPX_SAD_4xHEIGHT_MSA(height) \
- uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHT_MSA(height) \
+ uint32_t aom_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride) { \
return sad_4width_msa(src, src_stride, ref, ref_stride, height); \
}
-#define VPX_SAD_8xHEIGHT_MSA(height) \
- uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHT_MSA(height) \
+ uint32_t aom_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride) { \
return sad_8width_msa(src, src_stride, ref, ref_stride, height); \
}
-#define VPX_SAD_16xHEIGHT_MSA(height) \
- uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHT_MSA(height) \
+ uint32_t aom_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride) { \
return sad_16width_msa(src, src_stride, ref, ref_stride, height); \
}
-#define VPX_SAD_32xHEIGHT_MSA(height) \
- uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHT_MSA(height) \
+ uint32_t aom_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride) { \
return sad_32width_msa(src, src_stride, ref, ref_stride, height); \
}
-#define VPX_SAD_64xHEIGHT_MSA(height) \
- uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHT_MSA(height) \
+ uint32_t aom_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride) { \
return sad_64width_msa(src, src_stride, ref, ref_stride, height); \
}
-#define VPX_SAD_4xHEIGHTx3_MSA(height) \
- void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx3_MSA(height) \
+ void aom_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_8xHEIGHTx3_MSA(height) \
- void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx3_MSA(height) \
+ void aom_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_16xHEIGHTx3_MSA(height) \
- void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx3_MSA(height) \
+ void aom_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_32xHEIGHTx3_MSA(height) \
- void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx3_MSA(height) \
+ void aom_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_64xHEIGHTx3_MSA(height) \
- void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx3_MSA(height) \
+ void aom_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_4xHEIGHTx8_MSA(height) \
- void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx8_MSA(height) \
+ void aom_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_8xHEIGHTx8_MSA(height) \
- void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx8_MSA(height) \
+ void aom_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_16xHEIGHTx8_MSA(height) \
- void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx8_MSA(height) \
+ void aom_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_32xHEIGHTx8_MSA(height) \
- void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx8_MSA(height) \
+ void aom_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_64xHEIGHTx8_MSA(height) \
- void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx8_MSA(height) \
+ void aom_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
uint32_t *sads) { \
sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \
}
-#define VPX_SAD_4xHEIGHTx4D_MSA(height) \
- void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_4xHEIGHTx4D_MSA(height) \
+ void aom_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *const refs[], \
int32_t ref_stride, uint32_t *sads) { \
sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \
}
-#define VPX_SAD_8xHEIGHTx4D_MSA(height) \
- void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_8xHEIGHTx4D_MSA(height) \
+ void aom_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *const refs[], \
int32_t ref_stride, uint32_t *sads) { \
sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \
}
-#define VPX_SAD_16xHEIGHTx4D_MSA(height) \
- void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_16xHEIGHTx4D_MSA(height) \
+ void aom_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *const refs[], \
int32_t ref_stride, uint32_t *sads) { \
sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \
}
-#define VPX_SAD_32xHEIGHTx4D_MSA(height) \
- void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_32xHEIGHTx4D_MSA(height) \
+ void aom_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *const refs[], \
int32_t ref_stride, uint32_t *sads) { \
sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \
}
-#define VPX_SAD_64xHEIGHTx4D_MSA(height) \
- void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_SAD_64xHEIGHTx4D_MSA(height) \
+ void aom_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *const refs[], \
int32_t ref_stride, uint32_t *sads) { \
sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \
}
-#define VPX_AVGSAD_4xHEIGHT_MSA(height) \
- uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_AVGSAD_4xHEIGHT_MSA(height) \
+ uint32_t aom_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
const uint8_t *second_pred) { \
return avgsad_4width_msa(src, src_stride, ref, ref_stride, height, \
second_pred); \
}
-#define VPX_AVGSAD_8xHEIGHT_MSA(height) \
- uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+#define AOM_AVGSAD_8xHEIGHT_MSA(height) \
+ uint32_t aom_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
const uint8_t *ref, int32_t ref_stride, \
const uint8_t *second_pred) { \
return avgsad_8width_msa(src, src_stride, ref, ref_stride, height, \
second_pred); \
}
-#define VPX_AVGSAD_16xHEIGHT_MSA(height) \
- uint32_t vpx_sad16x##height##_avg_msa( \
+#define AOM_AVGSAD_16xHEIGHT_MSA(height) \
+ uint32_t aom_sad16x##height##_avg_msa( \
const uint8_t *src, int32_t src_stride, const uint8_t *ref, \
int32_t ref_stride, const uint8_t *second_pred) { \
return avgsad_16width_msa(src, src_stride, ref, ref_stride, height, \
second_pred); \
}
-#define VPX_AVGSAD_32xHEIGHT_MSA(height) \
- uint32_t vpx_sad32x##height##_avg_msa( \
+#define AOM_AVGSAD_32xHEIGHT_MSA(height) \
+ uint32_t aom_sad32x##height##_avg_msa( \
const uint8_t *src, int32_t src_stride, const uint8_t *ref, \
int32_t ref_stride, const uint8_t *second_pred) { \
return avgsad_32width_msa(src, src_stride, ref, ref_stride, height, \
second_pred); \
}
-#define VPX_AVGSAD_64xHEIGHT_MSA(height) \
- uint32_t vpx_sad64x##height##_avg_msa( \
+#define AOM_AVGSAD_64xHEIGHT_MSA(height) \
+ uint32_t aom_sad64x##height##_avg_msa( \
const uint8_t *src, int32_t src_stride, const uint8_t *ref, \
int32_t ref_stride, const uint8_t *second_pred) { \
return avgsad_64width_msa(src, src_stride, ref, ref_stride, height, \
@@ -1435,92 +1435,92 @@
}
// 64x64
-VPX_SAD_64xHEIGHT_MSA(64);
-VPX_SAD_64xHEIGHTx3_MSA(64);
-VPX_SAD_64xHEIGHTx8_MSA(64);
-VPX_SAD_64xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_64xHEIGHT_MSA(64);
+AOM_SAD_64xHEIGHT_MSA(64);
+AOM_SAD_64xHEIGHTx3_MSA(64);
+AOM_SAD_64xHEIGHTx8_MSA(64);
+AOM_SAD_64xHEIGHTx4D_MSA(64);
+AOM_AVGSAD_64xHEIGHT_MSA(64);
// 64x32
-VPX_SAD_64xHEIGHT_MSA(32);
-VPX_SAD_64xHEIGHTx3_MSA(32);
-VPX_SAD_64xHEIGHTx8_MSA(32);
-VPX_SAD_64xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_64xHEIGHT_MSA(32);
+AOM_SAD_64xHEIGHT_MSA(32);
+AOM_SAD_64xHEIGHTx3_MSA(32);
+AOM_SAD_64xHEIGHTx8_MSA(32);
+AOM_SAD_64xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_64xHEIGHT_MSA(32);
// 32x64
-VPX_SAD_32xHEIGHT_MSA(64);
-VPX_SAD_32xHEIGHTx3_MSA(64);
-VPX_SAD_32xHEIGHTx8_MSA(64);
-VPX_SAD_32xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_32xHEIGHT_MSA(64);
+AOM_SAD_32xHEIGHT_MSA(64);
+AOM_SAD_32xHEIGHTx3_MSA(64);
+AOM_SAD_32xHEIGHTx8_MSA(64);
+AOM_SAD_32xHEIGHTx4D_MSA(64);
+AOM_AVGSAD_32xHEIGHT_MSA(64);
// 32x32
-VPX_SAD_32xHEIGHT_MSA(32);
-VPX_SAD_32xHEIGHTx3_MSA(32);
-VPX_SAD_32xHEIGHTx8_MSA(32);
-VPX_SAD_32xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_32xHEIGHT_MSA(32);
+AOM_SAD_32xHEIGHT_MSA(32);
+AOM_SAD_32xHEIGHTx3_MSA(32);
+AOM_SAD_32xHEIGHTx8_MSA(32);
+AOM_SAD_32xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_32xHEIGHT_MSA(32);
// 32x16
-VPX_SAD_32xHEIGHT_MSA(16);
-VPX_SAD_32xHEIGHTx3_MSA(16);
-VPX_SAD_32xHEIGHTx8_MSA(16);
-VPX_SAD_32xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_32xHEIGHT_MSA(16);
+AOM_SAD_32xHEIGHT_MSA(16);
+AOM_SAD_32xHEIGHTx3_MSA(16);
+AOM_SAD_32xHEIGHTx8_MSA(16);
+AOM_SAD_32xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_32xHEIGHT_MSA(16);
// 16x32
-VPX_SAD_16xHEIGHT_MSA(32);
-VPX_SAD_16xHEIGHTx3_MSA(32);
-VPX_SAD_16xHEIGHTx8_MSA(32);
-VPX_SAD_16xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_16xHEIGHT_MSA(32);
+AOM_SAD_16xHEIGHT_MSA(32);
+AOM_SAD_16xHEIGHTx3_MSA(32);
+AOM_SAD_16xHEIGHTx8_MSA(32);
+AOM_SAD_16xHEIGHTx4D_MSA(32);
+AOM_AVGSAD_16xHEIGHT_MSA(32);
// 16x16
-VPX_SAD_16xHEIGHT_MSA(16);
-VPX_SAD_16xHEIGHTx3_MSA(16);
-VPX_SAD_16xHEIGHTx8_MSA(16);
-VPX_SAD_16xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_16xHEIGHT_MSA(16);
+AOM_SAD_16xHEIGHT_MSA(16);
+AOM_SAD_16xHEIGHTx3_MSA(16);
+AOM_SAD_16xHEIGHTx8_MSA(16);
+AOM_SAD_16xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_16xHEIGHT_MSA(16);
// 16x8
-VPX_SAD_16xHEIGHT_MSA(8);
-VPX_SAD_16xHEIGHTx3_MSA(8);
-VPX_SAD_16xHEIGHTx8_MSA(8);
-VPX_SAD_16xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_16xHEIGHT_MSA(8);
+AOM_SAD_16xHEIGHT_MSA(8);
+AOM_SAD_16xHEIGHTx3_MSA(8);
+AOM_SAD_16xHEIGHTx8_MSA(8);
+AOM_SAD_16xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_16xHEIGHT_MSA(8);
// 8x16
-VPX_SAD_8xHEIGHT_MSA(16);
-VPX_SAD_8xHEIGHTx3_MSA(16);
-VPX_SAD_8xHEIGHTx8_MSA(16);
-VPX_SAD_8xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_8xHEIGHT_MSA(16);
+AOM_SAD_8xHEIGHT_MSA(16);
+AOM_SAD_8xHEIGHTx3_MSA(16);
+AOM_SAD_8xHEIGHTx8_MSA(16);
+AOM_SAD_8xHEIGHTx4D_MSA(16);
+AOM_AVGSAD_8xHEIGHT_MSA(16);
// 8x8
-VPX_SAD_8xHEIGHT_MSA(8);
-VPX_SAD_8xHEIGHTx3_MSA(8);
-VPX_SAD_8xHEIGHTx8_MSA(8);
-VPX_SAD_8xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_8xHEIGHT_MSA(8);
+AOM_SAD_8xHEIGHT_MSA(8);
+AOM_SAD_8xHEIGHTx3_MSA(8);
+AOM_SAD_8xHEIGHTx8_MSA(8);
+AOM_SAD_8xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_8xHEIGHT_MSA(8);
// 8x4
-VPX_SAD_8xHEIGHT_MSA(4);
-VPX_SAD_8xHEIGHTx3_MSA(4);
-VPX_SAD_8xHEIGHTx8_MSA(4);
-VPX_SAD_8xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_8xHEIGHT_MSA(4);
+AOM_SAD_8xHEIGHT_MSA(4);
+AOM_SAD_8xHEIGHTx3_MSA(4);
+AOM_SAD_8xHEIGHTx8_MSA(4);
+AOM_SAD_8xHEIGHTx4D_MSA(4);
+AOM_AVGSAD_8xHEIGHT_MSA(4);
// 4x8
-VPX_SAD_4xHEIGHT_MSA(8);
-VPX_SAD_4xHEIGHTx3_MSA(8);
-VPX_SAD_4xHEIGHTx8_MSA(8);
-VPX_SAD_4xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_4xHEIGHT_MSA(8);
+AOM_SAD_4xHEIGHT_MSA(8);
+AOM_SAD_4xHEIGHTx3_MSA(8);
+AOM_SAD_4xHEIGHTx8_MSA(8);
+AOM_SAD_4xHEIGHTx4D_MSA(8);
+AOM_AVGSAD_4xHEIGHT_MSA(8);
// 4x4
-VPX_SAD_4xHEIGHT_MSA(4);
-VPX_SAD_4xHEIGHTx3_MSA(4);
-VPX_SAD_4xHEIGHTx8_MSA(4);
-VPX_SAD_4xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_4xHEIGHT_MSA(4);
+AOM_SAD_4xHEIGHT_MSA(4);
+AOM_SAD_4xHEIGHTx3_MSA(4);
+AOM_SAD_4xHEIGHTx8_MSA(4);
+AOM_SAD_4xHEIGHTx4D_MSA(4);
+AOM_AVGSAD_4xHEIGHT_MSA(4);
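Each AOM_*_MSA macro above stamps out one exported function per block size through token pasting. As a concrete sketch, AOM_SAD_4xHEIGHT_MSA(8) expands to the following, with sad_4width_msa being the static helper defined earlier in this file:

uint32_t aom_sad4x8_msa(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride) {
  return sad_4width_msa(src, src_stride, ref, ref_stride, 8);
}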
diff --git a/aom_dsp/mips/sub_pixel_variance_msa.c b/aom_dsp/mips/sub_pixel_variance_msa.c
index 4352ff5..cfbdb15 100644
--- a/aom_dsp/mips/sub_pixel_variance_msa.c
+++ b/aom_dsp/mips/sub_pixel_variance_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
#include "aom_dsp/mips/macros_msa.h"
#include "aom_dsp/variance.h"
@@ -1617,8 +1617,8 @@
#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
-#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
- uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa( \
+#define AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
+ uint32_t aom_sub_pixel_variance##wd##x##ht##_msa( \
const uint8_t *src, int32_t src_stride, int32_t xoffset, \
int32_t yoffset, const uint8_t *ref, int32_t ref_stride, \
uint32_t *sse) { \
@@ -1644,7 +1644,7 @@
\
var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
} else { \
- var = vpx_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
+ var = aom_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
sse); \
} \
} \
@@ -1652,26 +1652,26 @@
return var; \
}
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
+AOM_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
-#define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \
- uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \
+#define AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \
+ uint32_t aom_sub_pixel_avg_variance##wd##x##ht##_msa( \
const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
uint32_t *sse, const uint8_t *sec_pred) { \
@@ -1703,21 +1703,21 @@
return VARIANCE_##wd##Wx##ht##H(*sse, diff); \
}
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
+AOM_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
-uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
+uint32_t aom_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
int32_t src_stride,
int32_t xoffset, int32_t yoffset,
const uint8_t *ref_ptr,
@@ -1751,8 +1751,8 @@
return VARIANCE_32Wx64H(*sse, diff);
}
-#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \
- uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa( \
+#define AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \
+ uint32_t aom_sub_pixel_avg_variance64x##ht##_msa( \
const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
uint32_t *sse, const uint8_t *sec_pred) { \
@@ -1784,5 +1784,5 @@
return VARIANCE_64Wx##ht##H(*sse, diff); \
}
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
+AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
+AOM_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
diff --git a/aom_dsp/mips/subtract_msa.c b/aom_dsp/mips/subtract_msa.c
index 04cb922..018e5f4 100644
--- a/aom_dsp/mips/subtract_msa.c
+++ b/aom_dsp/mips/subtract_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
static void sub_blk_4x4_msa(const uint8_t *src_ptr, int32_t src_stride,
@@ -226,7 +226,7 @@
}
}
-void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr,
+void aom_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr,
ptrdiff_t diff_stride, const uint8_t *src_ptr,
ptrdiff_t src_stride, const uint8_t *pred_ptr,
ptrdiff_t pred_stride) {
@@ -253,12 +253,12 @@
diff_stride);
break;
default:
- vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
+ aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
src_stride, pred_ptr, pred_stride);
break;
}
} else {
- vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride,
+ aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride,
pred_ptr, pred_stride);
}
}
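The MSA subtract dispatches on block width and defers unsupported sizes to the C reference. A sketch of what aom_subtract_block_c computes, assuming the usual row-by-row generic implementation:

#include <stddef.h>
#include <stdint.h>

static void subtract_block_sketch(int rows, int cols, int16_t *diff,
                                  ptrdiff_t diff_stride, const uint8_t *src,
                                  ptrdiff_t src_stride, const uint8_t *pred,
                                  ptrdiff_t pred_stride) {
  int r, c;
  for (r = 0; r < rows; ++r) {
    /* per-pixel residual: source minus prediction */
    for (c = 0; c < cols; ++c) diff[c] = (int16_t)(src[c] - pred[c]);
    diff += diff_stride;
    src += src_stride;
    pred += pred_stride;
  }
}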
diff --git a/aom_dsp/mips/txfm_macros_msa.h b/aom_dsp/mips/txfm_macros_msa.h
index a7da24e..955473f 100644
--- a/aom_dsp/mips/txfm_macros_msa.h
+++ b/aom_dsp/mips/txfm_macros_msa.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
-#define VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#ifndef AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#define AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
#include "aom_dsp/mips/macros_msa.h"
@@ -93,4 +93,4 @@
SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \
}
-#endif // VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
+#endif // AOM_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
diff --git a/aom_dsp/mips/variance_msa.c b/aom_dsp/mips/variance_msa.c
index 767dcf0..078625e 100644
--- a/aom_dsp/mips/variance_msa.c
+++ b/aom_dsp/mips/variance_msa.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/mips/macros_msa.h"
#define CALC_MSE_B(src, ref, var) \
@@ -487,7 +487,7 @@
return HADD_SW_S32(var);
}
-uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
+uint32_t aom_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
const uint8_t *ref_ptr, int32_t ref_stride) {
uint32_t err = 0;
uint32_t src0, src1, src2, src3;
@@ -527,8 +527,8 @@
#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
-#define VPX_VARIANCE_WDXHT_MSA(wd, ht) \
- uint32_t vpx_variance##wd##x##ht##_msa( \
+#define AOM_VARIANCE_WDXHT_MSA(wd, ht) \
+ uint32_t aom_variance##wd##x##ht##_msa( \
const uint8_t *src, int32_t src_stride, const uint8_t *ref, \
int32_t ref_stride, uint32_t *sse) { \
int32_t diff; \
@@ -539,21 +539,21 @@
return VARIANCE_##wd##Wx##ht##H(*sse, diff); \
}
-VPX_VARIANCE_WDXHT_MSA(4, 4);
-VPX_VARIANCE_WDXHT_MSA(4, 8);
+AOM_VARIANCE_WDXHT_MSA(4, 4);
+AOM_VARIANCE_WDXHT_MSA(4, 8);
-VPX_VARIANCE_WDXHT_MSA(8, 4)
-VPX_VARIANCE_WDXHT_MSA(8, 8)
-VPX_VARIANCE_WDXHT_MSA(8, 16)
+AOM_VARIANCE_WDXHT_MSA(8, 4)
+AOM_VARIANCE_WDXHT_MSA(8, 8)
+AOM_VARIANCE_WDXHT_MSA(8, 16)
-VPX_VARIANCE_WDXHT_MSA(16, 8)
-VPX_VARIANCE_WDXHT_MSA(16, 16)
-VPX_VARIANCE_WDXHT_MSA(16, 32)
+AOM_VARIANCE_WDXHT_MSA(16, 8)
+AOM_VARIANCE_WDXHT_MSA(16, 16)
+AOM_VARIANCE_WDXHT_MSA(16, 32)
-VPX_VARIANCE_WDXHT_MSA(32, 16)
-VPX_VARIANCE_WDXHT_MSA(32, 32)
+AOM_VARIANCE_WDXHT_MSA(32, 16)
+AOM_VARIANCE_WDXHT_MSA(32, 32)
-uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance32x64_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
int32_t diff;
@@ -563,7 +563,7 @@
return VARIANCE_32Wx64H(*sse, diff);
}
-uint32_t vpx_variance64x32_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance64x32_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
int32_t diff;
@@ -573,7 +573,7 @@
return VARIANCE_64Wx32H(*sse, diff);
}
-uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_variance64x64_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
int32_t diff;
@@ -583,14 +583,14 @@
return VARIANCE_64Wx64H(*sse, diff);
}
-uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse8x8_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride, uint32_t *sse) {
*sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8);
return *sse;
}
-uint32_t vpx_mse8x16_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse8x16_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
*sse = sse_8width_msa(src, src_stride, ref, ref_stride, 16);
@@ -598,7 +598,7 @@
return *sse;
}
-uint32_t vpx_mse16x8_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse16x8_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
*sse = sse_16width_msa(src, src_stride, ref, ref_stride, 8);
@@ -606,7 +606,7 @@
return *sse;
}
-uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride,
+uint32_t aom_mse16x16_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride,
uint32_t *sse) {
*sse = sse_16width_msa(src, src_stride, ref, ref_stride, 16);
@@ -614,16 +614,16 @@
return *sse;
}
-void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride,
+void aom_get8x8var_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
int32_t *sum) {
*sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum);
}
-void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride,
+void aom_get16x16var_msa(const uint8_t *src, int32_t src_stride,
const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
int32_t *sum) {
*sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum);
}
-uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }
+uint32_t aom_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }
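The VARIANCE_*Wx*H macros used above all reduce to the sum-of-squares identity var = sse - sum^2 / N, where N = w * h is a power of two so the division becomes a shift. A sketch under that assumption (the macro bodies themselves sit above this hunk):

#include <stdint.h>

/* For a w x h block with w * h == 1 << shift:
 *   variance = sse - (sum * sum) / (w * h)
 * The int64_t widening mirrors the _LARGE_ variants for big blocks. */
static uint32_t block_variance(uint32_t sse, int32_t sum, int shift) {
  return sse - (uint32_t)(((int64_t)sum * sum) >> shift);
}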
diff --git a/aom_dsp/postproc.h b/aom_dsp/postproc.h
index 78d11b1..f78a472 100644
--- a/aom_dsp/postproc.h
+++ b/aom_dsp/postproc.h
@@ -8,18 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_POSTPROC_H_
-#define VPX_DSP_POSTPROC_H_
+#ifndef AOM_DSP_POSTPROC_H_
+#define AOM_DSP_POSTPROC_H_
#ifdef __cplusplus
extern "C" {
#endif
// Fills a noise buffer with gaussian noise strength determined by sigma.
-int vpx_setup_noise(double sigma, int size, char *noise);
+int aom_setup_noise(double sigma, int size, char *noise);
#ifdef __cplusplus
}
#endif
-#endif // VPX_DSP_POSTPROC_H_
+#endif // AOM_DSP_POSTPROC_H_
diff --git a/aom_dsp/prob.c b/aom_dsp/prob.c
index 819e950..2fd9c13 100644
--- a/aom_dsp/prob.c
+++ b/aom_dsp/prob.c
@@ -10,7 +10,7 @@
#include "./prob.h"
-const uint8_t vpx_norm[256] = {
+const uint8_t aom_norm[256] = {
0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -24,10 +24,10 @@
};
static unsigned int tree_merge_probs_impl(unsigned int i,
- const vpx_tree_index *tree,
- const vpx_prob *pre_probs,
+ const aom_tree_index *tree,
+ const aom_prob *pre_probs,
const unsigned int *counts,
- vpx_prob *probs) {
+ aom_prob *probs) {
const int l = tree[i];
const unsigned int left_count =
(l <= 0) ? counts[-l]
@@ -41,7 +41,7 @@
return left_count + right_count;
}
-void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs,
- const unsigned int *counts, vpx_prob *probs) {
+void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
+ const unsigned int *counts, aom_prob *probs) {
tree_merge_probs_impl(0, tree, pre_probs, counts, probs);
}
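aom_tree_merge_probs walks the tree once, gathering leaf counts bottom-up and re-estimating one probability per internal node. A usage sketch with a hypothetical two-leaf tree (the tree shape and counts are made up for illustration):

#include "aom_dsp/prob.h"

/* Leaves are encoded as non-positive indices whose value is -index;
 * this is a single internal node with leaves 0 and 1. */
static const aom_tree_index two_leaf_tree[2] = { 0, -1 };

void merge_example(void) {
  const aom_prob pre_probs[1] = { 128 };     /* prior at node 0 */
  const unsigned int counts[2] = { 30, 10 }; /* observed leaf counts */
  aom_prob probs[1];                         /* one prob per internal node */
  aom_tree_merge_probs(two_leaf_tree, pre_probs, counts, probs);
}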
diff --git a/aom_dsp/prob.h b/aom_dsp/prob.h
index 3de6463..e00cd7c 100644
--- a/aom_dsp/prob.h
+++ b/aom_dsp/prob.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_PROB_H_
-#define VPX_DSP_PROB_H_
+#ifndef AOM_DSP_PROB_H_
+#define AOM_DSP_PROB_H_
-#include "./vpx_config.h"
-#include "./vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_common.h"
#include "aom_ports/mem.h"
@@ -20,51 +20,51 @@
extern "C" {
#endif
-typedef uint8_t vpx_prob;
+typedef uint8_t aom_prob;
#define MAX_PROB 255
-#define vpx_prob_half ((vpx_prob)128)
+#define aom_prob_half ((aom_prob)128)
-typedef int8_t vpx_tree_index;
+typedef int8_t aom_tree_index;
#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
-#define vpx_complement(x) (255 - x)
+#define aom_complement(x) (255 - x)
#define MODE_MV_COUNT_SAT 20
/* We build coding trees compactly in arrays.
- Each node of the tree is a pair of vpx_tree_indices.
+ Each node of the tree is a pair of aom_tree_indices.
Array index often references a corresponding probability table.
Index <= 0 means done encoding/decoding and value = -Index,
Index > 0 means need another bit, specification at index.
Nonnegative indices are always even; processing begins at node 0. */
-typedef const vpx_tree_index vpx_tree[];
+typedef const aom_tree_index aom_tree[];
-static INLINE vpx_prob clip_prob(int p) {
+static INLINE aom_prob clip_prob(int p) {
return (p > 255) ? 255 : (p < 1) ? 1 : p;
}
-static INLINE vpx_prob get_prob(int num, int den) {
+static INLINE aom_prob get_prob(int num, int den) {
return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den);
}
-static INLINE vpx_prob get_binary_prob(int n0, int n1) {
+static INLINE aom_prob get_binary_prob(int n0, int n1) {
return get_prob(n0, n0 + n1);
}
/* This function assumes prob1 and prob2 are already within [1,255] range. */
-static INLINE vpx_prob weighted_prob(int prob1, int prob2, int factor) {
+static INLINE aom_prob weighted_prob(int prob1, int prob2, int factor) {
return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
}
-static INLINE vpx_prob merge_probs(vpx_prob pre_prob, const unsigned int ct[2],
+static INLINE aom_prob merge_probs(aom_prob pre_prob, const unsigned int ct[2],
unsigned int count_sat,
unsigned int max_update_factor) {
- const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
- const unsigned int count = VPXMIN(ct[0] + ct[1], count_sat);
+ const aom_prob prob = get_binary_prob(ct[0], ct[1]);
+ const unsigned int count = AOMMIN(ct[0] + ct[1], count_sat);
const unsigned int factor = max_update_factor * count / count_sat;
return weighted_prob(pre_prob, prob, factor);
}
@@ -75,27 +75,27 @@
70, 76, 83, 89, 96, 102, 108, 115, 121, 128
};
-static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob,
+static INLINE aom_prob mode_mv_merge_probs(aom_prob pre_prob,
const unsigned int ct[2]) {
const unsigned int den = ct[0] + ct[1];
if (den == 0) {
return pre_prob;
} else {
- const unsigned int count = VPXMIN(den, MODE_MV_COUNT_SAT);
+ const unsigned int count = AOMMIN(den, MODE_MV_COUNT_SAT);
const unsigned int factor = count_to_update_factor[count];
- const vpx_prob prob =
+ const aom_prob prob =
clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den);
return weighted_prob(pre_prob, prob, factor);
}
}
-void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs,
- const unsigned int *counts, vpx_prob *probs);
+void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
+ const unsigned int *counts, aom_prob *probs);
-DECLARE_ALIGNED(16, extern const uint8_t, vpx_norm[256]);
+DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_PROB_H_
+#endif // AOM_DSP_PROB_H_
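A worked instance of the merge arithmetic defined above, with ct = {30, 10}, count_sat = 20 and max_update_factor = 128:

/* prob   = get_prob(30, 40) = (30 * 256 + 20) / 40 = 192
 * count  = AOMMIN(30 + 10, 20)                     = 20
 * factor = 128 * 20 / 20                           = 128
 * merged = weighted_prob(pre_prob, 192, 128)
 *        = ROUND_POWER_OF_TWO(pre_prob * 128 + 192 * 128, 8)
 *        = (pre_prob + 192 + 1) >> 1   (the rounded midpoint) */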
diff --git a/aom_dsp/psnr.c b/aom_dsp/psnr.c
index f237ca4..70659dc 100644
--- a/aom_dsp/psnr.c
+++ b/aom_dsp/psnr.c
@@ -10,11 +10,11 @@
#include <math.h>
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/psnr.h"
#include "aom_scale/yv12config.h"
-double vpx_sse_to_psnr(double samples, double peak, double sse) {
+double aom_sse_to_psnr(double samples, double peak, double sse) {
if (sse > 0.0) {
const double psnr = 10.0 * log10(samples * peak * peak / sse);
return psnr > MAX_PSNR ? MAX_PSNR : psnr;
@@ -46,7 +46,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w,
int h, uint64_t *sse, int64_t *sum) {
@@ -78,7 +78,7 @@
*sse = (unsigned int)sse_long;
*sum = (int)sum_long;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, int width, int height) {
@@ -106,7 +106,7 @@
const uint8_t *pa = a;
const uint8_t *pb = b;
for (x = 0; x < width / 16; ++x) {
- vpx_mse16x16(pa, a_stride, pb, b_stride, &sse);
+ aom_mse16x16(pa, a_stride, pb, b_stride, &sse);
total_sse += sse;
pa += 16;
@@ -120,7 +120,7 @@
return total_sse;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int width,
int height, unsigned int input_shift) {
@@ -163,7 +163,7 @@
const uint8_t *pa = a;
const uint8_t *pb = b;
for (x = 0; x < width / 16; ++x) {
- vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse);
+ aom_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse);
total_sse += sse;
pa += 16;
pb += 16;
@@ -173,9 +173,9 @@
}
return total_sse;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
@@ -184,7 +184,7 @@
a->y_crop_width, a->y_crop_height);
}
-int64_t vpx_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_u_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->uv_crop_width == b->uv_crop_width);
assert(a->uv_crop_height == b->uv_crop_height);
@@ -193,7 +193,7 @@
a->uv_crop_width, a->uv_crop_height);
}
-int64_t vpx_get_v_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_get_v_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->uv_crop_width == b->uv_crop_width);
assert(a->uv_crop_height == b->uv_crop_height);
@@ -202,8 +202,8 @@
a->uv_crop_width, a->uv_crop_height);
}
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t aom_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
@@ -214,7 +214,7 @@
a->y_crop_width, a->y_crop_height);
}
-int64_t vpx_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->uv_crop_width == b->uv_crop_width);
assert(a->uv_crop_height == b->uv_crop_height);
@@ -225,7 +225,7 @@
a->uv_crop_width, a->uv_crop_height);
}
-int64_t vpx_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
assert(a->uv_crop_width == b->uv_crop_width);
assert(a->uv_crop_height == b->uv_crop_height);
@@ -235,10 +235,10 @@
return highbd_get_sse(a->v_buffer, a->uv_stride, b->v_buffer, b->uv_stride,
a->uv_crop_width, a->uv_crop_height);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
uint32_t bit_depth, uint32_t in_bit_depth) {
const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
@@ -272,7 +272,7 @@
}
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
- psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+ psnr->psnr[1 + i] = aom_sse_to_psnr(samples, peak, (double)sse);
total_sse += sse;
total_samples += samples;
@@ -281,12 +281,12 @@
psnr->sse[0] = total_sse;
psnr->samples[0] = total_samples;
psnr->psnr[0] =
- vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
+ aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
-#endif // !CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+void aom_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
PSNR_STATS *psnr) {
static const double peak = 255.0;
const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
@@ -308,7 +308,7 @@
get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
- psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+ psnr->psnr[1 + i] = aom_sse_to_psnr(samples, peak, (double)sse);
total_sse += sse;
total_samples += samples;
@@ -317,5 +317,5 @@
psnr->sse[0] = total_sse;
psnr->samples[0] = total_samples;
psnr->psnr[0] =
- vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
+ aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
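As a numeric check of aom_sse_to_psnr: for an 8-bit 64x64 plane (samples = 4096, peak = 255) with sse = 4096, i.e. a mean squared error of 1:

/* psnr = 10 * log10(4096 * 255 * 255 / 4096)
 *      = 10 * log10(65025) ~= 48.13 dB
 * A zero sse short-circuits to MAX_PSNR rather than +inf
 * (that else branch lies outside this hunk). */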
diff --git a/aom_dsp/psnr.h b/aom_dsp/psnr.h
index ba70407..48e7613 100644
--- a/aom_dsp/psnr.h
+++ b/aom_dsp/psnr.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_PSNR_H_
-#define VPX_DSP_PSNR_H_
+#ifndef AOM_DSP_PSNR_H_
+#define AOM_DSP_PSNR_H_
#include "aom_scale/yv12config.h"
@@ -25,7 +25,7 @@
uint32_t samples[4]; // total/y/u/v
} PSNR_STATS;
-// TODO(dkovalev) change vpx_sse_to_psnr signature: double -> int64_t
+// TODO(dkovalev) change aom_sse_to_psnr signature: double -> int64_t
/*!\brief Converts SSE to PSNR
*
@@ -35,29 +35,29 @@
* \param[in] peak Max sample value
* \param[in] sse Sum of squared errors
*/
-double vpx_sse_to_psnr(double samples, double peak, double sse);
-int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-int64_t vpx_get_u_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-int64_t vpx_get_v_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
-#if CONFIG_VP9_HIGHBITDEPTH
-int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+double aom_sse_to_psnr(double samples, double peak, double sse);
+int64_t aom_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t aom_get_u_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t aom_get_v_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t aom_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b);
-int64_t vpx_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+int64_t aom_highbd_get_u_sse(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b);
+int64_t aom_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b);
-int64_t vpx_highbd_get_v_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b);
-void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
+void aom_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
unsigned int bit_depth, unsigned int in_bit_depth);
#endif
-void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+void aom_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
PSNR_STATS *psnr);
-double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source,
+double aom_psnrhvs(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *phvs_y,
double *phvs_u, double *phvs_v, uint32_t bd, uint32_t in_bd);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_PSNR_H_
+#endif // AOM_DSP_PSNR_H_
diff --git a/aom_dsp/psnrhvs.c b/aom_dsp/psnrhvs.c
index 333ff63..f7b78ae 100644
--- a/aom_dsp/psnrhvs.c
+++ b/aom_dsp/psnrhvs.c
@@ -15,8 +15,8 @@
#include <stdlib.h>
#include <math.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/ssim.h"
#include "aom_ports/system_state.h"
#include "aom_dsp/psnr.h"
@@ -30,17 +30,17 @@
int xstride) {
int i, j;
(void)xstride;
- vpx_fdct8x8(x, y, ystride);
+ aom_fdct8x8(x, y, ystride);
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
*(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void hbd_od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x,
int xstride) {
int i, j;
(void)xstride;
- vpx_highbd_fdct8x8(x, y, ystride);
+ aom_highbd_fdct8x8(x, y, ystride);
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
*(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3;
@@ -210,7 +210,7 @@
s_gvar = (s_vars[0] + s_vars[1] + s_vars[2] + s_vars[3]) / s_gvar;
if (d_gvar > 0)
d_gvar = (d_vars[0] + d_vars[1] + d_vars[2] + d_vars[3]) / d_gvar;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (bit_depth == 10 || bit_depth == 12) {
hbd_od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8);
hbd_od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8);
@@ -246,7 +246,7 @@
return ret;
}
-double vpx_psnrhvs(const YV12_BUFFER_CONFIG *src,
+double aom_psnrhvs(const YV12_BUFFER_CONFIG *src,
const YV12_BUFFER_CONFIG *dest, double *y_psnrhvs,
double *u_psnrhvs, double *v_psnrhvs, uint32_t bd,
uint32_t in_bd) {
@@ -254,7 +254,7 @@
const double par = 1.0;
const int step = 7;
uint32_t bd_shift = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
assert(bd == 8 || bd == 10 || bd == 12);
assert(bd >= in_bd);
diff --git a/aom_dsp/quantize.c b/aom_dsp/quantize.c
index c901aa0..76e7e97 100644
--- a/aom_dsp/quantize.c
+++ b/aom_dsp/quantize.c
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/quantize.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#if CONFIG_AOM_QM
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr,
@@ -40,8 +40,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
@@ -69,7 +69,7 @@
}
#endif
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr,
@@ -99,8 +99,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
@@ -131,7 +131,7 @@
}
#endif
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -192,8 +192,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
@@ -252,7 +252,7 @@
}
#endif
-void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
@@ -316,8 +316,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_32x32_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -377,7 +377,7 @@
}
#endif
#else
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr) {
@@ -400,8 +400,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
@@ -425,7 +425,7 @@
}
#endif
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr) {
@@ -450,8 +450,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
@@ -477,7 +477,7 @@
}
#endif
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -527,8 +527,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
@@ -578,7 +578,7 @@
}
#endif
-void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
@@ -632,8 +632,8 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_32x32_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
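The DC quantizers above all share one fixed-point pattern: bias by the rounding term, scale by a Q16 quantizer, then reconstruct through the dequant step. A simplified scalar sketch of the non-QM DC path (the 16-bit clamp of the biased coefficient and the skip_block gate are omitted; tran_low_t is assumed int32_t as in high-bitdepth builds):

#include <stdint.h>

typedef int32_t tran_low_t; /* assumption; defined in aom_dsp_common.h */

static void quantize_dc_sketch(const tran_low_t *coeff, int16_t round,
                               int16_t quant, int16_t dequant,
                               tran_low_t *qcoeff, tran_low_t *dqcoeff,
                               uint16_t *eob) {
  const int c = (int)coeff[0];
  const int sign = c >> 31;                        /* all-ones if negative */
  const int abs_c = (c ^ sign) - sign;             /* |c| */
  const int tmp = ((abs_c + round) * quant) >> 16; /* Q16 scaling */
  qcoeff[0] = (tmp ^ sign) - sign;                 /* restore sign */
  dqcoeff[0] = qcoeff[0] * dequant;
  *eob = (uint16_t)(tmp ? 1 : 0);                  /* last nonzero + 1 */
}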
diff --git a/aom_dsp/quantize.h b/aom_dsp/quantize.h
index b994d9a..720dce3 100644
--- a/aom_dsp/quantize.h
+++ b/aom_dsp/quantize.h
@@ -8,28 +8,28 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_QUANTIZE_H_
-#define VPX_DSP_QUANTIZE_H_
+#ifndef AOM_DSP_QUANTIZE_H_
+#define AOM_DSP_QUANTIZE_H_
-#include "./vpx_config.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "aom_dsp/aom_dsp_common.h"
#ifdef __cplusplus
extern "C" {
#endif
#if CONFIG_AOM_QM
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
const int16_t *round_ptr, const int16_t quant_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr,
const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr,
const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
-void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -37,19 +37,19 @@
uint16_t *eob_ptr, const int16_t *scan,
const int16_t *iscan, const qm_val_t *qm_ptr,
const qm_val_t *iqm_ptr);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
uint16_t *eob_ptr, const qm_val_t *qm_ptr,
const qm_val_t *iqm_ptr);
-void vpx_highbd_quantize_dc_32x32(
+void aom_highbd_quantize_dc_32x32(
const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr,
const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr,
const qm_val_t *iqm_ptr);
-void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
@@ -59,22 +59,22 @@
const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr);
#endif
#else
-void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
+void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
const int16_t *round_ptr, const int16_t quant_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr);
-void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t dequant_ptr, uint16_t *eob_ptr);
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
uint16_t *eob_ptr);
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr,
const int16_t quant_ptr,
tran_low_t *qcoeff_ptr,
@@ -87,4 +87,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_QUANTIZE_H_
+#endif // AOM_DSP_QUANTIZE_H_
diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c
index 8bbf83f..f5d19cc 100644
--- a/aom_dsp/sad.c
+++ b/aom_dsp/sad.c
@@ -10,10 +10,10 @@
#include <stdlib.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
/* Sum the difference between every corresponding element of the buffers. */
@@ -32,43 +32,43 @@
}
#define sadMxN(m, n) \
- unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
+ unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride) { \
return sad(src, src_stride, ref, ref_stride, m, n); \
} \
- unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
+ unsigned int aom_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
uint8_t comp_pred[m * n]; \
- vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
+ aom_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
return sad(src, src_stride, comp_pred, m, m, n); \
}
// depending on call sites, pass **ref_array to avoid & in subsequent call and
// de-dup with 4D below.
#define sadMxNxK(m, n, k) \
- void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
+ void aom_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref_array, int ref_stride, \
uint32_t *sad_array) { \
int i; \
for (i = 0; i < k; ++i) \
sad_array[i] = \
- vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
+ aom_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
}
// This appears to be equivalent to the above when k == 4 and refs is const
#define sadMxNx4D(m, n) \
- void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
+ void aom_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
const uint8_t *const ref_array[], \
int ref_stride, uint32_t *sad_array) { \
int i; \
for (i = 0; i < 4; ++i) \
sad_array[i] = \
- vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
+ aom_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
}
/* clang-format off */
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
// 128x128
sadMxN(128, 128)
sadMxNxK(128, 128, 3)
@@ -82,7 +82,7 @@
// 64x128
sadMxN(64, 128)
sadMxNx4D(64, 128)
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
// 64x64
sadMxN(64, 64)
@@ -153,7 +153,7 @@
sadMxNx4D(4, 4)
/* clang-format on */
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE
unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
int b_stride, int width, int height) {
@@ -186,43 +186,43 @@
}
#define highbd_sadMxN(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
+ unsigned int aom_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, \
int ref_stride) { \
return highbd_sad(src, src_stride, ref, ref_stride, m, n); \
} \
- unsigned int vpx_highbd_sad##m##x##n##_avg_c( \
+ unsigned int aom_highbd_sad##m##x##n##_avg_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
uint16_t comp_pred[m * n]; \
- vpx_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
+ aom_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
return highbd_sadb(src, src_stride, comp_pred, m, m, n); \
}
#define highbd_sadMxNxK(m, n, k) \
- void vpx_highbd_sad##m##x##n##x##k##_c( \
+ void aom_highbd_sad##m##x##n##x##k##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref_array, \
int ref_stride, uint32_t *sad_array) { \
int i; \
for (i = 0; i < k; ++i) { \
- sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \
+ sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride, \
&ref_array[i], ref_stride); \
} \
}
#define highbd_sadMxNx4D(m, n) \
- void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
+ void aom_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
const uint8_t *const ref_array[], \
int ref_stride, uint32_t *sad_array) { \
int i; \
for (i = 0; i < 4; ++i) { \
- sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \
+ sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride, \
ref_array[i], ref_stride); \
} \
}
/* clang-format off */
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
// 128x128
highbd_sadMxN(128, 128)
highbd_sadMxNxK(128, 128, 3)
@@ -236,7 +236,7 @@
// 64x128
highbd_sadMxN(64, 128)
highbd_sadMxNx4D(64, 128)
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
// 64x64
highbd_sadMxN(64, 64)
@@ -306,9 +306,9 @@
highbd_sadMxNxK(4, 4, 8)
highbd_sadMxNx4D(4, 4)
/* clang-format on */
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VP10 && CONFIG_EXT_INTER
+#if CONFIG_AV1 && CONFIG_EXT_INTER
static INLINE
unsigned int masked_sad(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m, int m_stride,
@@ -329,7 +329,7 @@
}
#define MASKSADMxN(m, n) \
- unsigned int vpx_masked_sad##m##x##n##_c( \
+ unsigned int aom_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m, \
@@ -357,7 +357,7 @@
MASKSADMxN(4, 4)
/* clang-format on */
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE
unsigned int highbd_masked_sad(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
@@ -381,7 +381,7 @@
}
#define HIGHBD_MASKSADMXN(m, n) \
- unsigned int vpx_highbd_masked_sad##m##x##n##_c( \
+ unsigned int aom_highbd_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad(src, src_stride, ref, ref_stride, msk, \
@@ -406,10 +406,10 @@
HIGHBD_MASKSADMXN(8, 4)
HIGHBD_MASKSADMXN(4, 8)
HIGHBD_MASKSADMXN(4, 4)
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#endif // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#endif // CONFIG_AV1 && CONFIG_EXT_INTER
-#if CONFIG_VP10 && CONFIG_OBMC
+#if CONFIG_AV1 && CONFIG_OBMC
// pre: predictor being evaluated
// wsrc: target weighted prediction (scaled by 4096 to keep precision)
// mask: 2d weights (scaled by 4096)
@@ -432,7 +432,7 @@
}
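/* A sketch of how the scaling above works out per pixel; the obmc_sad()
 * body is elided by this hunk, so the rounding shown is an assumption:
 * with wsrc and mask both carrying a 4096 (2^12) scale, the weighted error
 * is |wsrc - pre * mask| rounded back down by 12 bits. */
static INLINE unsigned int obmc_sad_sketch(const uint8_t *pre, int pre_stride,
                                           const int32_t *wsrc,
                                           const int32_t *mask, int width,
                                           int height) {
  int y, x;
  unsigned int sad = 0;
  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x++)
      sad += (abs(wsrc[x] - pre[x] * mask[x]) + (1 << 11)) >> 12;
    pre += pre_stride;
    wsrc += width;  // wsrc/mask rows assumed packed at `width` (see above)
    mask += width;
  }
  return sad;
}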
#define OBMCSADMxN(m, n) \
- unsigned int vpx_obmc_sad##m##x##n##_c(const uint8_t *ref, int ref_stride, \
+ unsigned int aom_obmc_sad##m##x##n##_c(const uint8_t *ref, int ref_stride, \
const int32_t *wsrc, \
const int32_t *mask) { \
return obmc_sad(ref, ref_stride, wsrc, mask, m, n); \
@@ -459,7 +459,7 @@
OBMCSADMxN(4, 4)
/* clang-format on */
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE
unsigned int highbd_obmc_sad(const uint8_t *pre8, int pre_stride,
const int32_t *wsrc, const int32_t *mask,
@@ -481,7 +481,7 @@
}
#define HIGHBD_OBMCSADMXN(m, n) \
- unsigned int vpx_highbd_obmc_sad##m##x##n##_c( \
+ unsigned int aom_highbd_obmc_sad##m##x##n##_c( \
const uint8_t *ref, int ref_stride, const int32_t *wsrc, \
const int32_t *mask) { \
return highbd_obmc_sad(ref, ref_stride, wsrc, mask, m, n); \
@@ -507,5 +507,5 @@
HIGHBD_OBMCSADMXN(4, 8)
HIGHBD_OBMCSADMXN(4, 4)
/* clang-format on */
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#endif // CONFIG_VP10 && CONFIG_OBMC
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#endif // CONFIG_AV1 && CONFIG_OBMC
diff --git a/aom_dsp/ssim.c b/aom_dsp/ssim.c
index c111ead..ed8aaea 100644
--- a/aom_dsp/ssim.c
+++ b/aom_dsp/ssim.c
@@ -10,12 +10,12 @@
#include <assert.h>
#include <math.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/ssim.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
-void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
+void aom_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
uint32_t *sum_s, uint32_t *sum_r,
uint32_t *sum_sq_s, uint32_t *sum_sq_r,
uint32_t *sum_sxr) {
@@ -30,7 +30,7 @@
}
}
}
-void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
+void aom_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s,
uint32_t *sum_sq_r, uint32_t *sum_sxr) {
int i, j;
@@ -45,8 +45,8 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
int rp, uint32_t *sum_s, uint32_t *sum_r,
uint32_t *sum_sq_s, uint32_t *sum_sq_r,
uint32_t *sum_sxr) {
@@ -61,7 +61,7 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static const int64_t cc1 = 26634;   // (64^2*(.01*255)^2)
static const int64_t cc2 = 239708;  // (64^2*(.03*255)^2)
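/* Worked out: 0.01*255 = 2.55, 2.55^2 = 6.5025, 4096 * 6.5025 = 26634.24;
 *             0.03*255 = 7.65, 7.65^2 = 58.5225, 4096 * 58.5225 = 239708.16. */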
@@ -102,26 +102,26 @@
static double ssim_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp) {
uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- vpx_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ aom_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
&sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64, 8);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
int rp, uint32_t bd, uint32_t shift) {
uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- vpx_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ aom_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
&sum_sxr);
return similarity(sum_s >> shift, sum_r >> shift, sum_sq_s >> (2 * shift),
sum_sq_r >> (2 * shift), sum_sxr >> (2 * shift), 64, bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
// We are using an 8x8 moving window with starting location of each 8x8 window
// on the 4x4 pixel grid. Such an arrangement allows the windows to overlap
// block boundaries to penalize blocking artifacts.
-static double vpx_ssim2(const uint8_t *img1, const uint8_t *img2,
+static double aom_ssim2(const uint8_t *img1, const uint8_t *img2,
int stride_img1, int stride_img2, int width,
int height) {
int i, j;
@@ -141,8 +141,8 @@
return ssim_total;
}
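/* A sketch of the elided loop body (locals i, j, samples and ssim_total are
 * assumed from the elided prologue): 8x8 windows advance in steps of 4 in
 * both directions, so neighbouring windows overlap by half a window. */
for (i = 0; i <= height - 8;
     i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
  for (j = 0; j <= width - 8; j += 4) {
    ssim_total += ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
    ++samples;
  }
}
ssim_total /= samples;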
-#if CONFIG_VP9_HIGHBITDEPTH
-static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
+#if CONFIG_AOM_HIGHBITDEPTH
+static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
int stride_img1, int stride_img2, int width,
int height, uint32_t bd, uint32_t shift) {
int i, j;
@@ -163,20 +163,20 @@
ssim_total /= samples;
return ssim_total;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight) {
double a, b, c;
double ssimv;
- a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+ a = aom_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
dest->y_stride, source->y_crop_width, source->y_crop_height);
- b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+ b = aom_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
- c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+ c = aom_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
ssimv = a * .8 + .1 * (b + c);
@@ -264,11 +264,11 @@
}
static void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2,
int img2_pitch, Ssimv *sv) {
- vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r,
+ aom_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r,
&sv->sum_sq_s, &sv->sum_sq_r, &sv->sum_sxr);
}
-double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
+double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
int img2_pitch, int width, int height, Ssimv *sv2,
Metrics *m, int do_inconsistency) {
double dssim_total = 0;
@@ -279,7 +279,7 @@
int c = 0;
double norm;
double old_ssim_total = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
// We can sample points as frequently as we like; start with 1 per 4x4.
for (i = 0; i < height;
i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
@@ -428,8 +428,8 @@
return inconsistency_total;
}
-#if CONFIG_VP9_HIGHBITDEPTH
-double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
+#if CONFIG_AOM_HIGHBITDEPTH
+double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight,
uint32_t bd, uint32_t in_bd) {
double a, b, c;
@@ -439,15 +439,15 @@
assert(bd >= in_bd);
shift = bd - in_bd;
- a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+ a = aom_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
dest->y_stride, source->y_crop_width,
source->y_crop_height, in_bd, shift);
- b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+ b = aom_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
dest->uv_stride, source->uv_crop_width,
source->uv_crop_height, in_bd, shift);
- c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+ c = aom_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
dest->uv_stride, source->uv_crop_width,
source->uv_crop_height, in_bd, shift);
@@ -458,4 +458,4 @@
return ssimv;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.h b/aom_dsp/ssim.h
index 0127d3f..831803a 100644
--- a/aom_dsp/ssim.h
+++ b/aom_dsp/ssim.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_SSIM_H_
-#define VPX_DSP_SSIM_H_
+#ifndef AOM_DSP_SSIM_H_
+#define AOM_DSP_SSIM_H_
#define MAX_SSIM_DB 100.0
@@ -17,7 +17,7 @@
extern "C" {
#endif
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_scale/yv12config.h"
// metrics used for calculating ssim, ssim2, dssim, and ssimc
@@ -62,26 +62,26 @@
double ssimcd;
} Metrics;
-double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
+double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
int img2_pitch, int width, int height, Ssimv *sv2,
Metrics *m, int do_inconsistency);
-double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight);
-double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
+double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *ssim_y,
double *ssim_u, double *ssim_v, uint32_t bd,
uint32_t in_bd);
-#if CONFIG_VP9_HIGHBITDEPTH
-double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
+#if CONFIG_AOM_HIGHBITDEPTH
+double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest, double *weight,
uint32_t bd, uint32_t in_bd);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VPX_DSP_SSIM_H_
+#endif // AOM_DSP_SSIM_H_
diff --git a/aom_dsp/subtract.c b/aom_dsp/subtract.c
index 5d048ed..a68bc64 100644
--- a/aom_dsp/subtract.c
+++ b/aom_dsp/subtract.c
@@ -10,13 +10,13 @@
#include <stdlib.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-void vpx_subtract_block_c(int rows, int cols, int16_t *diff,
+void aom_subtract_block_c(int rows, int cols, int16_t *diff,
ptrdiff_t diff_stride, const uint8_t *src,
ptrdiff_t src_stride, const uint8_t *pred,
ptrdiff_t pred_stride) {
@@ -31,8 +31,8 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
ptrdiff_t diff_stride, const uint8_t *src8,
ptrdiff_t src_stride, const uint8_t *pred8,
ptrdiff_t pred_stride, int bd) {
@@ -51,4 +51,4 @@
src += src_stride;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/sum_squares.c b/aom_dsp/sum_squares.c
index 73a9006..6b71d44 100644
--- a/aom_dsp/sum_squares.c
+++ b/aom_dsp/sum_squares.c
@@ -10,9 +10,9 @@
#include <assert.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-uint64_t vpx_sum_squares_2d_i16_c(const int16_t *src, int src_stride,
+uint64_t aom_sum_squares_2d_i16_c(const int16_t *src, int src_stride,
int size) {
int r, c;
uint64_t ss = 0;
@@ -28,7 +28,7 @@
return ss;
}
-uint64_t vpx_sum_squares_i16_c(const int16_t *src, uint32_t n) {
+uint64_t aom_sum_squares_i16_c(const int16_t *src, uint32_t n) {
uint64_t ss = 0;
do {
const int16_t v = *src++;
diff --git a/aom_dsp/txfm_common.h b/aom_dsp/txfm_common.h
index 38fe2b7..3287990 100644
--- a/aom_dsp/txfm_common.h
+++ b/aom_dsp/txfm_common.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_TXFM_COMMON_H_
-#define VPX_DSP_TXFM_COMMON_H_
+#ifndef AOM_DSP_TXFM_COMMON_H_
+#define AOM_DSP_TXFM_COMMON_H_
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
// Constants and Macros used by all idct/dct functions
#define DCT_CONST_BITS 14
@@ -66,4 +66,4 @@
// 16384 * sqrt(2)
static const tran_high_t Sqrt2 = 23170;
-#endif // VPX_DSP_TXFM_COMMON_H_
+#endif // AOM_DSP_TXFM_COMMON_H_
diff --git a/aom_dsp/variance.c b/aom_dsp/variance.c
index 5df2aa5..bb7720b 100644
--- a/aom_dsp/variance.c
+++ b/aom_dsp/variance.c
@@ -9,16 +9,16 @@
*/
#include <stdlib.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/variance.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
-uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
+uint32_t aom_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride) {
int distortion = 0;
int r, c;
@@ -36,7 +36,7 @@
return distortion;
}
-uint32_t vpx_get_mb_ss_c(const int16_t *a) {
+uint32_t aom_get_mb_ss_c(const int16_t *a) {
unsigned int i, sum = 0;
for (i = 0; i < 256; ++i) {
@@ -46,22 +46,22 @@
return sum;
}
-uint32_t vpx_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
- return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse);
+ return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse);
}
-uint32_t vpx_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
- return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse);
+ return aom_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse);
}
-uint32_t vpx_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride,
+uint32_t aom_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
- return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse);
+ return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse);
}
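/* Note: the xoffset/yoffset arguments index the 8-phase 2-tap bilinear
 * filter, so an offset of 4 lands exactly halfway between pixels - hence
 * the "halfpixvar" names above. */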
static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
@@ -142,7 +142,7 @@
}
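/* The helper above accumulates sse = sum((a-b)^2) and sum = sum(a-b); the
 * VAR() wrappers below then return sse - sum^2 / (W*H), the familiar
 * E[d^2] - (E[d])^2 identity scaled by the sample count. */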
#define VAR(W, H) \
- uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+ uint32_t aom_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, \
uint32_t *sse) { \
int sum; \
@@ -151,7 +151,7 @@
}
#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_c( \
+ uint32_t aom_sub_pixel_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, int xoffset, int yoffset, \
const uint8_t *b, int b_stride, uint32_t *sse) { \
uint16_t fdata3[(H + 1) * W]; \
@@ -162,11 +162,11 @@
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
- return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
+ return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
}
#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c( \
+ uint32_t aom_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, int xoffset, int yoffset, \
const uint8_t *b, int b_stride, uint32_t *sse, \
const uint8_t *second_pred) { \
@@ -179,9 +179,9 @@
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
- vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
+ aom_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
\
- return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
+ return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
}
/* Identical to the variance call except it takes an additional parameter, sum,
@@ -189,7 +189,7 @@
 * sse - sum^2 / (w * h)
*/
#define GET_VAR(W, H) \
- void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \
+ void aom_get##W##x##H##var_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, uint32_t *sse, \
int *sum) { \
variance(a, a_stride, b, b_stride, W, H, sse, sum); \
@@ -200,7 +200,7 @@
* variable.
*/
#define MSE(W, H) \
- uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
+ uint32_t aom_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, \
uint32_t *sse) { \
int sum; \
@@ -214,11 +214,11 @@
SUBPIX_VAR(W, H) \
SUBPIX_AVG_VAR(W, H)
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
VARIANCES(128, 128)
VARIANCES(128, 64)
VARIANCES(64, 128)
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
VARIANCES(64, 64)
VARIANCES(64, 32)
VARIANCES(32, 64)
@@ -241,7 +241,7 @@
MSE(8, 16)
MSE(8, 8)
-void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
+void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
int height, const uint8_t *ref, int ref_stride) {
int i, j;
@@ -257,7 +257,7 @@
}
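/* The elided loop body averages the two predictors with rounding,
 * comp_pred[j] = (pred[j] + ref[j] + 1) >> 1, row by row. */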
// Get pred block from up-sampled reference.
-void vpx_upsampled_pred_c(uint8_t *comp_pred, int width, int height,
+void aom_upsampled_pred_c(uint8_t *comp_pred, int width, int height,
const uint8_t *ref, int ref_stride) {
int i, j, k;
int stride = ref_stride << 3;
@@ -271,7 +271,7 @@
}
}
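/* A sketch of aom_upsampled_pred_c's elided loop, assuming the reference
 * plane is stored 8x upsampled in both directions: each output pixel reads
 * every 8th sample (stride = ref_stride << 3 per row, step 8 per column). */
for (i = 0; i < height; i++) {
  for (j = 0, k = 0; j < width; j++, k += 8) comp_pred[j] = ref[k];
  comp_pred += width;
  ref += stride;
}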
-void vpx_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
+void aom_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
int width, int height, const uint8_t *ref,
int ref_stride) {
int i, j;
@@ -288,7 +288,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static void highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
uint64_t *sse, int64_t *sum) {
@@ -341,7 +341,7 @@
}
#define HIGHBD_VAR(W, H) \
- uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+ uint32_t aom_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, \
uint32_t *sse) { \
int sum; \
@@ -349,7 +349,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+ uint32_t aom_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, \
uint32_t *sse) { \
int sum; \
@@ -359,7 +359,7 @@
return (var >= 0) ? (uint32_t)var : 0; \
} \
\
- uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+ uint32_t aom_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
const uint8_t *b, int b_stride, \
uint32_t *sse) { \
int sum; \
@@ -370,26 +370,26 @@
}
#define HIGHBD_GET_VAR(S) \
- void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+ void aom_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse, int *sum) { \
highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
} \
\
- void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+ void aom_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse, int *sum) { \
highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
} \
\
- void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+ void aom_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse, int *sum) { \
highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
}
#define HIGHBD_MSE(W, H) \
- uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+ uint32_t aom_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse) { \
int sum; \
@@ -397,7 +397,7 @@
return *sse; \
} \
\
- uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+ uint32_t aom_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse) { \
int sum; \
@@ -405,7 +405,7 @@
return *sse; \
} \
\
- uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+ uint32_t aom_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
uint32_t *sse) { \
int sum; \
@@ -413,7 +413,7 @@
return *sse; \
}
-void vpx_highbd_var_filter_block2d_bil_first_pass(
+void aom_highbd_var_filter_block2d_bil_first_pass(
const uint8_t *src_ptr8, uint16_t *output_ptr,
unsigned int src_pixels_per_line, int pixel_step,
unsigned int output_height, unsigned int output_width,
@@ -435,7 +435,7 @@
}
}
-void vpx_highbd_var_filter_block2d_bil_second_pass(
+void aom_highbd_var_filter_block2d_bil_second_pass(
const uint16_t *src_ptr, uint16_t *output_ptr,
unsigned int src_pixels_per_line, unsigned int pixel_step,
unsigned int output_height, unsigned int output_width,
@@ -456,53 +456,53 @@
}
#define HIGHBD_SUBPIX_VAR(W, H) \
- uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \
+ uint32_t aom_highbd_8_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
dst, dst_stride, sse); \
} \
\
- uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \
+ uint32_t aom_highbd_10_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
dst, dst_stride, sse); \
} \
\
- uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \
+ uint32_t aom_highbd_12_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
dst, dst_stride, sse); \
}
#define HIGHBD_SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
+ uint32_t aom_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse, \
const uint8_t *second_pred) { \
@@ -510,19 +510,19 @@
uint16_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
+ aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
CONVERT_TO_BYTEPTR(temp2), W); \
\
- return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
dst, dst_stride, sse); \
} \
\
- uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
+ uint32_t aom_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse, \
const uint8_t *second_pred) { \
@@ -530,19 +530,19 @@
uint16_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
+ aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
CONVERT_TO_BYTEPTR(temp2), W); \
\
- return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
dst, dst_stride, sse); \
} \
\
- uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
+ uint32_t aom_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, uint32_t *sse, \
const uint8_t *second_pred) { \
@@ -550,15 +550,15 @@
uint16_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- vpx_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
+ aom_highbd_comp_avg_pred_c(temp3, second_pred, W, H, \
CONVERT_TO_BYTEPTR(temp2), W); \
\
- return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
dst, dst_stride, sse); \
}
@@ -568,11 +568,11 @@
HIGHBD_SUBPIX_VAR(W, H) \
HIGHBD_SUBPIX_AVG_VAR(W, H)
-#if CONFIG_VP10 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION
HIGHBD_VARIANCES(128, 128)
HIGHBD_VARIANCES(128, 64)
HIGHBD_VARIANCES(64, 128)
-#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
+#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
HIGHBD_VARIANCES(64, 64)
HIGHBD_VARIANCES(64, 32)
HIGHBD_VARIANCES(32, 64)
@@ -595,7 +595,7 @@
HIGHBD_MSE(8, 16)
HIGHBD_MSE(8, 8)
-void vpx_highbd_comp_avg_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
+void aom_highbd_comp_avg_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
int width, int height, const uint8_t *ref8,
int ref_stride) {
int i, j;
@@ -612,7 +612,7 @@
}
}
-void vpx_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
+void aom_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
const uint8_t *ref8, int ref_stride) {
int i, j;
int stride = ref_stride << 3;
@@ -627,7 +627,7 @@
}
}
-void vpx_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
+void aom_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
const uint8_t *pred8, int width,
int height, const uint8_t *ref8,
int ref_stride) {
@@ -646,9 +646,9 @@
ref += stride;
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#if CONFIG_VP10 && CONFIG_EXT_INTER
+#if CONFIG_AV1 && CONFIG_EXT_INTER
void masked_variance(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m, int m_stride, int w, int h,
unsigned int *sse, int *sum) {
@@ -674,7 +674,7 @@
}
#define MASK_VAR(W, H) \
- unsigned int vpx_masked_variance##W##x##H##_c( \
+ unsigned int aom_masked_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
int sum; \
@@ -683,7 +683,7 @@
}
#define MASK_SUBPIX_VAR(W, H) \
- unsigned int vpx_masked_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
@@ -695,7 +695,7 @@
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
- return vpx_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, msk, \
+ return aom_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, msk, \
msk_stride, sse); \
}
@@ -749,7 +749,7 @@
MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
void highbd_masked_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, const uint8_t *m,
int m_stride, int w, int h, uint64_t *sse,
@@ -813,7 +813,7 @@
}
#define HIGHBD_MASK_VAR(W, H) \
- unsigned int vpx_highbd_masked_variance##W##x##H##_c( \
+ unsigned int aom_highbd_masked_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
int sum; \
@@ -822,7 +822,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_10_masked_variance##W##x##H##_c( \
+ unsigned int aom_highbd_10_masked_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
int sum; \
@@ -831,7 +831,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_12_masked_variance##W##x##H##_c( \
+ unsigned int aom_highbd_12_masked_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
int sum; \
@@ -841,51 +841,51 @@
}
#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
- unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_masked_variance##W##x##H##_c( \
+ return aom_highbd_masked_variance##W##x##H##_c( \
CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
} \
\
- unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_10_masked_variance##W##x##H##_c( \
+ return aom_highbd_10_masked_variance##W##x##H##_c( \
CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
} \
\
- unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_12_masked_variance##W##x##H##_c( \
+ return aom_highbd_12_masked_variance##W##x##H##_c( \
CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
}
@@ -938,10 +938,10 @@
HIGHBD_MASK_VAR(128, 128)
HIGHBD_MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#endif // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#endif // CONFIG_AV1 && CONFIG_EXT_INTER
-#if CONFIG_VP10 && CONFIG_OBMC
+#if CONFIG_AV1 && CONFIG_OBMC
static INLINE void obmc_variance(const uint8_t *pre, int pre_stride,
const int32_t *wsrc, const int32_t *mask,
int w, int h, unsigned int *sse, int *sum) {
@@ -964,7 +964,7 @@
}
#define OBMC_VAR(W, H) \
- unsigned int vpx_obmc_variance##W##x##H##_c( \
+ unsigned int aom_obmc_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -973,7 +973,7 @@
}
#define OBMC_SUBPIX_VAR(W, H) \
- unsigned int vpx_obmc_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_obmc_sub_pixel_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
@@ -984,7 +984,7 @@
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
- return vpx_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse); \
+ return aom_obmc_variance##W##x##H##_c(temp2, W, wsrc, mask, sse); \
}
OBMC_VAR(4, 4)
@@ -1037,7 +1037,7 @@
OBMC_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void highbd_obmc_variance64(const uint8_t *pre8, int pre_stride,
const int32_t *wsrc,
const int32_t *mask, int w, int h,
@@ -1095,7 +1095,7 @@
}
#define HIGHBD_OBMC_VAR(W, H) \
- unsigned int vpx_highbd_obmc_variance##W##x##H##_c( \
+ unsigned int aom_highbd_obmc_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -1103,7 +1103,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_10_obmc_variance##W##x##H##_c( \
+ unsigned int aom_highbd_10_obmc_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -1111,7 +1111,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_12_obmc_variance##W##x##H##_c( \
+ unsigned int aom_highbd_12_obmc_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -1120,48 +1120,48 @@
}
#define HIGHBD_OBMC_SUBPIX_VAR(W, H) \
- unsigned int vpx_highbd_obmc_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_obmc_sub_pixel_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ return aom_highbd_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
wsrc, mask, sse); \
} \
\
- unsigned int vpx_highbd_10_obmc_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_10_obmc_sub_pixel_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_10_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+ return aom_highbd_10_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
W, wsrc, mask, sse); \
} \
\
- unsigned int vpx_highbd_12_obmc_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_12_obmc_sub_pixel_variance##W##x##H##_c( \
const uint8_t *pre, int pre_stride, int xoffset, int yoffset, \
const int32_t *wsrc, const int32_t *mask, unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
\
- vpx_highbd_var_filter_block2d_bil_first_pass( \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
pre, fdata3, pre_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- vpx_highbd_var_filter_block2d_bil_second_pass( \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
\
- return vpx_highbd_12_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+ return aom_highbd_12_obmc_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
W, wsrc, mask, sse); \
}
@@ -1214,5 +1214,5 @@
HIGHBD_OBMC_VAR(128, 128)
HIGHBD_OBMC_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
-#endif // CONFIG_VP9_HIGHBITDEPTH
-#endif // CONFIG_VP10 && CONFIG_OBMC
+#endif // CONFIG_AOM_HIGHBITDEPTH
+#endif // CONFIG_AV1 && CONFIG_OBMC
diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h
index 088e09c..a4bad8c 100644
--- a/aom_dsp/variance.h
+++ b/aom_dsp/variance.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_VARIANCE_H_
-#define VPX_DSP_VARIANCE_H_
+#ifndef AOM_DSP_VARIANCE_H_
+#define AOM_DSP_VARIANCE_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#ifdef __cplusplus
extern "C" {
@@ -22,95 +22,95 @@
#define FILTER_BITS 7
#define FILTER_WEIGHT 128
-typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_sad_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride);
-typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_sad_avg_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
const uint8_t *second_pred);
-typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
+typedef void (*aom_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
int b_stride, int n);
-typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride,
+typedef void (*aom_sad_multi_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sad_array);
-typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
+typedef void (*aom_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *const b_array[],
int b_stride, unsigned int *sad_array);
-typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_variance_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse);
-typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
+typedef unsigned int (*aom_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
int xoffset, int yoffset,
const uint8_t *b, int b_stride,
unsigned int *sse);
-typedef unsigned int (*vpx_subp_avg_variance_fn_t)(
+typedef unsigned int (*aom_subp_avg_variance_fn_t)(
const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b,
int b_stride, unsigned int *sse, const uint8_t *second_pred);
-#if CONFIG_VP10 && CONFIG_EXT_INTER
-typedef unsigned int (*vpx_masked_sad_fn_t)(const uint8_t *src, int src_stride,
+#if CONFIG_AV1 && CONFIG_EXT_INTER
+typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
const uint8_t *msk_ptr,
int msk_stride);
-typedef unsigned int (*vpx_masked_variance_fn_t)(
+typedef unsigned int (*aom_masked_variance_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *msk, int msk_stride, unsigned int *sse);
-typedef unsigned int (*vpx_masked_subpixvariance_fn_t)(
+typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse);
-#endif // CONFIG_VP10 && CONFIG_EXT_INTER
+#endif // CONFIG_AV1 && CONFIG_EXT_INTER
-#if CONFIG_VP10 && CONFIG_OBMC
-typedef unsigned int (*vpx_obmc_sad_fn_t)(const uint8_t *pred, int pred_stride,
+#if CONFIG_AV1 && CONFIG_OBMC
+typedef unsigned int (*aom_obmc_sad_fn_t)(const uint8_t *pred, int pred_stride,
const int32_t *wsrc,
const int32_t *msk);
-typedef unsigned int (*vpx_obmc_variance_fn_t)(const uint8_t *pred,
+typedef unsigned int (*aom_obmc_variance_fn_t)(const uint8_t *pred,
int pred_stride,
const int32_t *wsrc,
const int32_t *msk,
unsigned int *sse);
-typedef unsigned int (*vpx_obmc_subpixvariance_fn_t)(
+typedef unsigned int (*aom_obmc_subpixvariance_fn_t)(
const uint8_t *pred, int pred_stride, int xoffset, int yoffset,
const int32_t *wsrc, const int32_t *msk, unsigned int *sse);
-#endif // CONFIG_VP10 && CONFIG_OBMC
+#endif // CONFIG_AV1 && CONFIG_OBMC
-#if CONFIG_VP10
-typedef struct vpx_variance_vtable {
- vpx_sad_fn_t sdf;
- vpx_sad_avg_fn_t sdaf;
- vpx_variance_fn_t vf;
- vpx_subpixvariance_fn_t svf;
- vpx_subp_avg_variance_fn_t svaf;
- vpx_sad_multi_fn_t sdx3f;
- vpx_sad_multi_fn_t sdx8f;
- vpx_sad_multi_d_fn_t sdx4df;
+#if CONFIG_AV1
+typedef struct aom_variance_vtable {
+ aom_sad_fn_t sdf;
+ aom_sad_avg_fn_t sdaf;
+ aom_variance_fn_t vf;
+ aom_subpixvariance_fn_t svf;
+ aom_subp_avg_variance_fn_t svaf;
+ aom_sad_multi_fn_t sdx3f;
+ aom_sad_multi_fn_t sdx8f;
+ aom_sad_multi_d_fn_t sdx4df;
#if CONFIG_EXT_INTER
- vpx_masked_sad_fn_t msdf;
- vpx_masked_variance_fn_t mvf;
- vpx_masked_subpixvariance_fn_t msvf;
+ aom_masked_sad_fn_t msdf;
+ aom_masked_variance_fn_t mvf;
+ aom_masked_subpixvariance_fn_t msvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
- vpx_obmc_sad_fn_t osdf;
- vpx_obmc_variance_fn_t ovf;
- vpx_obmc_subpixvariance_fn_t osvf;
+ aom_obmc_sad_fn_t osdf;
+ aom_obmc_variance_fn_t ovf;
+ aom_obmc_subpixvariance_fn_t osvf;
#endif // CONFIG_OBMC
-} vpx_variance_fn_ptr_t;
-#endif // CONFIG_VP10
+} aom_variance_fn_ptr_t;
+#endif // CONFIG_AV1
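/* Typical use, as a sketch (the cpi->fn_ptr table name is an assumption
 * from the encoder side, not defined in this header):
 *   const aom_variance_fn_ptr_t *fp = &cpi->fn_ptr[BLOCK_16X16];
 *   unsigned int sse;
 *   const unsigned int var = fp->vf(src, src_stride, ref, ref_stride, &sse);
 * Keeping one table per block size lets RTCD bind SIMD variants once at
 * init instead of branching per call. */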
-void vpx_highbd_var_filter_block2d_bil_first_pass(
+void aom_highbd_var_filter_block2d_bil_first_pass(
const uint8_t *src_ptr8, uint16_t *output_ptr,
unsigned int src_pixels_per_line, int pixel_step,
unsigned int output_height, unsigned int output_width,
const uint8_t *filter);
-void vpx_highbd_var_filter_block2d_bil_second_pass(
+void aom_highbd_var_filter_block2d_bil_second_pass(
const uint16_t *src_ptr, uint16_t *output_ptr,
unsigned int src_pixels_per_line, unsigned int pixel_step,
unsigned int output_height, unsigned int output_width,
@@ -120,4 +120,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_VARIANCE_H_
+#endif // AOM_DSP_VARIANCE_H_
diff --git a/aom_dsp/vpx_dsp_rtcd_defs.pl b/aom_dsp/vpx_dsp_rtcd_defs.pl
deleted file mode 100644
index 509fba6..0000000
--- a/aom_dsp/vpx_dsp_rtcd_defs.pl
+++ /dev/null
@@ -1,1929 +0,0 @@
-sub vpx_dsp_forward_decls() {
-print <<EOF
-/*
- * DSP
- */
-
-#include "aom/vpx_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-
-EOF
-}
-forward_decls qw/vpx_dsp_forward_decls/;
-
-# optimizations which depend on multiple features
-$avx2_ssse3 = '';
-if ((vpx_config("HAVE_AVX2") eq "yes") && (vpx_config("HAVE_SSSE3") eq "yes")) {
- $avx2_ssse3 = 'avx2';
-}
-
-# functions that are 64 bit only.
-$mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = '';
-if ($opts{arch} eq "x86_64") {
- $mmx_x86_64 = 'mmx';
- $sse2_x86_64 = 'sse2';
- $ssse3_x86_64 = 'ssse3';
- $avx_x86_64 = 'avx';
- $avx2_x86_64 = 'avx2';
-}
-
-if (vpx_config("CONFIG_EXT_PARTITION") eq "yes") {
- @block_widths = (4, 8, 16, 32, 64, 128)
-} else {
- @block_widths = (4, 8, 16, 32, 64)
-}
-
-@block_sizes = ();
-foreach $w (@block_widths) {
- foreach $h (@block_widths) {
- push @block_sizes, [$w, $h] if ($w <= 2*$h && $h <= 2*$w) ;
- }
-}
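-# i.e. keep only block sizes whose aspect ratio is at most 2:1
-# (4x4, 4x8, 8x4, 8x8, ...).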
-
-#
-# Intra prediction
-#
-
-add_proto qw/void vpx_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_4x4 sse2/;
-
-add_proto qw/void vpx_d207e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_4x4/;
-
-add_proto qw/void vpx_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_4x4 neon sse2/;
-
-add_proto qw/void vpx_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_4x4/;
-
-add_proto qw/void vpx_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_4x4 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_4x4/;
-
-add_proto qw/void vpx_d63f_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63f_predictor_4x4/;
-
-add_proto qw/void vpx_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_4x4 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_he_predictor_4x4/;
-
-add_proto qw/void vpx_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_4x4/;
-
-add_proto qw/void vpx_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_4x4 neon/;
-
-add_proto qw/void vpx_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_4x4 ssse3/;
-
-add_proto qw/void vpx_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_4x4 neon msa sse2/;
-
-add_proto qw/void vpx_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_ve_predictor_4x4/;
-
-add_proto qw/void vpx_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_4x4 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_4x4 dspr2 msa neon sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_4x4 msa neon sse2/;
-
-add_proto qw/void vpx_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_8x8/;
-
-add_proto qw/void vpx_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_8x8 neon sse2/;
-
-add_proto qw/void vpx_d45e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_8x8/;
-
-add_proto qw/void vpx_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_8x8/;
-
-add_proto qw/void vpx_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_8x8 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_8x8/;
-
-add_proto qw/void vpx_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_8x8/;
-
-add_proto qw/void vpx_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_8x8 ssse3/;
-
-add_proto qw/void vpx_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_8x8 neon msa sse2/;
-
-add_proto qw/void vpx_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_16x16/;
-
-add_proto qw/void vpx_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_16x16 neon ssse3/;
-
-add_proto qw/void vpx_d45e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_16x16/;
-
-add_proto qw/void vpx_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_16x16/;
-
-add_proto qw/void vpx_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_16x16 neon dspr2 msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_16x16/;
-
-add_proto qw/void vpx_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_16x16/;
-
-add_proto qw/void vpx_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_16x16 ssse3/;
-
-add_proto qw/void vpx_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_16x16 dspr2 neon msa sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_16x16 neon msa sse2/;
-
-add_proto qw/void vpx_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d207e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d207e_predictor_32x32/;
-
-add_proto qw/void vpx_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d45e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d45e_predictor_32x32/;
-
-add_proto qw/void vpx_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_d63e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d63e_predictor_32x32/;
-
-add_proto qw/void vpx_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_h_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d117_predictor_32x32/;
-
-add_proto qw/void vpx_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d135_predictor_32x32/;
-
-add_proto qw/void vpx_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_d153_predictor_32x32 ssse3/;
-
-add_proto qw/void vpx_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_v_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_32x32 neon msa sse2/;
-
-add_proto qw/void vpx_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_top_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_left_predictor_32x32 msa neon sse2/;
-
-add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2/;
-
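All of these prototypes share the dst/y_stride/above/left shape: fill a square
block from the reconstructed row above and column to the left. As a reference
point, a minimal C sketch of the DC predictor (illustrative, not the shipped
_c implementation):

  #include <stdint.h>
  #include <stddef.h>

  /* Fill an n x n block with the rounded average of the n pixels above
   * and the n pixels to the left. */
  static void dc_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int n,
                                  const uint8_t *above, const uint8_t *left) {
    int sum = 0, r, c;
    for (c = 0; c < n; ++c) sum += above[c];
    for (r = 0; r < n; ++r) sum += left[r];
    {
      const uint8_t dc = (uint8_t)((sum + n) / (2 * n)); /* round to nearest */
      for (r = 0; r < n; ++r, dst += stride)
        for (c = 0; c < n; ++c) dst[c] = dc;
    }
  }

The directional variants (d45, d63, d117, d135, d153, d207) differ only in
which diagonal of neighbors they propagate; tm is the TrueMotion predictor.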
-# High bitdepth functions
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d207e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207e_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d45e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45e_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d63e_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63e_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_h_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d117_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d135_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d153_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_v_predictor_4x4 sse2/;
-
- add_proto qw/void vpx_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_tm_predictor_4x4 sse2/;
-
- add_proto qw/void vpx_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_predictor_4x4 sse2/;
-
- add_proto qw/void vpx_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_top_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_left_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_128_predictor_4x4/;
-
- add_proto qw/void vpx_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d207e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207e_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d45e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45e_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d63e_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63e_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_h_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d117_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d135_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d153_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_v_predictor_8x8 sse2/;
-
- add_proto qw/void vpx_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_tm_predictor_8x8 sse2/;
-
- add_proto qw/void vpx_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_predictor_8x8 sse2/;
-
- add_proto qw/void vpx_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_top_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_left_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_128_predictor_8x8/;
-
- add_proto qw/void vpx_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d207e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207e_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d45e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45e_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d63e_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63e_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_h_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d117_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d135_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d153_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_v_predictor_16x16 sse2/;
-
- add_proto qw/void vpx_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_tm_predictor_16x16 sse2/;
-
- add_proto qw/void vpx_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_predictor_16x16 sse2/;
-
- add_proto qw/void vpx_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_top_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_left_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_128_predictor_16x16/;
-
- add_proto qw/void vpx_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d207e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d207e_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d45e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d45e_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d63e_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63e_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_h_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d117_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d135_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d153_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_v_predictor_32x32 sse2/;
-
- add_proto qw/void vpx_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_tm_predictor_32x32 sse2/;
-
- add_proto qw/void vpx_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_predictor_32x32 sse2/;
-
- add_proto qw/void vpx_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_top_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_left_predictor_32x32/;
-
- add_proto qw/void vpx_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_dc_128_predictor_32x32/;
-} # CONFIG_VP9_HIGHBITDEPTH
-
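The high-bitdepth twins above differ from the 8-bit versions in two ways:
samples are uint16_t, and the extra bd argument replaces hard-coded 8-bit
constants. A sketch of the dc_128-style fill, assuming the usual half-range
convention (illustrative):

  #include <stdint.h>
  #include <stddef.h>

  /* With no usable neighbors the block is set to mid-gray, which is
   * 1 << (bd - 1) rather than a literal 128 once bd can be 10 or 12. */
  static void highbd_dc_128_sketch(uint16_t *dst, ptrdiff_t stride, int n,
                                   int bd) {
    const uint16_t mid = (uint16_t)(1 << (bd - 1));
    int r, c;
    for (r = 0; r < n; ++r, dst += stride)
      for (c = 0; c < n; ++c) dst[c] = mid;
  }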
-#
-# Sub Pixel Filters
-#
-add_proto qw/void vpx_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void vpx_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-
-specialize qw/vpx_convolve_copy sse2/;
-specialize qw/vpx_convolve_avg sse2/;
-specialize qw/vpx_convolve8 sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_horiz sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_vert sse2 ssse3/, "$avx2_ssse3";
-specialize qw/vpx_convolve8_avg sse2 ssse3/;
-specialize qw/vpx_convolve8_avg_horiz sse2 ssse3/;
-specialize qw/vpx_convolve8_avg_vert sse2 ssse3/;
-specialize qw/vpx_scaled_2d ssse3/;
-
-# TODO(any): These need to be extended up to 128x128 block sizes.
-if (!(vpx_config("CONFIG_VP10") eq "yes" && vpx_config("CONFIG_EXT_PARTITION") eq "yes")) {
- specialize qw/vpx_convolve_copy neon dspr2 msa/;
- specialize qw/vpx_convolve_avg neon dspr2 msa/;
- specialize qw/vpx_convolve8 neon dspr2 msa/;
- specialize qw/vpx_convolve8_horiz neon dspr2 msa/;
- specialize qw/vpx_convolve8_vert neon dspr2 msa/;
- specialize qw/vpx_convolve8_avg neon dspr2 msa/;
- specialize qw/vpx_convolve8_avg_horiz neon dspr2 msa/;
- specialize qw/vpx_convolve8_avg_vert neon dspr2 msa/;
-}
-
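Every convolve8 prototype describes the same 8-tap sub-pixel filter;
x_step_q4/y_step_q4 advance the filter phase in 1/16-pel steps (16 means no
scaling), and the _avg forms average the result into dst instead of storing.
A simplified horizontal pass at unit step, assuming the standard 7-bit filter
rounding (illustrative):

  #include <stdint.h>
  #include <stddef.h>

  #define FILTER_BITS 7

  static uint8_t clip_u8(int v) {
    return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }

  /* 8-tap horizontal convolution, x_step_q4 == 16. By convention src
   * points at the center pixel, so taps reach back 3 and ahead 4. */
  static void convolve8_horiz_sketch(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
                                     const int16_t *filter, int w, int h) {
    int x, y, k;
    for (y = 0; y < h; ++y) {
      for (x = 0; x < w; ++x) {
        int sum = 0;
        for (k = 0; k < 8; ++k) sum += src[x + k - 3] * filter[k];
        dst[x] = clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
      }
      src += src_stride;
      dst += dst_stride;
    }
  }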
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve_copy sse2/;
-
- add_proto qw/void vpx_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve_avg sse2/;
-
- add_proto qw/void vpx_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8/, "$sse2_x86_64";
-
- add_proto qw/void vpx_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8_horiz/, "$sse2_x86_64";
-
- add_proto qw/void vpx_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8_vert/, "$sse2_x86_64";
-
- add_proto qw/void vpx_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8_avg/, "$sse2_x86_64";
-
- add_proto qw/void vpx_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
-
- add_proto qw/void vpx_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
- specialize qw/vpx_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-} # CONFIG_VP9_HIGHBITDEPTH
-
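A note on mechanics: add_proto declares a function in the RTCD table and
specialize lists the SIMD flavors that exist for it (conditional entries like
"$sse2_x86_64" gate a variant on the target). The generator then emits a
function pointer per prototype plus a setup routine that upgrades it at run
time. Roughly, with invented names and a reduced signature:

  #include <stdint.h>
  #include <string.h>

  /* Reduced signature for the sketch; real entries carry the full
   * convolve argument list. */
  typedef void (*copy_fn)(const uint8_t *src, uint8_t *dst, int n);

  static void copy_c(const uint8_t *src, uint8_t *dst, int n) {
    memcpy(dst, src, (size_t)n);
  }
  static void copy_sse2(const uint8_t *src, uint8_t *dst, int n) {
    memcpy(dst, src, (size_t)n); /* stand-in for the SIMD body */
  }

  copy_fn convolve_copy_ptr = copy_c;

  enum { HAS_SSE2 = 0x01 }; /* illustrative flag */

  /* Mirrors the generated setup: start from the C fallback and upgrade
   * when the CPU reports the feature. */
  static void setup_rtcd_sketch(int cpu_flags) {
    convolve_copy_ptr = copy_c;
    if (cpu_flags & HAS_SSE2) convolve_copy_ptr = copy_sse2;
  }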
-#
-# Loopfilter
-#
-add_proto qw/void vpx_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_16 sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_16_neon_asm=vpx_lpf_vertical_16_neon;
-
-add_proto qw/void vpx_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_16_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_16_dual_neon_asm=vpx_lpf_vertical_16_dual_neon;
-
-add_proto qw/void vpx_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_8 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_vertical_8_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_vertical_8_dual_neon_asm=vpx_lpf_vertical_8_dual_neon;
-
-add_proto qw/void vpx_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_vertical_4 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_vertical_4_dual sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_edge_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_edge_8 sse2 avx2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_edge_8_neon_asm=vpx_lpf_horizontal_edge_8_neon;
-
-add_proto qw/void vpx_lpf_horizontal_edge_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_edge_16 sse2 avx2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_edge_16_neon_asm=vpx_lpf_horizontal_edge_16_neon;
-
-add_proto qw/void vpx_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_8 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_horizontal_8_dual sse2 neon_asm dspr2 msa/;
-$vpx_lpf_horizontal_8_dual_neon_asm=vpx_lpf_horizontal_8_dual_neon;
-
-add_proto qw/void vpx_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh";
-specialize qw/vpx_lpf_horizontal_4 sse2 neon dspr2 msa/;
-
-add_proto qw/void vpx_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vpx_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_vertical_16 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_vertical_16_dual sse2/;
-
- add_proto qw/void vpx_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_vertical_8 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
- specialize qw/vpx_highbd_lpf_vertical_8_dual sse2/;
-
- add_proto qw/void vpx_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_vertical_4 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
- specialize qw/vpx_highbd_lpf_vertical_4_dual sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_edge_8 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_edge_16 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_8 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_8_dual sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_4 sse2/;
-
- add_proto qw/void vpx_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
- specialize qw/vpx_highbd_lpf_horizontal_4_dual sse2/;
-} # CONFIG_VP9_HIGHBITDEPTH
-
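The three thresholds passed to every loopfilter are per-segment controls:
limit bounds the step between adjacent pixels, blimit bounds total activity
across the edge, and thresh selects between the narrow and wide filters. The
filter on/off test for one edge position, reduced to the innermost taps
(illustrative):

  #include <stdlib.h>
  #include <stdint.h>

  /* Edge layout: p1 p0 | q0 q1. Returns nonzero when this position
   * should be filtered at all. */
  static int lpf_mask_sketch(uint8_t p1, uint8_t p0, uint8_t q0, uint8_t q1,
                             uint8_t blimit, uint8_t limit) {
    int mask = 1;
    mask &= abs(p1 - p0) <= limit;
    mask &= abs(q1 - q0) <= limit;
    mask &= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
    return mask;
  }

The _dual variants filter two adjacent edges with independent threshold sets
in one call, which is why they take blimit0/limit0/thresh0 and
blimit1/limit1/thresh1.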
-#
-# Encoder functions.
-#
-
-#
-# Forward transform
-#
-if ((vpx_config("CONFIG_VP10_ENCODER") eq "yes")) {
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct4x4 sse2/;
-
- add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct4x4_1 sse2/;
-
- add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct8x8 sse2/;
-
- add_proto qw/void vpx_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct8x8_1 sse2/;
-
- add_proto qw/void vpx_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct16x16 sse2/;
-
- add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct16x16_1 sse2/;
-
- add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32 sse2/;
-
- add_proto qw/void vpx_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32_rd sse2/;
-
- add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32_1 sse2/;
-
- add_proto qw/void vpx_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct4x4 sse2/;
-
- add_proto qw/void vpx_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct8x8 sse2/;
-
- add_proto qw/void vpx_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct8x8_1/;
-
- add_proto qw/void vpx_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct16x16 sse2/;
-
- add_proto qw/void vpx_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct16x16_1/;
-
- add_proto qw/void vpx_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct32x32 sse2/;
-
- add_proto qw/void vpx_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct32x32_rd sse2/;
-
- add_proto qw/void vpx_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_highbd_fdct32x32_1/;
-} else {
- add_proto qw/void vpx_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct4x4 sse2 msa/;
-
- add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct4x4_1 sse2/;
-
- add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct8x8_1 sse2 neon msa/;
-
- add_proto qw/void vpx_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct16x16 sse2 msa/;
-
- add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct16x16_1 sse2 msa/;
-
- add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32 sse2 avx2 msa/;
-
- add_proto qw/void vpx_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32_rd sse2 avx2 msa/;
-
- add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/vpx_fdct32x32_1 sse2 msa/;
-} # CONFIG_VP9_HIGHBITDEPTH
-} # CONFIG_VP10_ENCODER
-
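The *_1 entries compute only the DC coefficient; the encoder uses them for
cheap skip and rate estimates. The whole kernel is a sum and a per-size scale
(the scale below matches the 4x4 reference; larger sizes shift differently):

  #include <stdint.h>

  typedef int32_t tran_low_t; /* the non-highbitdepth typedef */

  /* DC-only forward transform for a 4x4 residual block. */
  static void fdct4x4_1_sketch(const int16_t *input, tran_low_t *output,
                               int stride) {
    int r, c;
    tran_low_t sum = 0;
    for (r = 0; r < 4; ++r)
      for (c = 0; c < 4; ++c) sum += input[r * stride + c];
    output[0] = sum * 2;
    output[1] = 0;
  }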
-#
-# Inverse transform
-#
-if (vpx_config("CONFIG_VP10") eq "yes") {
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- # Note: as optimized versions of these functions are added, we need to add a check to
- # ensure that when CONFIG_EMULATE_HARDWARE is on, only the C versions are used.
- add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_1_add/;
-
- add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_16_add sse2/;
-
- add_proto qw/void vpx_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct4x4_1_add/;
-
- add_proto qw/void vpx_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct8x8_1_add/;
-
- add_proto qw/void vpx_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct16x16_1_add/;
-
- add_proto qw/void vpx_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct32x32_1024_add/;
-
- add_proto qw/void vpx_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct32x32_34_add/;
-
- add_proto qw/void vpx_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct32x32_1_add/;
-
- add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_iwht4x4_1_add/;
-
- add_proto qw/void vpx_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_iwht4x4_16_add/;
-
- # Force C versions if CONFIG_EMULATE_HARDWARE is 1
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_16_add/;
-
- add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_1_add/;
-
- add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_64_add/;
-
- add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_12_add/;
-
- add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_1_add/;
-
- add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_256_add/;
-
- add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_10_add/;
-
- add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_1_add/;
-
- add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1024_add/;
-
- add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add/;
-
- add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_34_add/;
-
- add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1_add/;
-
- add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct4x4_16_add/;
-
- add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct8x8_64_add/;
-
- add_proto qw/void vpx_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct8x8_10_add/;
-
- add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct16x16_256_add/;
-
- add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct16x16_10_add/;
- } else {
- add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_16_add sse2/;
-
- add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_1_add sse2/;
-
- add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_64_add sse2/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_12_add sse2/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_1_add sse2/;
-
- add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_256_add sse2/;
-
- add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_10_add sse2/;
-
- add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_1_add sse2/;
-
- add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64";
- # Need to add 135 eob idct32x32 implementations.
- $vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
-
- add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_34_add sse2/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1_add sse2/;
-
- add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct4x4_16_add sse2/;
-
- add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct8x8_64_add sse2/;
-
- add_proto qw/void vpx_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct8x8_10_add sse2/;
-
- add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct16x16_256_add sse2/;
-
- add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/vpx_highbd_idct16x16_10_add sse2/;
- } # CONFIG_EMULATE_HARDWARE
-} else {
- # Force C versions if CONFIG_EMULATE_HARDWARE is 1
- if (vpx_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
- add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_1_add/;
-
- add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_16_add/;
-
- add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_1_add/;
-
- add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_64_add/;
-
- add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_12_add/;
-
- add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_1_add/;
-
- add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_256_add/;
-
- add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_10_add/;
-
- add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1024_add/;
-
- add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add/;
-
- add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_34_add/;
-
- add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1_add/;
-
- add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_1_add/;
-
- add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_16_add/;
- } else {
- add_proto qw/void vpx_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_1_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct4x4_16_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_1_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_1_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_256_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct16x16_10_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
- # Need to add 135 eob idct32x32 implementations.
- $vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
- $vpx_idct32x32_135_add_neon=vpx_idct32x32_1024_add_neon;
- $vpx_idct32x32_135_add_dspr2=vpx_idct32x32_1024_add_dspr2;
- $vpx_idct32x32_135_add_msa=vpx_idct32x32_1024_add_msa;
-
- add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
- # Need to add 34 eob idct32x32 neon implementation.
- $vpx_idct32x32_34_add_neon=vpx_idct32x32_1024_add_neon;
-
- add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_1_add sse2 neon dspr2 msa/;
-
- add_proto qw/void vpx_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_1_add msa/;
-
- add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_iwht4x4_16_add msa sse2/;
- } # CONFIG_EMULATE_HARDWARE
-} # CONFIG_VP9_HIGHBITDEPTH
-} # CONFIG_VP10
-
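All inverse-transform entry points share the *_add contract: reconstruct the
residual and add it to dest in place, clamping to pixel range. The numeric
suffix (1, 10, 34, 135, 256, 1024) is the count of nonzero coefficients the
kernel may assume, which is what makes the cheaper variants legal. The
DC-only case shows the pattern (illustrative; the real kernels fold the
two-stage idct rounding into the residual):

  #include <stdint.h>

  typedef int32_t tran_low_t;

  static uint8_t clip_u8(int v) {
    return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }

  /* A lone DC coefficient becomes one rounded residual value added to
   * every pixel of the n x n destination block. */
  static void idct_dc_add_sketch(tran_low_t dc, uint8_t *dest, int stride,
                                 int n, int shift) {
    const int resid = (int)((dc + (1 << (shift - 1))) >> shift);
    int r, c;
    for (r = 0; r < n; ++r, dest += stride)
      for (c = 0; c < n; ++c) dest[c] = clip_u8(dest[c] + resid);
  }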
-#
-# Quantization
-#
-if (vpx_config("CONFIG_AOM_QM") eq "yes") {
- if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
- add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
- add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-
- add_proto qw/void vpx_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- } # CONFIG_VP9_HIGHBITDEPTH
- } # CONFIG_VP10_ENCODER
-} else {
- if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
- add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vpx_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64";
-
- add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vpx_highbd_quantize_b sse2/;
-
- add_proto qw/void vpx_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vpx_highbd_quantize_b_32x32 sse2/;
- } # CONFIG_VP9_HIGHBITDEPTH
- } # CONFIG_VP10_ENCODER
-} # CONFIG_AOM_QM
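Both quantize_b signatures implement the zbin/round/quant/shift pipeline:
coefficients inside the dead zone are zeroed, the rest are rounded, scaled,
and immediately dequantized, and eob_ptr records one past the last nonzero
position. The CONFIG_AOM_QM variant additionally weights each coefficient by
the qm_ptr/iqm_ptr matrices. A scalar sketch with a single parameter set
(the shipped kernel keeps separate DC and AC tables and walks in scan order):

  #include <stdint.h>
  #include <stdlib.h>

  typedef int32_t tran_low_t;

  static void quantize_b_sketch(const tran_low_t *coeff, int n, int zbin,
                                int rnd, int quant, int quant_shift,
                                int dequant, tran_low_t *qcoeff,
                                tran_low_t *dqcoeff, uint16_t *eob_ptr) {
    int i, eob = -1;
    for (i = 0; i < n; ++i) {
      const int sign = coeff[i] < 0 ? -1 : 1;
      const int abs_c = abs((int)coeff[i]);
      qcoeff[i] = dqcoeff[i] = 0;
      if (abs_c >= zbin) {                /* outside the dead zone */
        int tmp = abs_c + rnd;            /* rounding offset */
        tmp = ((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16;
        qcoeff[i] = sign * tmp;
        dqcoeff[i] = qcoeff[i] * dequant; /* back to transform scale */
        if (tmp) eob = i;
      }
    }
    *eob_ptr = (uint16_t)(eob + 1);
  }
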
-if (vpx_config("CONFIG_VP10") eq "yes") {
- #
- # Alpha blending with mask
- #
- add_proto qw/void vpx_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
- add_proto qw/void vpx_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
- add_proto qw/void vpx_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
- specialize "vpx_blend_a64_mask", qw/sse4_1/;
- specialize "vpx_blend_a64_hmask", qw/sse4_1/;
- specialize "vpx_blend_a64_vmask", qw/sse4_1/;
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx, int bd";
- add_proto qw/void vpx_highbd_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
- add_proto qw/void vpx_highbd_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w, int bd";
- specialize "vpx_highbd_blend_a64_mask", qw/sse4_1/;
- specialize "vpx_highbd_blend_a64_hmask", qw/sse4_1/;
- specialize "vpx_highbd_blend_a64_vmask", qw/sse4_1/;
- }
-} # CONFIG_VP10
-
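The a64 blends mix two predictors with a 6-bit alpha mask, so a mask value of
64 selects src0 outright; hmask and vmask are the one-dimensional forms that
reuse a single row or column of mask values. The per-pixel step (block
traversal omitted):

  #include <stdint.h>

  /* 6-bit alpha blend of two 8-bit samples; m lies in [0, 64]. */
  static uint8_t blend_a64_sketch(int m, uint8_t v0, uint8_t v1) {
    return (uint8_t)((m * v0 + (64 - m) * v1 + 32) >> 6);
  }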
-if (vpx_config("CONFIG_ENCODERS") eq "yes") {
-#
-# Block subtraction
-#
-add_proto qw/void vpx_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride";
-specialize qw/vpx_subtract_block neon msa sse2/;
-
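vpx_subtract_block produces the residual the forward transforms consume: a
per-pixel src minus pred, widened to 16 bits. The reference is essentially:

  #include <stdint.h>
  #include <stddef.h>

  static void subtract_block_sketch(int rows, int cols, int16_t *diff,
                                    ptrdiff_t diff_stride, const uint8_t *src,
                                    ptrdiff_t src_stride, const uint8_t *pred,
                                    ptrdiff_t pred_stride) {
    int r, c;
    for (r = 0; r < rows; ++r) {
      for (c = 0; c < cols; ++c) diff[c] = (int16_t)(src[c] - pred[c]);
      diff += diff_stride;
      src += src_stride;
      pred += pred_stride;
    }
  }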
-if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
-#
-# Sum of Squares
-#
-add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
-specialize qw/vpx_sum_squares_2d_i16 sse2/;
-
-add_proto qw/uint64_t vpx_sum_squares_i16/, "const int16_t *src, uint32_t N";
-specialize qw/vpx_sum_squares_i16 sse2/;
-} # CONFIG_VP10_ENCODER
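Both helpers accumulate squared int16 residuals into a 64-bit total, which is
the point of the uint64_t return: 16-bit squares overflow 32 bits after a few
thousand terms. The flat variant, as a sketch:

  #include <stdint.h>

  static uint64_t sum_squares_i16_sketch(const int16_t *src, uint32_t n) {
    uint64_t ss = 0;
    uint32_t i;
    for (i = 0; i < n; ++i) {
      const int32_t v = src[i];
      ss += (uint64_t)(v * v);
    }
    return ss;
  }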
-
-#
-# Single block SAD
-#
-add_proto qw/unsigned int vpx_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x64 avx2 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x32 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x64 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x32 avx2 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x16 avx2 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x32 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x16 media neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x8 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x16 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x8 neon msa sse2/;
-
-add_proto qw/unsigned int vpx_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x4 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x8 msa sse2/;
-
-add_proto qw/unsigned int vpx_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x4 neon msa sse2/;
-
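Every one of these reduces to the same accumulation; the kernels differ only
in the geometry baked into the name, which is what lets the SIMD versions
unroll completely. Reference form (illustrative):

  #include <stdint.h>
  #include <stdlib.h>

  static unsigned int sad_sketch(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 int w, int h) {
    unsigned int sad = 0;
    int r, c;
    for (r = 0; r < h; ++r) {
      for (c = 0; c < w; ++c) sad += (unsigned int)abs(src[c] - ref[c]);
      src += src_stride;
      ref += ref_stride;
    }
    return sad;
  }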
-#
-# Avg
-#
-if ((vpx_config("CONFIG_VP10_ENCODER") eq "yes")) {
- add_proto qw/unsigned int vpx_avg_8x8/, "const uint8_t *, int p";
- specialize qw/vpx_avg_8x8 sse2 neon msa/;
- add_proto qw/unsigned int vpx_avg_4x4/, "const uint8_t *, int p";
- specialize qw/vpx_avg_4x4 sse2 neon msa/;
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/unsigned int vpx_highbd_avg_8x8/, "const uint8_t *, int p";
- specialize qw/vpx_highbd_avg_8x8/;
- add_proto qw/unsigned int vpx_highbd_avg_4x4/, "const uint8_t *, int p";
- specialize qw/vpx_highbd_avg_4x4/;
- add_proto qw/void vpx_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
- specialize qw/vpx_highbd_subtract_block sse2/;
- }
-
- #
- # Minmax
- #
- add_proto qw/void vpx_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
- specialize qw/vpx_minmax_8x8 sse2 neon/;
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
- specialize qw/vpx_highbd_minmax_8x8/;
- }
-
- add_proto qw/void vpx_hadamard_8x8/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
- specialize qw/vpx_hadamard_8x8 sse2 neon/, "$ssse3_x86_64";
-
- add_proto qw/void vpx_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
- specialize qw/vpx_hadamard_16x16 sse2 neon/;
-
- add_proto qw/int vpx_satd/, "const int16_t *coeff, int length";
- specialize qw/vpx_satd sse2 neon/;
-
- add_proto qw/void vpx_int_pro_row/, "int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height";
- specialize qw/vpx_int_pro_row sse2 neon/;
-
- add_proto qw/int16_t vpx_int_pro_col/, "const uint8_t *ref, const int width";
- specialize qw/vpx_int_pro_col sse2 neon/;
-
- add_proto qw/int vpx_vector_var/, "const int16_t *ref, const int16_t *src, const int bwl";
- specialize qw/vpx_vector_var neon sse2/;
-} # CONFIG_VP10_ENCODER
-
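These small helpers feed the encoder's variance-based partitioning and mode
pruning: avg returns a rounded block mean, minmax the extremes of a
difference, hadamard/satd a cheap frequency-domain cost, and the
int_pro/vector_var entries serve integral-projection motion search. The 8x8
average, as a sketch:

  #include <stdint.h>

  /* Rounded mean of an 8x8 block: (sum + 32) >> 6. */
  static unsigned int avg_8x8_sketch(const uint8_t *s, int p) {
    unsigned int sum = 0;
    int r, c;
    for (r = 0; r < 8; ++r, s += p)
      for (c = 0; c < 8; ++c) sum += s[c];
    return (sum + 32) >> 6;
  }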
-#
-# Single block SAD / Single block Avg SAD
-#
-foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
- add_proto qw/unsigned int/, "vpx_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-}
-
-specialize qw/vpx_sad128x128 sse2/;
-specialize qw/vpx_sad128x64 sse2/;
-specialize qw/vpx_sad64x128 sse2/;
-specialize qw/vpx_sad64x64 avx2 msa sse2/;
-specialize qw/vpx_sad64x32 avx2 msa sse2/;
-specialize qw/vpx_sad32x64 avx2 msa sse2/;
-specialize qw/vpx_sad32x32 avx2 neon msa sse2/;
-specialize qw/vpx_sad32x16 avx2 msa sse2/;
-specialize qw/vpx_sad16x32 msa sse2/;
-specialize qw/vpx_sad16x16 media neon msa sse2/;
-specialize qw/vpx_sad16x8 neon msa sse2/;
-specialize qw/vpx_sad8x16 neon msa sse2/;
-specialize qw/vpx_sad8x8 neon msa sse2/;
-specialize qw/vpx_sad8x4 msa sse2/;
-specialize qw/vpx_sad4x8 msa sse2/;
-specialize qw/vpx_sad4x4 neon msa sse2/;
-
-specialize qw/vpx_sad128x128_avg sse2/;
-specialize qw/vpx_sad128x64_avg sse2/;
-specialize qw/vpx_sad64x128_avg sse2/;
-specialize qw/vpx_sad64x64_avg avx2 msa sse2/;
-specialize qw/vpx_sad64x32_avg avx2 msa sse2/;
-specialize qw/vpx_sad32x64_avg avx2 msa sse2/;
-specialize qw/vpx_sad32x32_avg avx2 msa sse2/;
-specialize qw/vpx_sad32x16_avg avx2 msa sse2/;
-specialize qw/vpx_sad16x32_avg msa sse2/;
-specialize qw/vpx_sad16x16_avg msa sse2/;
-specialize qw/vpx_sad16x8_avg msa sse2/;
-specialize qw/vpx_sad8x16_avg msa sse2/;
-specialize qw/vpx_sad8x8_avg msa sse2/;
-specialize qw/vpx_sad8x4_avg msa sse2/;
-specialize qw/vpx_sad4x8_avg msa sse2/;
-specialize qw/vpx_sad4x4_avg msa sse2/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
- add_proto qw/unsigned int/, "vpx_highbd_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
- if ($w != 128 && $h != 128 && $w != 4) {
- specialize "vpx_highbd_sad${w}x${h}", qw/sse2/;
- specialize "vpx_highbd_sad${w}x${h}_avg", qw/sse2/;
- }
- }
-}
-
-#
-# Masked SAD
-#
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
- specialize "vpx_masked_sad${w}x${h}", qw/ssse3/;
- }
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
- specialize "vpx_highbd_masked_sad${w}x${h}", qw/ssse3/;
- }
- }
-}
-
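A sketch of the masked flavor, under the assumption (taken from the experimental EXT_INTER code paths) that the mask holds 6-bit weights in 0..64, so the weighted sum is scaled back down at the end:

    #include <stdint.h>
    #include <stdlib.h>

    /* SAD with a per-pixel weight; mask values assumed in 0..64. */
    static unsigned int masked_sad(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   const uint8_t *mask, int mask_stride,
                                   int w, int h) {
      int x, y;
      unsigned int acc = 0;
      for (y = 0; y < h; ++y) {
        for (x = 0; x < w; ++x) acc += mask[x] * abs(src[x] - ref[x]);
        src += src_stride;
        ref += ref_stride;
        mask += mask_stride;
      }
      return (acc + 31) >> 6;  /* undo the assumed 6-bit mask weighting */
    }
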
-#
-# OBMC SAD
-#
-if (vpx_config("CONFIG_OBMC") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
- specialize "vpx_obmc_sad${w}x${h}", qw/sse4_1/;
- }
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd_obmc_sad${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask";
- specialize "vpx_highbd_obmc_sad${w}x${h}", qw/sse4_1/;
- }
- }
-}
-
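For OBMC the blending weights are already folded into wsrc (a pre-weighted 32-bit source) and mask, so the kernel compares pre * mask against wsrc directly. A sketch assuming the 12-bit fixed-point scale used by the experimental OBMC code:

    #include <stdint.h>
    #include <stdlib.h>

    #define ROUND_POW2(v, n) (((v) + (1 << ((n)-1))) >> (n))

    /* wsrc and mask are densely packed (stride == block width). */
    static unsigned int obmc_sad(const uint8_t *pre, int pre_stride,
                                 const int32_t *wsrc, const int32_t *mask,
                                 int w, int h) {
      int x, y;
      unsigned int acc = 0;
      for (y = 0; y < h; ++y) {
        for (x = 0; x < w; ++x)
          acc += ROUND_POW2(abs(wsrc[x] - pre[x] * mask[x]), 12);
        pre += pre_stride;
        wsrc += w;
        mask += w;
      }
      return acc;
    }
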
-#
-# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
-#
-# Blocks of 3
-foreach $s (@block_widths) {
- add_proto qw/void/, "vpx_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-specialize qw/vpx_sad64x64x3 msa/;
-specialize qw/vpx_sad32x32x3 msa/;
-specialize qw/vpx_sad16x16x3 sse3 ssse3 msa/;
-specialize qw/vpx_sad8x8x3 sse3 msa/;
-specialize qw/vpx_sad4x4x3 sse3 msa/;
-
-add_proto qw/void/, "vpx_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x3 sse3 ssse3 msa/;
-add_proto qw/void/, "vpx_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x3 sse3 msa/;
-
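Each xN call evaluates one source block against N reference positions one pixel apart, so a full-pel search amortizes the source loads. Sketch for the blocks-of-3 flavor, reusing the block_sad helper sketched earlier:

    #include <stdint.h>

    /* Entry i of sad_array compares src against ref + i. */
    static void sad_16x16x3(const uint8_t *src, int src_stride,
                            const uint8_t *ref, int ref_stride,
                            uint32_t *sad_array) {
      int i;
      for (i = 0; i < 3; ++i)
        sad_array[i] = block_sad(src, src_stride, ref + i, ref_stride, 16, 16);
    }
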
-# Blocks of 8
-foreach $s (@block_widths) {
- add_proto qw/void/, "vpx_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-specialize qw/vpx_sad64x64x8 msa/;
-specialize qw/vpx_sad32x32x8 msa/;
-specialize qw/vpx_sad16x16x8 sse4_1 msa/;
-specialize qw/vpx_sad8x8x8 sse4_1 msa/;
-specialize qw/vpx_sad4x4x8 sse4_1 msa/;
-
-add_proto qw/void/, "vpx_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x8 sse4_1 msa/;
-add_proto qw/void/, "vpx_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x8 sse4_1 msa/;
-add_proto qw/void/, "vpx_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x4x8 msa/;
-add_proto qw/void/, "vpx_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x8x8 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach $s (@block_widths) {
- # Blocks of 3
- add_proto qw/void/, "vpx_highbd_sad${s}x${s}x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- # Blocks of 8
- add_proto qw/void/, "vpx_highbd_sad${s}x${s}x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- }
- # Blocks of 3
- add_proto qw/void/, "vpx_highbd_sad16x8x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- add_proto qw/void/, "vpx_highbd_sad8x16x3", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- # Blocks of 8
- add_proto qw/void/, "vpx_highbd_sad16x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- add_proto qw/void/, "vpx_highbd_sad8x16x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- add_proto qw/void/, "vpx_highbd_sad8x4x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
- add_proto qw/void/, "vpx_highbd_sad4x8x8", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array";
-}
-
-#
-# Multi-block SAD, comparing a reference to N independent blocks
-#
-foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/void/, "vpx_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-}
-
-specialize qw/vpx_sad128x128x4d sse2/;
-specialize qw/vpx_sad128x64x4d sse2/;
-specialize qw/vpx_sad64x128x4d sse2/;
-specialize qw/vpx_sad64x64x4d avx2 neon msa sse2/;
-specialize qw/vpx_sad64x32x4d msa sse2/;
-specialize qw/vpx_sad32x64x4d msa sse2/;
-specialize qw/vpx_sad32x32x4d avx2 neon msa sse2/;
-specialize qw/vpx_sad32x16x4d msa sse2/;
-specialize qw/vpx_sad16x32x4d msa sse2/;
-specialize qw/vpx_sad16x16x4d neon msa sse2/;
-specialize qw/vpx_sad16x8x4d msa sse2/;
-specialize qw/vpx_sad8x16x4d msa sse2/;
-specialize qw/vpx_sad8x8x4d msa sse2/;
-specialize qw/vpx_sad8x4x4d msa sse2/;
-specialize qw/vpx_sad4x8x4d msa sse2/;
-specialize qw/vpx_sad4x4x4d msa sse2/;
-
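The x4d variants differ from x3/x8 in that the four references are fully independent pointers (typically four motion-vector candidates), not a sliding window; again in terms of the block_sad helper above:

    #include <stdint.h>

    /* Four independent candidates evaluated in one call. */
    static void sad_64x64x4d(const uint8_t *src, int src_stride,
                             const uint8_t *const ref[4], int ref_stride,
                             uint32_t *sad_array) {
      int i;
      for (i = 0; i < 4; ++i)
        sad_array[i] = block_sad(src, src_stride, ref[i], ref_stride, 64, 64);
    }
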
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- #
- # Multi-block SAD, comparing a reference to N independent blocks
- #
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/void/, "vpx_highbd_sad${w}x${h}x4d", "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
- if ($w != 128 && $h != 128) {
- specialize "vpx_highbd_sad${w}x${h}x4d", qw/sse2/;
- }
- }
-}
-
-#
-# Structured Similarity (SSIM)
-#
-if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") {
- add_proto qw/void vpx_ssim_parms_8x8/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
- specialize qw/vpx_ssim_parms_8x8/, "$sse2_x86_64";
-
- add_proto qw/void vpx_ssim_parms_16x16/, "const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
- specialize qw/vpx_ssim_parms_16x16/, "$sse2_x86_64";
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
- }
-}
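
The SSIM parameter kernels only gather the five running sums the SSIM formula is built from (two means, two energies, one cross term); the score itself is composed from them later. A sketch of the 8x8 accumulation:

    #include <stdint.h>

    /* Accumulate SSIM statistics over one 8x8 window. */
    static void ssim_parms_8x8(const uint8_t *s, int sp,
                               const uint8_t *r, int rp,
                               uint32_t *sum_s, uint32_t *sum_r,
                               uint32_t *sum_sq_s, uint32_t *sum_sq_r,
                               uint32_t *sum_sxr) {
      int i, j;
      for (i = 0; i < 8; ++i, s += sp, r += rp) {
        for (j = 0; j < 8; ++j) {
          *sum_s += s[j];
          *sum_r += r[j];
          *sum_sq_s += (uint32_t)s[j] * s[j];
          *sum_sq_r += (uint32_t)r[j] * r[j];
          *sum_sxr += (uint32_t)s[j] * r[j];
        }
      }
    }
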
-} # CONFIG_ENCODERS
-
-if (vpx_config("CONFIG_ENCODERS") eq "yes") {
-
-#
-# Variance
-#
-add_proto qw/unsigned int vpx_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance64x64 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance64x32 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x64 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x32 sse2 avx2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance32x16 sse2 avx2 msa/;
-
-add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x32 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x16 sse2 avx2 media neon msa/;
-
-add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance16x8 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x16 sse2 neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x8 sse2 media neon msa/;
-
-add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance8x4 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance4x8 sse2 msa/;
-
-add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_variance4x4 sse2 msa/;
-
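Every kernel in this family reduces to the same two accumulations, with variance recovered as SSE minus the squared-mean term; the vpx_mse* kernels below are the identical loop with that term dropped (they return *sse directly). A sketch:

    #include <stdint.h>

    /* Sum and sum of squared differences over a w-by-h block. */
    static void variance(const uint8_t *a, int a_stride,
                         const uint8_t *b, int b_stride,
                         int w, int h, uint32_t *sse, int *sum) {
      int i, j;
      *sum = 0;
      *sse = 0;
      for (i = 0; i < h; ++i, a += a_stride, b += b_stride) {
        for (j = 0; j < w; ++j) {
          const int diff = a[j] - b[j];
          *sum += diff;
          *sse += diff * diff;
        }
      }
    }

    /* e.g. a 16x16 wrapper; 16 * 16 = 2^8, hence the shift. */
    static uint32_t variance16x16(const uint8_t *a, int a_stride,
                                  const uint8_t *b, int b_stride,
                                  uint32_t *sse) {
      int sum;
      variance(a, a_stride, b, b_stride, 16, 16, sse, &sum);
      return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
    }
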
-#
-# Specialty Variance
-#
-add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
-specialize qw/vpx_get16x16var sse2 avx2 neon msa/;
-specialize qw/vpx_get8x8var sse2 neon msa/;
-
-
-add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-
-specialize qw/vpx_mse16x16 sse2 avx2 media neon msa/;
-specialize qw/vpx_mse16x8 sse2 msa/;
-specialize qw/vpx_mse8x16 sse2 msa/;
-specialize qw/vpx_mse8x8 sse2 msa/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach $bd (8, 10, 12) {
- add_proto qw/void/, "vpx_highbd_${bd}_get16x16var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void/, "vpx_highbd_${bd}_get8x8var", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
- add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse16x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse16x8", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse8x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_highbd_${bd}_mse8x8", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-
- specialize "vpx_highbd_${bd}_mse16x16", qw/sse2/;
- specialize "vpx_highbd_${bd}_mse8x8", qw/sse2/;
- }
-}
-
-#
-# Upsampled Prediction
-#
-add_proto qw/void vpx_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
-specialize qw/vpx_upsampled_pred sse2/;
-add_proto qw/void vpx_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
-specialize qw/vpx_comp_avg_upsampled_pred sse2/;
-
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
- specialize qw/vpx_highbd_upsampled_pred sse2/;
- add_proto qw/void vpx_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
- specialize qw/vpx_highbd_comp_avg_upsampled_pred sse2/;
-}
-
-#
-# Block Sum of Squares / 4x4 SSE
-#
-add_proto qw/unsigned int vpx_get_mb_ss/, "const int16_t *";
-add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
-
-specialize qw/vpx_get_mb_ss sse2 msa/;
-specialize qw/vpx_get4x4sse_cs neon msa/;
-
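vpx_get_mb_ss is a bare sum of squares over one macroblock's worth (16x16 = 256) of int16 residual samples, and vpx_get4x4sse_cs is the SSE-only half of the variance loop on a 4x4 block:

    #include <stdint.h>

    /* Sum of squares of 256 residual samples. */
    static unsigned int get_mb_ss(const int16_t *a) {
      unsigned int i, sum = 0;
      for (i = 0; i < 256; ++i) sum += (unsigned int)(a[i] * a[i]);
      return sum;
    }
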
-#
-# Variance / Subpixel Variance / Subpixel Avg Variance
-#
-foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/uint32_t/, "vpx_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t/, "vpx_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-}
-
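The sub_pixel variants first build a bilinearly filtered prediction at (xoffset, yoffset) and then run the variance loop against it; the avg flavor additionally averages in second_pred before that pass. A sketch of the two-tap filter step, assuming the 8-entry eighth-pel kernel table of the C reference:

    #include <stdint.h>

    /* Two-tap kernels; each row sums to 128 (7-bit precision). */
    static const uint8_t kBilinear[8][2] = {
      { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
      { 64, 64 }, { 48, 80 },  { 32, 96 }, { 16, 112 }
    };

    /* Filter one sample pair with the kernel for a given offset. */
    static uint8_t interp(int a, int b, const uint8_t *f) {
      return (uint8_t)((a * f[0] + b * f[1] + 64) >> 7);
    }

Rows are filtered with kBilinear[xoffset] into a temporary buffer, then columns with kBilinear[yoffset], before the variance accumulation runs on the result.
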
-specialize qw/vpx_variance64x64 sse2 avx2 neon msa/;
-specialize qw/vpx_variance64x32 sse2 avx2 neon msa/;
-specialize qw/vpx_variance32x64 sse2 neon msa/;
-specialize qw/vpx_variance32x32 sse2 avx2 neon msa/;
-specialize qw/vpx_variance32x16 sse2 avx2 msa/;
-specialize qw/vpx_variance16x32 sse2 msa/;
-specialize qw/vpx_variance16x16 sse2 avx2 media neon msa/;
-specialize qw/vpx_variance16x8 sse2 neon msa/;
-specialize qw/vpx_variance8x16 sse2 neon msa/;
-specialize qw/vpx_variance8x8 sse2 media neon msa/;
-specialize qw/vpx_variance8x4 sse2 msa/;
-specialize qw/vpx_variance4x8 sse2 msa/;
-specialize qw/vpx_variance4x4 sse2 msa/;
-
-specialize qw/vpx_sub_pixel_variance64x64 avx2 neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance64x32 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x64 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x32 avx2 neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance32x16 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x32 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x16 media neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance16x8 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x16 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x8 media neon msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance8x4 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance4x8 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_variance4x4 msa sse2 ssse3/;
-
-specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
-specialize qw/vpx_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach $bd (8, 10, 12) {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd_${bd}_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t/, "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t/, "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- if ($w != 128 && $h != 128 && $w != 4 && $h != 4) {
- specialize "vpx_highbd_${bd}_variance${w}x${h}", "sse2";
- }
- if ($w == 4 && $h == 4) {
- specialize "vpx_highbd_${bd}_variance${w}x${h}", "sse4_1";
- }
- if ($w != 128 && $h != 128 && $w != 4) {
- specialize "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", qw/sse2/;
- specialize "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", qw/sse2/;
- }
- if ($w == 4 && $h == 4) {
- specialize "vpx_highbd_${bd}_sub_pixel_variance${w}x${h}", "sse4_1";
- specialize "vpx_highbd_${bd}_sub_pixel_avg_variance${w}x${h}", "sse4_1";
- }
- }
- }
-} # CONFIG_VP9_HIGHBITDEPTH
-
-if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
-#
-# Masked Variance / Masked Subpixel Variance
-#
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- specialize "vpx_masked_variance${w}x${h}", qw/ssse3/;
- specialize "vpx_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
- }
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach $bd ("_", "_10_", "_12_") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
- specialize "vpx_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
- specialize "vpx_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
- }
- }
- }
-}
-
-#
-# OBMC Variance / OBMC Subpixel Variance
-#
-if (vpx_config("CONFIG_OBMC") eq "yes") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
- specialize "vpx_obmc_variance${w}x${h}", q/sse4_1/;
- specialize "vpx_obmc_sub_pixel_variance${w}x${h}";
- }
-
- if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- foreach $bd ("_", "_10_", "_12_") {
- foreach (@block_sizes) {
- ($w, $h) = @$_;
- add_proto qw/unsigned int/, "vpx_highbd${bd}obmc_variance${w}x${h}", "const uint8_t *pre, int pre_stride, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
- add_proto qw/unsigned int/, "vpx_highbd${bd}obmc_sub_pixel_variance${w}x${h}", "const uint8_t *pre, int pre_stride, int xoffset, int yoffset, const int32_t *wsrc, const int32_t *mask, unsigned int *sse";
- specialize "vpx_highbd${bd}obmc_variance${w}x${h}", qw/sse4_1/;
- specialize "vpx_highbd${bd}obmc_sub_pixel_variance${w}x${h}";
- }
- }
- }
-}
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance32x64 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance32x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance16x32 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance16x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance16x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance8x16 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
-
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
-
-#
-# Specialty Subpixel
-#
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_variance_halfpixvar16x16_h sse2 media/;
-
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_variance_halfpixvar16x16_v sse2 media/;
-
-add_proto qw/uint32_t vpx_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_variance_halfpixvar16x16_hv sse2 media/;
-
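The half-pel specialties are the center-offset case collapsed into a simple rounded average; a sketch of the horizontal flavor, where the (sum * sum) >> 8 term is again the squared mean over 16x16 pixels:

    #include <stdint.h>

    static uint32_t halfpixvar16x16_h(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      uint32_t *sse) {
      int i, j, sum = 0;
      uint32_t sq = 0;
      for (i = 0; i < 16; ++i, src += src_stride, ref += ref_stride) {
        for (j = 0; j < 16; ++j) {
          /* rounded mean of horizontally adjacent reference pixels */
          const int pred = (ref[j] + ref[j + 1] + 1) >> 1;
          const int diff = src[j] - pred;
          sum += diff;
          sq += diff * diff;
        }
      }
      *sse = sq;
      return sq - (uint32_t)(((int64_t)sum * sum) >> 8);
    }

The _v flavor averages ref[j] with the pixel one row below, and _hv with the 2x2 neighborhood.
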
-#
-# Comp Avg
-#
-add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
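
Comp-avg builds the compound prediction as a rounded per-pixel mean of two predictors. Note the packing convention implied by the prototype: only ref carries a stride, while pred and comp_pred are dense (stride == width):

    #include <stdint.h>

    static void comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred,
                              int width, int height,
                              const uint8_t *ref, int ref_stride) {
      int i, j;
      for (i = 0; i < height; ++i) {
        for (j = 0; j < width; ++j)
          comp_pred[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }
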
-if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance64x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance64x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance32x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance32x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance32x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance16x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance16x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance8x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_variance8x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
- add_proto qw/unsigned int vpx_highbd_10_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance64x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance64x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance32x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance32x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance32x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance16x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance16x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance8x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_variance8x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
- add_proto qw/unsigned int vpx_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance64x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance64x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance32x64 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance32x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance32x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance16x32 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance16x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance8x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_variance8x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-
- add_proto qw/void vpx_highbd_8_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_8_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
- add_proto qw/void vpx_highbd_10_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_10_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
- add_proto qw/void vpx_highbd_12_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_12_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-
- add_proto qw/unsigned int vpx_highbd_8_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_mse16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_8_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_8_mse8x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_mse16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_10_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_10_mse8x8 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_mse16x16 sse2/;
-
- add_proto qw/unsigned int vpx_highbd_12_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- specialize qw/vpx_highbd_12_mse8x8 sse2/;
-
- add_proto qw/void vpx_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
-
- #
- # Subpixel Variance
- #
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_12_sub_pixel_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_10_sub_pixel_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/vpx_highbd_8_sub_pixel_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x64 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x32 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x16 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x8 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x4 sse2/;
-
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
-
-} # CONFIG_VP9_HIGHBITDEPTH
-
-} # CONFIG_ENCODERS
-
-1;
diff --git a/aom_dsp/x86/add_noise_sse2.asm b/aom_dsp/x86/add_noise_sse2.asm
index a86ca72..18fc165 100644
--- a/aom_dsp/x86/add_noise_sse2.asm
+++ b/aom_dsp/x86/add_noise_sse2.asm
@@ -11,14 +11,14 @@
%include "aom_ports/x86_abi_support.asm"
-;void vpx_plane_add_noise_sse2(unsigned char *start, unsigned char *noise,
+;void aom_plane_add_noise_sse2(unsigned char *start, unsigned char *noise,
; unsigned char blackclamp[16],
; unsigned char whiteclamp[16],
; unsigned char bothclamp[16],
; unsigned int width, unsigned int height,
; int pitch)
-global sym(vpx_plane_add_noise_sse2) PRIVATE
-sym(vpx_plane_add_noise_sse2):
+global sym(aom_plane_add_noise_sse2) PRIVATE
+sym(aom_plane_add_noise_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 8
diff --git a/aom_dsp/x86/vpx_asm_stubs.c b/aom_dsp/x86/aom_asm_stubs.c
similarity index 60%
rename from aom_dsp/x86/vpx_asm_stubs.c
rename to aom_dsp/x86/aom_asm_stubs.c
index be56a69..0f0aaa8 100644
--- a/aom_dsp/x86/vpx_asm_stubs.c
+++ b/aom_dsp/x86/aom_asm_stubs.c
@@ -8,53 +8,53 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/x86/convolve.h"
#if HAVE_SSE2
-filter8_1dfunction vpx_filter_block1d16_v8_sse2;
-filter8_1dfunction vpx_filter_block1d16_h8_sse2;
-filter8_1dfunction vpx_filter_block1d8_v8_sse2;
-filter8_1dfunction vpx_filter_block1d8_h8_sse2;
-filter8_1dfunction vpx_filter_block1d4_v8_sse2;
-filter8_1dfunction vpx_filter_block1d4_h8_sse2;
-filter8_1dfunction vpx_filter_block1d16_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d16_h8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_h8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_v8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_v8_sse2;
+filter8_1dfunction aom_filter_block1d16_h8_sse2;
+filter8_1dfunction aom_filter_block1d8_v8_sse2;
+filter8_1dfunction aom_filter_block1d8_h8_sse2;
+filter8_1dfunction aom_filter_block1d4_v8_sse2;
+filter8_1dfunction aom_filter_block1d4_h8_sse2;
+filter8_1dfunction aom_filter_block1d16_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_h8_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_v8_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_h8_avg_sse2;
-filter8_1dfunction vpx_filter_block1d16_v2_sse2;
-filter8_1dfunction vpx_filter_block1d16_h2_sse2;
-filter8_1dfunction vpx_filter_block1d8_v2_sse2;
-filter8_1dfunction vpx_filter_block1d8_h2_sse2;
-filter8_1dfunction vpx_filter_block1d4_v2_sse2;
-filter8_1dfunction vpx_filter_block1d4_h2_sse2;
-filter8_1dfunction vpx_filter_block1d16_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d16_h2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d8_h2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_v2_avg_sse2;
-filter8_1dfunction vpx_filter_block1d4_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_v2_sse2;
+filter8_1dfunction aom_filter_block1d16_h2_sse2;
+filter8_1dfunction aom_filter_block1d8_v2_sse2;
+filter8_1dfunction aom_filter_block1d8_h2_sse2;
+filter8_1dfunction aom_filter_block1d4_v2_sse2;
+filter8_1dfunction aom_filter_block1d4_h2_sse2;
+filter8_1dfunction aom_filter_block1d16_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d16_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d8_h2_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_v2_avg_sse2;
+filter8_1dfunction aom_filter_block1d4_h2_avg_sse2;
-// void vpx_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
@@ -64,12 +64,12 @@
FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, sse2);
FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2);
-// void vpx_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
@@ -77,34 +77,34 @@
FUN_CONV_2D(, sse2);
FUN_CONV_2D(avg_, sse2);
-#if CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
+#if CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v8_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h8_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_avg_sse2;
-highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d16_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d8_h2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_v2_avg_sse2;
+highbd_filter8_1dfunction aom_highbd_filter_block1d4_h2_avg_sse2;
-// void vpx_highbd_convolve8_horiz_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_horiz_sse2(const uint8_t *src,
// ptrdiff_t src_stride,
// uint8_t *dst,
// ptrdiff_t dst_stride,
@@ -113,7 +113,7 @@
// const int16_t *filter_y,
// int y_step_q4,
// int w, int h, int bd);
-// void vpx_highbd_convolve8_vert_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_vert_sse2(const uint8_t *src,
// ptrdiff_t src_stride,
// uint8_t *dst,
// ptrdiff_t dst_stride,
@@ -122,7 +122,7 @@
// const int16_t *filter_y,
// int y_step_q4,
// int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_horiz_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_avg_horiz_sse2(const uint8_t *src,
// ptrdiff_t src_stride,
// uint8_t *dst,
// ptrdiff_t dst_stride,
@@ -131,7 +131,7 @@
// const int16_t *filter_y,
// int y_step_q4,
// int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_vert_sse2(const uint8_t *src,
+// void aom_highbd_convolve8_avg_vert_sse2(const uint8_t *src,
// ptrdiff_t src_stride,
// uint8_t *dst,
// ptrdiff_t dst_stride,
@@ -146,17 +146,17 @@
HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
sse2);
-// void vpx_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_highbd_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h, int bd);
-// void vpx_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h, int bd);
HIGH_FUN_CONV_2D(, sse2);
HIGH_FUN_CONV_2D(avg_, sse2);
-#endif // CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64
+#endif // CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
#endif // HAVE_SSE2
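
The FUN_CONV_1D/FUN_CONV_2D invocations above (macros from aom_dsp/x86/convolve.h) stamp the renamed 1-D workers into the public aom_convolve8_*_sse2 entry points. A condensed sketch of the selection rule, assuming — as the worker names suggest — that bilinear kernels populate only the middle taps; pick_worker itself is hypothetical:

#include <stddef.h>
#include <stdint.h>

typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
                                uint8_t *output_ptr, ptrdiff_t out_pitch,
                                uint32_t output_height, const int16_t *filter);

/* An 8-tap kernel has at least one outer tap set; otherwise the cheaper
 * 2-tap (v2/h2) worker suffices. */
static filter8_1dfunction *pick_worker(const int16_t *filter,
                                       filter8_1dfunction *tap8,
                                       filter8_1dfunction *tap2) {
  return (filter[0] | filter[1] | filter[2]) ? tap8 : tap2;
}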
diff --git a/aom_dsp/x86/vpx_convolve_copy_sse2.asm b/aom_dsp/x86/aom_convolve_copy_sse2.asm
similarity index 98%
rename from aom_dsp/x86/vpx_convolve_copy_sse2.asm
rename to aom_dsp/x86/aom_convolve_copy_sse2.asm
index 964ee14..bbfcf03 100644
--- a/aom_dsp/x86/vpx_convolve_copy_sse2.asm
+++ b/aom_dsp/x86/aom_convolve_copy_sse2.asm
@@ -47,7 +47,7 @@
cmp r4d, 32
je .w32
-%if CONFIG_VP10 && CONFIG_EXT_PARTITION
+%if CONFIG_AV1 && CONFIG_EXT_PARTITION
cmp r4d, 64
je .w64
%ifidn %2, highbd
@@ -157,7 +157,7 @@
jnz .loop128
RET
-%else ; CONFIG_VP10 && CONFIG_EXT_PARTITION
+%else ; CONFIG_AV1 && CONFIG_EXT_PARTITION
%ifidn %2, highbd
cmp r4d, 64
@@ -199,7 +199,7 @@
jnz .loop128
RET
%endif
-%endif ; CONFIG_VP10 && CONFIG_EXT_PARTITION
+%endif ; CONFIG_AV1 && CONFIG_EXT_PARTITION
.w64:
mov r4d, dword hm
@@ -336,7 +336,7 @@
INIT_XMM sse2
convolve_fn copy
convolve_fn avg
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
convolve_fn copy, highbd
convolve_fn avg, highbd
%endif
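
Beyond the renames, this file's job is simple: "copy" moves a block unchanged, and "avg" rounds the mean of the source block with what dst already holds; the CONFIG_AV1 && CONFIG_EXT_PARTITION guards merely add the 64- and 128-wide cases. A plain-C sketch of both operations:

#include <stddef.h>
#include <stdint.h>

static void convolve_copy_sketch(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 int w, int h) {
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) dst[x] = src[x];
    src += src_stride;
    dst += dst_stride;
  }
}

static void convolve_avg_sketch(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                int w, int h) {
  for (int y = 0; y < h; ++y) {
    /* pavgb semantics: round-to-nearest average */
    for (int x = 0; x < w; ++x)
      dst[x] = (uint8_t)((dst[x] + src[x] + 1) >> 1);
    src += src_stride;
    dst += dst_stride;
  }
}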
diff --git a/aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
similarity index 94%
rename from aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm
rename to aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
index f02845e..da738fe 100644
--- a/aom_dsp/x86/vpx_high_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm
@@ -197,7 +197,7 @@
movdqu [rdi + %2], xmm0
%endm
-;void vpx_filter_block1d4_v8_sse2
+;void aom_filter_block1d4_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -206,8 +206,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d4_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v8_sse2):
+global sym(aom_highbd_filter_block1d4_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -267,7 +267,7 @@
pop rbp
ret
-;void vpx_filter_block1d8_v8_sse2
+;void aom_filter_block1d8_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -276,8 +276,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d8_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v8_sse2):
+global sym(aom_highbd_filter_block1d8_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -326,7 +326,7 @@
pop rbp
ret
-;void vpx_filter_block1d16_v8_sse2
+;void aom_filter_block1d16_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -335,8 +335,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d16_v8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v8_sse2):
+global sym(aom_highbd_filter_block1d16_v8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -389,8 +389,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d4_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -450,8 +450,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d8_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -499,8 +499,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v8_avg_sse2):
+global sym(aom_highbd_filter_block1d16_v8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -552,7 +552,7 @@
pop rbp
ret
-;void vpx_filter_block1d4_h8_sse2
+;void aom_filter_block1d4_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -561,8 +561,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d4_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h8_sse2):
+global sym(aom_highbd_filter_block1d4_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -627,7 +627,7 @@
pop rbp
ret
-;void vpx_filter_block1d8_h8_sse2
+;void aom_filter_block1d8_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -636,8 +636,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d8_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h8_sse2):
+global sym(aom_highbd_filter_block1d8_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -693,7 +693,7 @@
pop rbp
ret
-;void vpx_filter_block1d16_h8_sse2
+;void aom_filter_block1d16_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -702,8 +702,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_highbd_filter_block1d16_h8_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h8_sse2):
+global sym(aom_highbd_filter_block1d16_h8_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -770,8 +770,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d4_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -836,8 +836,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d8_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -893,8 +893,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h8_avg_sse2):
+global sym(aom_highbd_filter_block1d16_h8_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
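
All of the aom_highbd_filter_block1d* routines renamed here share one piece of arithmetic. A scalar reference for a single output pixel, assuming the codebase's FILTER_BITS of 7 (the kernels sum to 128, so the rounded shift restores unity gain); filter8_pixel_sketch is illustrative:

#include <stddef.h>
#include <stdint.h>

static uint16_t filter8_pixel_sketch(const uint16_t *src, ptrdiff_t step,
                                     const int16_t *filter, int bd) {
  int sum = 0;
  for (int k = 0; k < 8; ++k) sum += src[k * step] * filter[k];
  sum = (sum + 64) >> 7;          /* round to nearest, FILTER_BITS = 7 */
  const int max = (1 << bd) - 1;  /* high-bitdepth clamp, not 255 */
  return (uint16_t)(sum < 0 ? 0 : (sum > max ? max : sum));
}

The vertical (v8) variants step through src by the pitch, the horizontal (h8) variants by one pixel.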
diff --git a/aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
similarity index 89%
rename from aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
rename to aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
index eacedc5..c926ab6 100644
--- a/aom_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm
@@ -171,8 +171,8 @@
%endm
%endif
-global sym(vpx_highbd_filter_block1d4_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v2_sse2):
+global sym(aom_highbd_filter_block1d4_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -196,8 +196,8 @@
ret
%if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v2_sse2):
+global sym(aom_highbd_filter_block1d8_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -222,8 +222,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_v2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v2_sse2):
+global sym(aom_highbd_filter_block1d16_v2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -251,8 +251,8 @@
ret
%endif
-global sym(vpx_highbd_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d4_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -276,8 +276,8 @@
ret
%if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d8_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -302,8 +302,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_v2_avg_sse2):
+global sym(aom_highbd_filter_block1d16_v2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -331,8 +331,8 @@
ret
%endif
-global sym(vpx_highbd_filter_block1d4_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h2_sse2):
+global sym(aom_highbd_filter_block1d4_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -357,8 +357,8 @@
ret
%if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h2_sse2):
+global sym(aom_highbd_filter_block1d8_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -383,8 +383,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_h2_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h2_sse2):
+global sym(aom_highbd_filter_block1d16_h2_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -412,8 +412,8 @@
ret
%endif
-global sym(vpx_highbd_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d4_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d4_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d4_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -438,8 +438,8 @@
ret
%if ARCH_X86_64
-global sym(vpx_highbd_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d8_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d8_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d8_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -464,8 +464,8 @@
pop rbp
ret
-global sym(vpx_highbd_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(vpx_highbd_filter_block1d16_h2_avg_sse2):
+global sym(aom_highbd_filter_block1d16_h2_avg_sse2) PRIVATE
+sym(aom_highbd_filter_block1d16_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
diff --git a/aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
similarity index 92%
rename from aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
rename to aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
index 2453bca..61be3d8 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
+++ b/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c
@@ -10,7 +10,7 @@
#include <immintrin.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/x86/convolve.h"
#include "aom_ports/mem.h"
@@ -59,7 +59,7 @@
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif // __clang__
-static void vpx_filter_block1d16_h8_avx2(
+static void aom_filter_block1d16_h8_avx2(
const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
__m128i filtersReg;
@@ -287,7 +287,7 @@
}
}
-static void vpx_filter_block1d16_v8_avx2(
+static void aom_filter_block1d16_v8_avx2(
const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
__m128i filtersReg;
@@ -523,41 +523,41 @@
}
#if HAVE_AVX2 && HAVE_SSSE3
-filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_ssse3;
#if ARCH_X86_64
-filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
-#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_intrin_ssse3
-#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_intrin_ssse3
-#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_intrin_ssse3
+filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3;
+#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_intrin_ssse3
+#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_intrin_ssse3
+#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_intrin_ssse3
#else // ARCH_X86
-filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_ssse3;
-#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_ssse3
-#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_ssse3
-#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_ssse3
+filter8_1dfunction aom_filter_block1d8_v8_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_ssse3;
+#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_ssse3
+#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_ssse3
+#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_ssse3
#endif // ARCH_X86_64
-filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
-#define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3
-#define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3
-#define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3
-#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3
-#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3
-#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3
-#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3
-// void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
+filter8_1dfunction aom_filter_block1d16_v2_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_ssse3;
+#define aom_filter_block1d4_v8_avx2 aom_filter_block1d4_v8_ssse3
+#define aom_filter_block1d16_v2_avx2 aom_filter_block1d16_v2_ssse3
+#define aom_filter_block1d16_h2_avx2 aom_filter_block1d16_h2_ssse3
+#define aom_filter_block1d8_v2_avx2 aom_filter_block1d8_v2_ssse3
+#define aom_filter_block1d8_h2_avx2 aom_filter_block1d8_h2_ssse3
+#define aom_filter_block1d4_v2_avx2 aom_filter_block1d4_v2_ssse3
+#define aom_filter_block1d4_h2_avx2 aom_filter_block1d4_h2_ssse3
+// void aom_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
@@ -565,7 +565,7 @@
FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);
-// void vpx_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
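
The #define aliases renamed above are how this file fills gaps in its AVX2 coverage: where only an SSSE3 body exists, the avx2-suffixed name is mapped onto it so the FUN_CONV_1D(..., avx2) wrappers can still bind a complete worker set. The pattern in isolation:

void aom_filter_block1d4_v2_ssse3();  /* full worker signature omitted */
#define aom_filter_block1d4_v2_avx2 aom_filter_block1d4_v2_ssse3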
diff --git a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
similarity index 94%
rename from aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
rename to aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
index bd472ff..6b22775 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c
@@ -10,10 +10,10 @@
#include <tmmintrin.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_filter.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/convolve.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/emmintrin_compat.h"
@@ -44,11 +44,11 @@
};
// These are reused by the avx2 intrinsics.
-filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3;
-void vpx_filter_block1d4_h8_intrin_ssse3(
+void aom_filter_block1d4_h8_intrin_ssse3(
const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
__m128i firstFilters, secondFilters, shuffle1, shuffle2;
@@ -116,7 +116,7 @@
}
}
-void vpx_filter_block1d8_h8_intrin_ssse3(
+void aom_filter_block1d8_h8_intrin_ssse3(
const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
__m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg;
@@ -193,7 +193,7 @@
}
}
-void vpx_filter_block1d8_v8_intrin_ssse3(
+void aom_filter_block1d8_v8_intrin_ssse3(
const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
__m128i addFilterReg64, filtersReg, minReg;
@@ -278,48 +278,48 @@
}
}
-filter8_1dfunction vpx_filter_block1d16_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_ssse3;
-filter8_1dfunction vpx_filter_block1d16_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_v8_ssse3;
+filter8_1dfunction aom_filter_block1d16_h8_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_ssse3;
+filter8_1dfunction aom_filter_block1d16_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_h8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_v8_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_h8_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
-filter8_1dfunction vpx_filter_block1d16_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d16_h2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d8_h2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_v2_avg_ssse3;
-filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_v2_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_ssse3;
+filter8_1dfunction aom_filter_block1d16_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d16_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d8_h2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_v2_avg_ssse3;
+filter8_1dfunction aom_filter_block1d4_h2_avg_ssse3;
-// void vpx_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
@@ -873,7 +873,7 @@
return (int)((const InterpKernel *)(intptr_t)f - base);
}
-void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+void aom_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
int w, int h) {
@@ -887,12 +887,12 @@
x_step_q4, filters_y, y0_q4, y_step_q4, w, h);
}
-// void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
// int w, int h);
-// void vpx_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void aom_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const int16_t *filter_x, int x_step_q4,
// const int16_t *filter_y, int y_step_q4,
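
One non-mechanical detail survives the rename in aom_scaled_2d_ssse3: the subpel phase is recovered from the filter pointer itself, as the return statement above shows. A sketch of that pointer arithmetic, with the 8-taps-per-phase InterpKernel typedef reproduced from the codebase:

#include <stdint.h>

typedef int16_t InterpKernel[8];

/* filter_x/filter_y point into a table of phase kernels, so subtracting
 * the table base yields the phase index. */
static int filter_index_sketch(const InterpKernel *base, const int16_t *f) {
  return (int)((const InterpKernel *)(intptr_t)f - base);
}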
diff --git a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
similarity index 94%
rename from aom_dsp/x86/vpx_subpixel_8t_sse2.asm
rename to aom_dsp/x86/aom_subpixel_8t_sse2.asm
index b197150..535581e 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_sse2.asm
@@ -176,7 +176,7 @@
movq [rdi + %2], xmm0
%endm
-;void vpx_filter_block1d4_v8_sse2
+;void aom_filter_block1d4_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -185,8 +185,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d4_v8_sse2) PRIVATE
-sym(vpx_filter_block1d4_v8_sse2):
+global sym(aom_filter_block1d4_v8_sse2) PRIVATE
+sym(aom_filter_block1d4_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -243,7 +243,7 @@
pop rbp
ret
-;void vpx_filter_block1d8_v8_sse2
+;void aom_filter_block1d8_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -252,8 +252,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d8_v8_sse2) PRIVATE
-sym(vpx_filter_block1d8_v8_sse2):
+global sym(aom_filter_block1d8_v8_sse2) PRIVATE
+sym(aom_filter_block1d8_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -302,7 +302,7 @@
pop rbp
ret
-;void vpx_filter_block1d16_v8_sse2
+;void aom_filter_block1d16_v8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pitch,
@@ -311,8 +311,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d16_v8_sse2) PRIVATE
-sym(vpx_filter_block1d16_v8_sse2):
+global sym(aom_filter_block1d16_v8_sse2) PRIVATE
+sym(aom_filter_block1d16_v8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -365,8 +365,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_v8_avg_sse2):
+global sym(aom_filter_block1d4_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -423,8 +423,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_v8_avg_sse2):
+global sym(aom_filter_block1d8_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -472,8 +472,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_v8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_v8_avg_sse2):
+global sym(aom_filter_block1d16_v8_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_v8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -525,7 +525,7 @@
pop rbp
ret
-;void vpx_filter_block1d4_h8_sse2
+;void aom_filter_block1d4_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -534,8 +534,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d4_h8_sse2) PRIVATE
-sym(vpx_filter_block1d4_h8_sse2):
+global sym(aom_filter_block1d4_h8_sse2) PRIVATE
+sym(aom_filter_block1d4_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -599,7 +599,7 @@
pop rbp
ret
-;void vpx_filter_block1d8_h8_sse2
+;void aom_filter_block1d8_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -608,8 +608,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d8_h8_sse2) PRIVATE
-sym(vpx_filter_block1d8_h8_sse2):
+global sym(aom_filter_block1d8_h8_sse2) PRIVATE
+sym(aom_filter_block1d8_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -674,7 +674,7 @@
pop rbp
ret
-;void vpx_filter_block1d16_h8_sse2
+;void aom_filter_block1d16_h8_sse2
;(
; unsigned char *src_ptr,
; unsigned int src_pixels_per_line,
@@ -683,8 +683,8 @@
; unsigned int output_height,
; short *filter
;)
-global sym(vpx_filter_block1d16_h8_sse2) PRIVATE
-sym(vpx_filter_block1d16_h8_sse2):
+global sym(aom_filter_block1d16_h8_sse2) PRIVATE
+sym(aom_filter_block1d16_h8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -769,8 +769,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_h8_avg_sse2):
+global sym(aom_filter_block1d4_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -834,8 +834,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_h8_avg_sse2):
+global sym(aom_filter_block1d8_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -900,8 +900,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_h8_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_h8_avg_sse2):
+global sym(aom_filter_block1d16_h8_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_h8_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/vpx_subpixel_8t_ssse3.asm b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
similarity index 99%
rename from aom_dsp/x86/vpx_subpixel_8t_ssse3.asm
rename to aom_dsp/x86/aom_subpixel_8t_ssse3.asm
index c1a6f23..5b5eafe 100644
--- a/aom_dsp/x86/vpx_subpixel_8t_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm
@@ -17,7 +17,7 @@
; NOTE: pmulhrsw has a latency of 5 cycles. Tests showed a performance loss
; when using this instruction.
;
-; The add order below (based on ffvp9) must be followed to prevent outranges.
+; The add order below (based on ffav1) must be followed to prevent outranges.
; x = k0k1 + k4k5
; y = k2k3 + k6k7
; z = signed SAT(x + y)
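
The ffav1 reference aside, the constraint this comment states is real: the kNkM values are 16-bit partial sums (from pmaddubsw in this file), and only the stated pairing keeps both intermediate adds inside int16 range for legal kernels, leaving a single saturating add at the end. A scalar sketch of the rule; filter_sum_sketch is illustrative:

#include <stdint.h>

static int16_t sat16(int32_t v) {
  return (int16_t)(v > INT16_MAX ? INT16_MAX
                                 : (v < INT16_MIN ? INT16_MIN : v));
}

static int16_t filter_sum_sketch(int16_t k0k1, int16_t k2k3, int16_t k4k5,
                                 int16_t k6k7) {
  const int16_t x = (int16_t)(k0k1 + k4k5);  /* outer pair: no overflow */
  const int16_t y = (int16_t)(k2k3 + k6k7);  /* inner pair: no overflow */
  return sat16((int32_t)x + (int32_t)y);     /* only this add saturates */
}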
diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
similarity index 89%
rename from aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm
rename to aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
index 7de58ff..78ac1c4 100644
--- a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm
@@ -131,8 +131,8 @@
dec rcx
%endm
-global sym(vpx_filter_block1d4_v2_sse2) PRIVATE
-sym(vpx_filter_block1d4_v2_sse2):
+global sym(aom_filter_block1d4_v2_sse2) PRIVATE
+sym(aom_filter_block1d4_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -155,8 +155,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_v2_sse2) PRIVATE
-sym(vpx_filter_block1d8_v2_sse2):
+global sym(aom_filter_block1d8_v2_sse2) PRIVATE
+sym(aom_filter_block1d8_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -181,8 +181,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_v2_sse2) PRIVATE
-sym(vpx_filter_block1d16_v2_sse2):
+global sym(aom_filter_block1d16_v2_sse2) PRIVATE
+sym(aom_filter_block1d16_v2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -209,8 +209,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_v2_avg_sse2):
+global sym(aom_filter_block1d4_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -233,8 +233,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_v2_avg_sse2):
+global sym(aom_filter_block1d8_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -259,8 +259,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_v2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_v2_avg_sse2):
+global sym(aom_filter_block1d16_v2_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_v2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -287,8 +287,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_h2_sse2) PRIVATE
-sym(vpx_filter_block1d4_h2_sse2):
+global sym(aom_filter_block1d4_h2_sse2) PRIVATE
+sym(aom_filter_block1d4_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -312,8 +312,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_h2_sse2) PRIVATE
-sym(vpx_filter_block1d8_h2_sse2):
+global sym(aom_filter_block1d8_h2_sse2) PRIVATE
+sym(aom_filter_block1d8_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -339,8 +339,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_h2_sse2) PRIVATE
-sym(vpx_filter_block1d16_h2_sse2):
+global sym(aom_filter_block1d16_h2_sse2) PRIVATE
+sym(aom_filter_block1d16_h2_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -367,8 +367,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d4_h2_avg_sse2):
+global sym(aom_filter_block1d4_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d4_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -392,8 +392,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d8_h2_avg_sse2):
+global sym(aom_filter_block1d8_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d8_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -419,8 +419,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_h2_avg_sse2) PRIVATE
-sym(vpx_filter_block1d16_h2_avg_sse2):
+global sym(aom_filter_block1d16_h2_avg_sse2) PRIVATE
+sym(aom_filter_block1d16_h2_avg_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
similarity index 88%
rename from aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm
rename to aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
index 318c7c4..5f24460 100644
--- a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm
+++ b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm
@@ -105,8 +105,8 @@
dec rcx
%endm
-global sym(vpx_filter_block1d4_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d4_v2_ssse3):
+global sym(aom_filter_block1d4_v2_ssse3) PRIVATE
+sym(aom_filter_block1d4_v2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -129,8 +129,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d8_v2_ssse3):
+global sym(aom_filter_block1d8_v2_ssse3) PRIVATE
+sym(aom_filter_block1d8_v2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -155,8 +155,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_v2_ssse3) PRIVATE
-sym(vpx_filter_block1d16_v2_ssse3):
+global sym(aom_filter_block1d16_v2_ssse3) PRIVATE
+sym(aom_filter_block1d16_v2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -182,8 +182,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d4_v2_avg_ssse3):
+global sym(aom_filter_block1d4_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d4_v2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -206,8 +206,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d8_v2_avg_ssse3):
+global sym(aom_filter_block1d8_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d8_v2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -232,8 +232,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_v2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d16_v2_avg_ssse3):
+global sym(aom_filter_block1d16_v2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d16_v2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -259,8 +259,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d4_h2_ssse3):
+global sym(aom_filter_block1d4_h2_ssse3) PRIVATE
+sym(aom_filter_block1d4_h2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -284,8 +284,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d8_h2_ssse3):
+global sym(aom_filter_block1d8_h2_ssse3) PRIVATE
+sym(aom_filter_block1d8_h2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -311,8 +311,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_h2_ssse3) PRIVATE
-sym(vpx_filter_block1d16_h2_ssse3):
+global sym(aom_filter_block1d16_h2_ssse3) PRIVATE
+sym(aom_filter_block1d16_h2_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -338,8 +338,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d4_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d4_h2_avg_ssse3):
+global sym(aom_filter_block1d4_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d4_h2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -363,8 +363,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d8_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d8_h2_avg_ssse3):
+global sym(aom_filter_block1d8_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d8_h2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -390,8 +390,8 @@
pop rbp
ret
-global sym(vpx_filter_block1d16_h2_avg_ssse3) PRIVATE
-sym(vpx_filter_block1d16_h2_avg_ssse3):
+global sym(aom_filter_block1d16_h2_avg_ssse3) PRIVATE
+sym(aom_filter_block1d16_h2_avg_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/avg_intrin_sse2.c b/aom_dsp/x86/avg_intrin_sse2.c
index c778f09..2365833 100644
--- a/aom_dsp/x86/avg_intrin_sse2.c
+++ b/aom_dsp/x86/avg_intrin_sse2.c
@@ -12,10 +12,10 @@
#include "aom_dsp/x86/synonyms.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
-void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
+void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
int *min, int *max) {
__m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
u0 = _mm_setzero_si128();
@@ -93,7 +93,7 @@
*min = _mm_extract_epi16(minabsdiff, 0);
}
-unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
+unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
__m128i s0, s1, u0;
unsigned int avg = 0;
u0 = _mm_setzero_si128();
@@ -120,7 +120,7 @@
return (avg + 32) >> 6;
}
-unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) {
+unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
__m128i s0, s1, u0;
unsigned int avg = 0;
@@ -215,7 +215,7 @@
}
}
-void vpx_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
+void aom_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
int16_t *coeff) {
__m128i src[8];
src[0] = _mm_load_si128((const __m128i *)src_diff);
@@ -247,13 +247,13 @@
_mm_store_si128((__m128i *)coeff, src[7]);
}
-void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,
+void aom_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,
int16_t *coeff) {
int idx;
for (idx = 0; idx < 4; ++idx) {
int16_t const *src_ptr =
src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
- vpx_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
+ aom_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
}
for (idx = 0; idx < 64; idx += 8) {
@@ -286,7 +286,7 @@
}
}
-int vpx_satd_sse2(const int16_t *coeff, int length) {
+int aom_satd_sse2(const int16_t *coeff, int length) {
int i;
const __m128i zero = _mm_setzero_si128();
__m128i accum = zero;
@@ -312,7 +312,7 @@
return _mm_cvtsi128_si32(accum);
}
-void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref,
+void aom_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref,
const int ref_stride, const int height) {
int idx;
__m128i zero = _mm_setzero_si128();
@@ -361,7 +361,7 @@
_mm_storeu_si128((__m128i *)hbuf, s1);
}
-int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) {
+int16_t aom_int_pro_col_sse2(uint8_t const *ref, const int width) {
__m128i zero = _mm_setzero_si128();
__m128i src_line = _mm_load_si128((const __m128i *)ref);
__m128i s0 = _mm_sad_epu8(src_line, zero);
@@ -381,7 +381,7 @@
return _mm_extract_epi16(s0, 0);
}
-int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) {
+int aom_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) {
int idx;
int width = 4 << bwl;
int16_t mean;
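
Among the functions renamed in this file, the rounding in aom_avg_8x8_sse2 is worth spelling out: 64 pixels are summed, and adding half the divisor before the shift rounds the mean to nearest. A scalar reference (avg_8x8_sketch is illustrative); note also that aom_hadamard_16x16_sse2 above is built from four aom_hadamard_8x8_sse2 calls in the same way:

#include <stdint.h>

static unsigned int avg_8x8_sketch(const uint8_t *s, int p) {
  unsigned int sum = 0;
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c) sum += s[r * p + c];
  return (sum + 32) >> 6;  /* mean of 64 values, rounded */
}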
diff --git a/aom_dsp/x86/avg_ssse3_x86_64.asm b/aom_dsp/x86/avg_ssse3_x86_64.asm
index 26412e8..8f28874 100644
--- a/aom_dsp/x86/avg_ssse3_x86_64.asm
+++ b/aom_dsp/x86/avg_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
; be found in the AUTHORS file in the root of the source tree.
;
-%define private_prefix vpx
+%define private_prefix aom
%include "third_party/x86inc/x86inc.asm"
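
This single %define is the whole port for x86inc-based files: x86inc.asm mangles every exported label as <private_prefix>_<name>_<isa>, the same effect the explicit sym(aom_...) edits achieve in the hand-written assembly above. C code then binds the symbol with an ordinary declaration — the name below is an assumed example, not necessarily one defined in this file:

#include <stdint.h>

void aom_hadamard_8x8_ssse3(const int16_t *src_diff, int src_stride,
                            int16_t *coeff);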
diff --git a/aom_dsp/x86/blend_a64_hmask_sse4.c b/aom_dsp/x86/blend_a64_hmask_sse4.c
index 1e452e5..4ee735d 100644
--- a/aom_dsp/x86/blend_a64_hmask_sse4.c
+++ b/aom_dsp/x86/blend_a64_hmask_sse4.c
@@ -8,28 +8,28 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
// To start out, just dispatch to the function using the 2D mask and
// pass mask stride as 0. This can be improved upon if necessary.
-void vpx_blend_a64_hmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_hmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
- vpx_blend_a64_mask_sse4_1(dst, dst_stride, src0, src0_stride, src1,
+ aom_blend_a64_mask_sse4_1(dst, dst_stride, src0, src0_stride, src1,
src1_stride, mask, 0, h, w, 0, 0);
}
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_blend_a64_hmask_sse4_1(
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_blend_a64_hmask_sse4_1(
uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8,
uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
- vpx_highbd_blend_a64_mask_sse4_1(dst_8, dst_stride, src0_8, src0_stride,
+ aom_highbd_blend_a64_mask_sse4_1(dst_8, dst_stride, src0_8, src0_stride,
src1_8, src1_stride, mask, 0, h, w, 0, 0,
bd);
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
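
Both wrappers above lean on the 2-D mask kernel: passing mask_stride = 0 makes every output row re-read the same horizontal mask line. The blend itself, shared across the a64 family (its AOM_BLEND_A64_MAX_ALPHA constant appears throughout the next file), reduces to the following scalar sketch:

#include <stdint.h>

#define AOM_BLEND_A64_MAX_ALPHA 64

/* dst = round((m * v0 + (64 - m) * v1) / 64), with 0 <= m <= 64 */
static uint8_t blend_a64_sketch(int m, uint8_t v0, uint8_t v1) {
  return (uint8_t)((m * v0 + (AOM_BLEND_A64_MAX_ALPHA - m) * v1 + 32) >> 6);
}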
diff --git a/aom_dsp/x86/blend_a64_mask_sse4.c b/aom_dsp/x86/blend_a64_mask_sse4.c
index 2384556..6463ecc 100644
--- a/aom_dsp/x86/blend_a64_mask_sse4.c
+++ b/aom_dsp/x86/blend_a64_mask_sse4.c
@@ -12,15 +12,15 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/blend_sse4.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
//////////////////////////////////////////////////////////////////////////////
// No sub-sampling
@@ -31,7 +31,7 @@
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride,
int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -58,7 +58,7 @@
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride,
int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -84,7 +84,7 @@
uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -121,7 +121,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -151,7 +151,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -181,7 +181,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -219,7 +219,7 @@
uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -248,7 +248,7 @@
uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -278,7 +278,7 @@
uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zero = _mm_setzero_si128();
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -317,7 +317,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -352,7 +352,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -387,7 +387,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, int w) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -431,7 +431,7 @@
// Dispatch
//////////////////////////////////////////////////////////////////////////////
-void vpx_blend_a64_mask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_mask_sse4_1(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h,
@@ -463,7 +463,7 @@
assert(IS_POWER_OF_TWO(w));
if (UNLIKELY((h | w) & 3)) { // if (w <= 2 || h <= 2)
- vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+ aom_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
mask, mask_stride, h, w, suby, subx);
} else {
blend[(w >> 2) & 3][subx != 0][suby != 0](dst, dst_stride, src0,
@@ -472,7 +472,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
//////////////////////////////////////////////////////////////////////////////
// No sub-sampling
//////////////////////////////////////////////////////////////////////////////
@@ -481,7 +481,7 @@
uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
const __m128i v_m0_b = xx_loadl_32(mask);
@@ -522,7 +522,7 @@
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w,
blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -570,7 +570,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
const __m128i v_r_b = xx_loadl_64(mask);
@@ -617,7 +617,7 @@
blend_unit_fn blend) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -665,7 +665,7 @@
uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
const __m128i v_ra_b = xx_loadl_32(mask);
@@ -711,7 +711,7 @@
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, uint32_t mask_stride, int h, int w,
blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -762,7 +762,7 @@
const uint8_t *mask, uint32_t mask_stride, int h, blend_unit_fn blend) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
const __m128i v_ra_b = xx_loadl_64(mask);
@@ -814,7 +814,7 @@
blend_unit_fn blend) {
const __m128i v_zmask_b = _mm_set_epi8(0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0,
0xff, 0, 0xff, 0, 0xff, 0, 0xff);
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -863,7 +863,7 @@
// Dispatch
//////////////////////////////////////////////////////////////////////////////
-void vpx_highbd_blend_a64_mask_sse4_1(uint8_t *dst_8, uint32_t dst_stride,
+void aom_highbd_blend_a64_mask_sse4_1(uint8_t *dst_8, uint32_t dst_stride,
const uint8_t *src0_8,
uint32_t src0_stride,
const uint8_t *src1_8,
@@ -907,7 +907,7 @@
assert(bd == 8 || bd == 10 || bd == 12);
if (UNLIKELY((h | w) & 3)) { // if (w <= 2 || h <= 2)
- vpx_highbd_blend_a64_mask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+ aom_highbd_blend_a64_mask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
src1_stride, mask, mask_stride, h, w, suby,
subx, bd);
} else {
@@ -920,4 +920,4 @@
mask_stride, h, w);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
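
Note: every kernel in the blend files above normalizes against the same constant pair from aom_dsp/blend.h, where AOM_BLEND_A64_MAX_ALPHA is 1 << AOM_BLEND_A64_ROUND_BITS (64 and 6). A minimal scalar sketch of the per-pixel blend these SSE4.1 paths vectorize:

#include <stdint.h>

#define ROUND_BITS 6                /* AOM_BLEND_A64_ROUND_BITS */
#define MAX_ALPHA (1 << ROUND_BITS) /* AOM_BLEND_A64_MAX_ALPHA == 64 */

/* Per-pixel 6-bit alpha blend with round-to-nearest. */
static uint8_t blend_a64(int m, uint8_t v0, uint8_t v1) {
  return (uint8_t)((m * v0 + (MAX_ALPHA - m) * v1 + (1 << (ROUND_BITS - 1))) >>
                   ROUND_BITS);
}
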
diff --git a/aom_dsp/x86/blend_a64_vmask_sse4.c b/aom_dsp/x86/blend_a64_vmask_sse4.c
index 0108d02..946c8ff 100644
--- a/aom_dsp/x86/blend_a64_vmask_sse4.c
+++ b/aom_dsp/x86/blend_a64_vmask_sse4.c
@@ -12,15 +12,15 @@
#include <assert.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/blend_sse4.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
//////////////////////////////////////////////////////////////////////////////
// Implementation - No sub-sampling
@@ -30,7 +30,7 @@
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -55,7 +55,7 @@
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
(void)w;
@@ -82,7 +82,7 @@
const uint8_t *src1,
uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -108,7 +108,7 @@
// Dispatch
//////////////////////////////////////////////////////////////////////////////
-void vpx_blend_a64_vmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
+void aom_blend_a64_vmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
const uint8_t *src0, uint32_t src0_stride,
const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w) {
@@ -120,8 +120,8 @@
// Dimension: width_index
static const blend_fn blend[9] = {
blend_a64_vmask_w16n_sse4_1, // w % 16 == 0
- vpx_blend_a64_vmask_c, // w == 1
- vpx_blend_a64_vmask_c, // w == 2
+ aom_blend_a64_vmask_c, // w == 1
+ aom_blend_a64_vmask_c, // w == 2
NULL, // INVALID
blend_a64_vmask_w4_sse4_1, // w == 4
NULL, // INVALID
@@ -142,7 +142,7 @@
w);
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
//////////////////////////////////////////////////////////////////////////////
// Implementation - No sub-sampling
//////////////////////////////////////////////////////////////////////////////
@@ -151,7 +151,7 @@
uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
const __m128i v_m0_w = _mm_set1_epi16(*mask);
@@ -194,7 +194,7 @@
uint16_t *dst, uint32_t dst_stride, const uint16_t *src0,
uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w, blend_unit_fn blend) {
- const __m128i v_maxval_w = _mm_set1_epi16(VPX_BLEND_A64_MAX_ALPHA);
+ const __m128i v_maxval_w = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
do {
int c;
@@ -236,7 +236,7 @@
// Dispatch
//////////////////////////////////////////////////////////////////////////////
-void vpx_highbd_blend_a64_vmask_sse4_1(
+void aom_highbd_blend_a64_vmask_sse4_1(
uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8,
uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
@@ -270,7 +270,7 @@
assert(bd == 8 || bd == 10 || bd == 12);
if (UNLIKELY((h | w) & 3)) { // if (w <= 2 || h <= 2)
- vpx_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
+ aom_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
src1_stride, mask, h, w, bd);
} else {
uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8);
@@ -281,4 +281,4 @@
src1_stride, mask, h, w);
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
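
Note: the vmask dispatch above routes each width to a kernel through a 9-entry table; w == 1 and w == 2 fall back to the C function because the SSE4.1 kernels need at least 4 columns. A sketch of that table-driven dispatch, assuming the index is w & 0xf (which sends multiples of 16 to slot 0); the kernel names and the trivial signature are stand-ins:

#include <assert.h>
#include <stddef.h>

typedef void (*blend_fn)(int w); /* real kernels take the full blend args */

static void blend_c(int w) { (void)w; }    /* narrow-width C fallback */
static void blend_w4(int w) { (void)w; }
static void blend_w8(int w) { (void)w; }
static void blend_w16n(int w) { (void)w; }

static void blend_dispatch(int w) {
  static const blend_fn table[9] = {
    blend_w16n,       /* w % 16 == 0 */
    blend_c,          /* w == 1 */
    blend_c,          /* w == 2 */
    NULL,             /* invalid */
    blend_w4,         /* w == 4 */
    NULL, NULL, NULL, /* invalid */
    blend_w8          /* w == 8 */
  };
  assert(w >= 1 && (w & (w - 1)) == 0); /* power-of-two width */
  table[w & 0xf](w);
}
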
diff --git a/aom_dsp/x86/blend_sse4.h b/aom_dsp/x86/blend_sse4.h
index 068518c..4fd5f5f 100644
--- a/aom_dsp/x86/blend_sse4.h
+++ b/aom_dsp/x86/blend_sse4.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_BLEND_SSE4_H_
-#define VPX_DSP_X86_BLEND_SSE4_H_
+#ifndef AOM_DSP_X86_BLEND_SSE4_H_
+#define AOM_DSP_X86_BLEND_SSE4_H_
#include "aom_dsp/blend.h"
#include "aom_dsp/x86/synonyms.h"
@@ -30,7 +30,7 @@
const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
- const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+ const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
return v_res_w;
}
@@ -47,12 +47,12 @@
const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
- const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+ const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
return v_res_w;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef __m128i (*blend_unit_fn)(const uint16_t *src0, const uint16_t *src1,
const __m128i v_m0_w, const __m128i v_m1_w);
@@ -66,7 +66,7 @@
const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
- const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+ const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
return v_res_w;
}
@@ -81,7 +81,7 @@
const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
- const __m128i v_res_w = xx_roundn_epu16(v_sum_w, VPX_BLEND_A64_ROUND_BITS);
+ const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
return v_res_w;
}
@@ -100,7 +100,7 @@
// Scale
const __m128i v_ssum_d =
- _mm_srli_epi32(v_sum_d, VPX_BLEND_A64_ROUND_BITS - 1);
+ _mm_srli_epi32(v_sum_d, AOM_BLEND_A64_ROUND_BITS - 1);
// Pack
const __m128i v_pssum_d = _mm_packs_epi32(v_ssum_d, v_ssum_d);
@@ -128,9 +128,9 @@
// Scale
const __m128i v_ssuml_d =
- _mm_srli_epi32(v_suml_d, VPX_BLEND_A64_ROUND_BITS - 1);
+ _mm_srli_epi32(v_suml_d, AOM_BLEND_A64_ROUND_BITS - 1);
const __m128i v_ssumh_d =
- _mm_srli_epi32(v_sumh_d, VPX_BLEND_A64_ROUND_BITS - 1);
+ _mm_srli_epi32(v_sumh_d, AOM_BLEND_A64_ROUND_BITS - 1);
// Pack
const __m128i v_pssum_d = _mm_packs_epi32(v_ssuml_d, v_ssumh_d);
@@ -140,6 +140,6 @@
return v_res_w;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#endif // VPX_DSP_X86_BLEND_SSE4_H_
+#endif // AOM_DSP_X86_BLEND_SSE4_H_
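
Note: xx_roundn_epu16, used throughout blend_sse4.h, behaves as a round-to-nearest right shift. Its scalar equivalent, assuming that behavior:

#include <stdint.h>

/* Round-to-nearest right shift by n (n >= 1): add half, then shift. */
static inline uint32_t roundn_u32(uint32_t x, int n) {
  return (x + (1u << (n - 1))) >> n;
}
/* e.g. roundn_u32(sum, 6) matches the AOM_BLEND_A64_ROUND_BITS shift. */
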
diff --git a/aom_dsp/x86/convolve.h b/aom_dsp/x86/convolve.h
index 9f1f10f..9c0e6aa 100644
--- a/aom_dsp/x86/convolve.h
+++ b/aom_dsp/x86/convolve.h
@@ -7,22 +7,22 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_CONVOLVE_H_
-#define VPX_DSP_X86_CONVOLVE_H_
+#ifndef AOM_DSP_X86_CONVOLVE_H_
+#define AOM_DSP_X86_CONVOLVE_H_
#include <assert.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/aom_convolve.h"
typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
uint8_t *output_ptr, ptrdiff_t out_pitch,
uint32_t output_height, const int16_t *filter);
#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \
- void vpx_convolve8_##name##_##opt( \
+ void aom_convolve8_##name##_##opt( \
const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \
const int16_t *filter_y, int y_step_q4, int w, int h) { \
@@ -30,39 +30,39 @@
assert(step_q4 == 16); \
if (filter[0] | filter[1] | filter[2]) { \
while (w >= 16) { \
- vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
+ aom_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
dst_stride, h, filter); \
src += 16; \
dst += 16; \
w -= 16; \
} \
if (w == 8) { \
- vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \
+ aom_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \
dst_stride, h, filter); \
} else if (w == 4) { \
- vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \
+ aom_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \
dst_stride, h, filter); \
} \
} else { \
while (w >= 16) { \
- vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \
+ aom_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \
dst_stride, h, filter); \
src += 16; \
dst += 16; \
w -= 16; \
} \
if (w == 8) { \
- vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \
+ aom_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \
dst_stride, h, filter); \
} else if (w == 4) { \
- vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \
+ aom_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \
dst_stride, h, filter); \
} \
} \
}
#define FUN_CONV_2D(avg, opt) \
- void vpx_convolve8_##avg##opt( \
+ void aom_convolve8_##avg##opt( \
const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \
const int16_t *filter_y, int y_step_q4, int w, int h) { \
@@ -75,24 +75,24 @@
if (filter_x[0] || filter_x[1] || filter_x[2] || filter_y[0] || \
filter_y[1] || filter_y[2]) { \
DECLARE_ALIGNED(16, uint8_t, fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 7)]); \
- vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, \
+ aom_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, \
MAX_SB_SIZE, filter_x, x_step_q4, filter_y, \
y_step_q4, w, h + 7); \
- vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * MAX_SB_SIZE, MAX_SB_SIZE, \
+ aom_convolve8_##avg##vert_##opt(fdata2 + 3 * MAX_SB_SIZE, MAX_SB_SIZE, \
dst, dst_stride, filter_x, x_step_q4, \
filter_y, y_step_q4, w, h); \
} else { \
DECLARE_ALIGNED(16, uint8_t, fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 1)]); \
- vpx_convolve8_horiz_##opt(src, src_stride, fdata2, MAX_SB_SIZE, \
+ aom_convolve8_horiz_##opt(src, src_stride, fdata2, MAX_SB_SIZE, \
filter_x, x_step_q4, filter_y, y_step_q4, w, \
h + 1); \
- vpx_convolve8_##avg##vert_##opt(fdata2, MAX_SB_SIZE, dst, dst_stride, \
+ aom_convolve8_##avg##vert_##opt(fdata2, MAX_SB_SIZE, dst, dst_stride, \
filter_x, x_step_q4, filter_y, \
y_step_q4, w, h); \
} \
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
const ptrdiff_t src_pitch,
@@ -102,7 +102,7 @@
const int16_t *filter, int bd);
#define HIGH_FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \
- void vpx_highbd_convolve8_##name##_##opt( \
+ void aom_highbd_convolve8_##name##_##opt( \
const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, \
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \
const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \
@@ -111,21 +111,21 @@
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
if (filter[0] | filter[1] | filter[2]) { \
while (w >= 16) { \
- vpx_highbd_filter_block1d16_##dir##8_##avg##opt( \
+ aom_highbd_filter_block1d16_##dir##8_##avg##opt( \
src_start, src_stride, dst, dst_stride, h, filter, bd); \
src += 16; \
dst += 16; \
w -= 16; \
} \
while (w >= 8) { \
- vpx_highbd_filter_block1d8_##dir##8_##avg##opt( \
+ aom_highbd_filter_block1d8_##dir##8_##avg##opt( \
src_start, src_stride, dst, dst_stride, h, filter, bd); \
src += 8; \
dst += 8; \
w -= 8; \
} \
while (w >= 4) { \
- vpx_highbd_filter_block1d4_##dir##8_##avg##opt( \
+ aom_highbd_filter_block1d4_##dir##8_##avg##opt( \
src_start, src_stride, dst, dst_stride, h, filter, bd); \
src += 4; \
dst += 4; \
@@ -133,21 +133,21 @@
} \
} else { \
while (w >= 16) { \
- vpx_highbd_filter_block1d16_##dir##2_##avg##opt( \
+ aom_highbd_filter_block1d16_##dir##2_##avg##opt( \
src, src_stride, dst, dst_stride, h, filter, bd); \
src += 16; \
dst += 16; \
w -= 16; \
} \
while (w >= 8) { \
- vpx_highbd_filter_block1d8_##dir##2_##avg##opt( \
+ aom_highbd_filter_block1d8_##dir##2_##avg##opt( \
src, src_stride, dst, dst_stride, h, filter, bd); \
src += 8; \
dst += 8; \
w -= 8; \
} \
while (w >= 4) { \
- vpx_highbd_filter_block1d4_##dir##2_##avg##opt( \
+ aom_highbd_filter_block1d4_##dir##2_##avg##opt( \
src, src_stride, dst, dst_stride, h, filter, bd); \
src += 4; \
dst += 4; \
@@ -156,14 +156,14 @@
} \
} \
if (w) { \
- vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
+ aom_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
filter_x, x_step_q4, filter_y, \
y_step_q4, w, h, bd); \
} \
}
#define HIGH_FUN_CONV_2D(avg, opt) \
- void vpx_highbd_convolve8_##avg##opt( \
+ void aom_highbd_convolve8_##avg##opt( \
const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \
const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \
@@ -174,29 +174,29 @@
filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
DECLARE_ALIGNED(16, uint16_t, \
fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 7)]); \
- vpx_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \
+ aom_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \
CONVERT_TO_BYTEPTR(fdata2), \
MAX_SB_SIZE, filter_x, x_step_q4, \
filter_y, y_step_q4, w, h + 7, bd); \
- vpx_highbd_convolve8_##avg##vert_##opt( \
+ aom_highbd_convolve8_##avg##vert_##opt( \
CONVERT_TO_BYTEPTR(fdata2) + 3 * MAX_SB_SIZE, MAX_SB_SIZE, dst, \
dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); \
} else { \
DECLARE_ALIGNED(16, uint16_t, \
fdata2[MAX_SB_SIZE * (MAX_SB_SIZE + 1)]); \
- vpx_highbd_convolve8_horiz_##opt( \
+ aom_highbd_convolve8_horiz_##opt( \
src, src_stride, CONVERT_TO_BYTEPTR(fdata2), MAX_SB_SIZE, \
filter_x, x_step_q4, filter_y, y_step_q4, w, h + 1, bd); \
- vpx_highbd_convolve8_##avg##vert_##opt( \
+ aom_highbd_convolve8_##avg##vert_##opt( \
CONVERT_TO_BYTEPTR(fdata2), MAX_SB_SIZE, dst, dst_stride, \
filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); \
} \
} else { \
- vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \
+ aom_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \
filter_x, x_step_q4, filter_y, y_step_q4, \
w, h, bd); \
} \
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
-#endif // VPX_DSP_X86_CONVOLVE_H_
+#endif // AOM_DSP_X86_CONVOLVE_H_
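
Note: FUN_CONV_1D and its high-bit-depth sibling are code generators; each instantiation token-pastes one public entry point that tiles the width into 16/8/4-column strips and selects an 8-tap or 2-tap kernel from the filter taps. The pattern in miniature (names and the reduced signature are illustrative, and the tap-selection branch is elided):

#define MAKE_CONV_WRAPPER(name, opt)                                  \
  void convolve_##name##_##opt(const unsigned char *src, int w) {     \
    while (w >= 16) { /* call 16-wide kernel */ src += 16; w -= 16; } \
    if (w == 8) { /* 8-wide kernel */                                 \
    } else if (w == 4) { /* 4-wide kernel */                          \
    }                                                                 \
  }

MAKE_CONV_WRAPPER(horiz, sse2) /* expands to convolve_horiz_sse2(...) */
MAKE_CONV_WRAPPER(vert, sse2)  /* expands to convolve_vert_sse2(...) */
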
diff --git a/aom_dsp/x86/deblock_sse2.asm b/aom_dsp/x86/deblock_sse2.asm
index c3d23a3..bae6cf4 100644
--- a/aom_dsp/x86/deblock_sse2.asm
+++ b/aom_dsp/x86/deblock_sse2.asm
@@ -83,7 +83,7 @@
add rbx, 16
%endmacro
-;void vpx_post_proc_down_and_across_mb_row_sse2
+;void aom_post_proc_down_and_across_mb_row_sse2
;(
; unsigned char *src_ptr,
; unsigned char *dst_ptr,
@@ -93,8 +93,8 @@
; int *flimits,
; int size
;)
-global sym(vpx_post_proc_down_and_across_mb_row_sse2) PRIVATE
-sym(vpx_post_proc_down_and_across_mb_row_sse2):
+global sym(aom_post_proc_down_and_across_mb_row_sse2) PRIVATE
+sym(aom_post_proc_down_and_across_mb_row_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -230,11 +230,11 @@
ret
%undef flimit
-;void vpx_mbpost_proc_down_xmm(unsigned char *dst,
+;void aom_mbpost_proc_down_xmm(unsigned char *dst,
; int pitch, int rows, int cols,int flimit)
-extern sym(vpx_rv)
-global sym(vpx_mbpost_proc_down_xmm) PRIVATE
-sym(vpx_mbpost_proc_down_xmm):
+extern sym(aom_rv)
+global sym(aom_mbpost_proc_down_xmm) PRIVATE
+sym(aom_mbpost_proc_down_xmm):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -257,7 +257,7 @@
%define flimit4 [rsp+128]
%if ABI_IS_32BIT=0
- lea r8, [GLOBAL(sym(vpx_rv))]
+ lea r8, [GLOBAL(sym(aom_rv))]
%endif
;rows +=8;
@@ -403,13 +403,13 @@
and rcx, 127
%if ABI_IS_32BIT=1 && CONFIG_PIC=1
push rax
- lea rax, [GLOBAL(sym(vpx_rv))]
- movdqu xmm4, [rax + rcx*2] ;vpx_rv[rcx*2]
+ lea rax, [GLOBAL(sym(aom_rv))]
+ movdqu xmm4, [rax + rcx*2] ;aom_rv[rcx*2]
pop rax
%elif ABI_IS_32BIT=0
- movdqu xmm4, [r8 + rcx*2] ;vpx_rv[rcx*2]
+ movdqu xmm4, [r8 + rcx*2] ;aom_rv[rcx*2]
%else
- movdqu xmm4, [sym(vpx_rv) + rcx*2]
+ movdqu xmm4, [sym(aom_rv) + rcx*2]
%endif
paddw xmm1, xmm4
@@ -462,10 +462,10 @@
%undef flimit4
-;void vpx_mbpost_proc_across_ip_xmm(unsigned char *src,
+;void aom_mbpost_proc_across_ip_xmm(unsigned char *src,
; int pitch, int rows, int cols,int flimit)
-global sym(vpx_mbpost_proc_across_ip_xmm) PRIVATE
-sym(vpx_mbpost_proc_across_ip_xmm):
+global sym(aom_mbpost_proc_across_ip_xmm) PRIVATE
+sym(aom_mbpost_proc_across_ip_xmm):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
diff --git a/aom_dsp/x86/fwd_dct32x32_impl_avx2.h b/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
index 1d129bf..891b952 100644
--- a/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
+++ b/aom_dsp/x86/fwd_dct32x32_impl_avx2.h
@@ -10,7 +10,7 @@
#include <immintrin.h> // AVX2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/txfm_common.h"
#define pair256_set_epi16(a, b) \
@@ -2939,7 +2939,7 @@
tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0);
tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0);
// ... and here.
- // PS: also change code in vp9/encoder/vp9_dct.c
+ // PS: also change code in av1/encoder/av1_dct.c
tr2_0 = _mm256_add_epi16(tr2_0, kOne);
tr2_1 = _mm256_add_epi16(tr2_1, kOne);
tr2_2 = _mm256_add_epi16(tr2_2, kOne);
diff --git a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
index 04e3e37..3b1d5ba 100644
--- a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
+++ b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h
@@ -21,31 +21,31 @@
#define ADD_EPI16 _mm_adds_epi16
#define SUB_EPI16 _mm_subs_epi16
#if FDCT32x32_HIGH_PRECISION
-void vpx_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void aom_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vpx_fdct32(temp_in, temp_out, 0);
+ aom_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
out[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
}
}
-#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rows_c
#else
-void vpx_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void aom_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
int i, j;
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
- vpx_fdct32(temp_in, temp_out, 1);
+ aom_fdct32(temp_in, temp_out, 1);
for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
}
}
-#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rd_rows_c
#endif // FDCT32x32_HIGH_PRECISION
#else
#define ADD_EPI16 _mm_add_epi16
@@ -3145,7 +3145,7 @@
tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
// ... and here.
- // PS: also change code in vp9/encoder/vp9_dct.c
+ // PS: also change code in av1/encoder/av1_dct.c
tr2_0 = _mm_add_epi16(tr2_0, kOne);
tr2_1 = _mm_add_epi16(tr2_1, kOne);
tr2_2 = _mm_add_epi16(tr2_2, kOne);
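
Note: the rounding in aom_fdct32x32_rows_c above, (x + 1 + (x < 0)) >> 2, divides by 4 rounding to nearest with ties toward zero; the extra (x < 0) compensates for the arithmetic shift's floor behavior on negatives. In scalar form:

/* Round x/4 to nearest, ties toward zero, as the row pass above does. */
static int round_div4(int x) { return (x + 1 + (x < 0)) >> 2; }
/* round_div4(6) == 1 (1.5 -> 1), round_div4(7) == 2 (1.75 -> 2),
 * round_div4(-6) == -1, round_div4(-7) == -2 */
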
diff --git a/aom_dsp/x86/fwd_txfm_avx2.c b/aom_dsp/x86/fwd_txfm_avx2.c
index 325a5e9..d10e822 100644
--- a/aom_dsp/x86/fwd_txfm_avx2.c
+++ b/aom_dsp/x86/fwd_txfm_avx2.c
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#define FDCT32x32_2D_AVX2 vpx_fdct32x32_rd_avx2
+#define FDCT32x32_2D_AVX2 aom_fdct32x32_rd_avx2
#define FDCT32x32_HIGH_PRECISION 0
#include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h"
#undef FDCT32x32_2D_AVX2
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D_AVX2 vpx_fdct32x32_avx2
+#define FDCT32x32_2D_AVX2 aom_fdct32x32_avx2
#define FDCT32x32_HIGH_PRECISION 1
#include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT
#undef FDCT32x32_2D_AVX2
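
Note: fwd_txfm_avx2.c shows the "template header" idiom used for all the 32x32 DCT builds: define the function-name and precision macros, include the implementation header, undef, repeat. A self-contained miniature of the same trick (single file, illustrative names):

/* Each expansion of IMPL mints a distinct function, exactly as each
 * #include of fwd_dct32x32_impl_*.h does above. */
#define IMPL \
  int FDCT_NAME(void) { return HIGH_PRECISION; }

#define FDCT_NAME fdct32x32_rd
#define HIGH_PRECISION 0
IMPL
#undef FDCT_NAME
#undef HIGH_PRECISION

#define FDCT_NAME fdct32x32
#define HIGH_PRECISION 1
IMPL /* second expansion: a second, independent function */
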
diff --git a/aom_dsp/x86/fwd_txfm_impl_sse2.h b/aom_dsp/x86/fwd_txfm_impl_sse2.h
index 8b57fab..83c9d3b 100644
--- a/aom_dsp/x86/fwd_txfm_impl_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_impl_sse2.h
@@ -10,7 +10,7 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -98,7 +98,7 @@
_mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
if (test) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -169,7 +169,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -191,7 +191,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&t0, &t1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -230,7 +230,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
- vpx_highbd_fdct4x4_c(input, output, stride);
+ aom_highbd_fdct4x4_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -313,7 +313,7 @@
overflow =
check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
}
@@ -328,7 +328,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -371,7 +371,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -401,7 +401,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -414,7 +414,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -457,7 +457,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
if (overflow) {
- vpx_highbd_fdct8x8_c(input, output, stride);
+ aom_highbd_fdct8x8_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -720,7 +720,7 @@
overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
&input4, &input5, &input6, &input7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -740,7 +740,7 @@
check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
&step1_4, &step1_5, &step1_6, &step1_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -760,7 +760,7 @@
overflow =
check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -774,7 +774,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -796,7 +796,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -817,7 +817,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -830,7 +830,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -853,7 +853,7 @@
overflow =
check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -881,7 +881,7 @@
overflow =
check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -901,7 +901,7 @@
check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
&step3_4, &step3_5, &step3_6, &step3_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -924,7 +924,7 @@
overflow =
check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -944,7 +944,7 @@
check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
&step1_4, &step1_5, &step1_6, &step1_7);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -966,7 +966,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
@@ -987,7 +987,7 @@
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
if (overflow) {
- vpx_highbd_fdct16x16_c(input, output, stride);
+ aom_highbd_fdct16x16_c(input, output, stride);
return;
}
#endif // DCT_HIGH_BIT_DEPTH
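
Note: every DCT_HIGH_BIT_DEPTH block above follows one guard pattern: after each 16-bit butterfly stage, test for int16 overflow and, if it trips, abandon the SIMD pipeline and redo the whole transform in the wide C path. A scalar analog of that guarded stage (names are stand-ins, not the library's helpers):

#include <limits.h>
#include <stdint.h>

static int fits_i16(int32_t v) { return v >= INT16_MIN && v <= INT16_MAX; }

/* Try the narrow pipeline; on any overflow, fall back wholesale. */
static void butterfly_guarded(const int16_t *in, int16_t *out, int n,
                              void (*wide_fallback)(const int16_t *,
                                                    int16_t *, int)) {
  for (int i = 0; i < n; i += 2) {
    const int32_t a = in[i] + in[i + 1];
    const int32_t b = in[i] - in[i + 1];
    if (!fits_i16(a) || !fits_i16(b)) {
      wide_fallback(in, out, n); /* mirrors the aom_highbd_*_c redo above */
      return;
    }
    out[i] = (int16_t)a;
    out[i + 1] = (int16_t)b;
  }
}
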
diff --git a/aom_dsp/x86/fwd_txfm_sse2.c b/aom_dsp/x86/fwd_txfm_sse2.c
index d5b2f0d..7b66e28 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/aom_dsp/x86/fwd_txfm_sse2.c
@@ -10,12 +10,12 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"
-void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0, in1;
__m128i tmp;
const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@
output[0] = (tran_low_t)_mm_cvtsi128_si32(in0);
}
-void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void aom_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
__m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
__m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
__m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,7 +84,7 @@
output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
}
-void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+void aom_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
@@ -153,7 +153,7 @@
output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
}
-void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
int stride) {
__m128i in0, in1, in2, in3;
__m128i u0, u1;
@@ -226,47 +226,47 @@
}
#define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vpx_fdct4x4_sse2
-#define FDCT8x8_2D vpx_fdct8x8_sse2
-#define FDCT16x16_2D vpx_fdct16x16_sse2
+#define FDCT4x4_2D aom_fdct4x4_sse2
+#define FDCT8x8_2D aom_fdct8x8_sse2
+#define FDCT16x16_2D aom_fdct16x16_sse2
#include "aom_dsp/x86/fwd_txfm_impl_sse2.h"
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vpx_fdct32x32_rd_sse2
+#define FDCT32x32_2D aom_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vpx_fdct32x32_sse2
+#define FDCT32x32_2D aom_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vpx_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vpx_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vpx_highbd_fdct16x16_sse2
+#define FDCT4x4_2D aom_highbd_fdct4x4_sse2
+#define FDCT8x8_2D aom_highbd_fdct8x8_sse2
+#define FDCT16x16_2D aom_highbd_fdct16x16_sse2
#include "aom_dsp/x86/fwd_txfm_impl_sse2.h" // NOLINT
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D
-#define FDCT32x32_2D vpx_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D aom_highbd_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
-#define FDCT32x32_2D vpx_highbd_fdct32x32_sse2
+#define FDCT32x32_2D aom_highbd_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/fwd_txfm_sse2.h b/aom_dsp/x86/fwd_txfm_sse2.h
index 5201e76..faf6d52 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_sse2.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_FWD_TXFM_SSE2_H_
-#define VPX_DSP_X86_FWD_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_FWD_TXFM_SSE2_H_
+#define AOM_DSP_X86_FWD_TXFM_SSE2_H_
#ifdef __cplusplus
extern "C" {
@@ -244,7 +244,7 @@
}
static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
__m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -253,11 +253,11 @@
_mm_store_si128((__m128i *)(dst_ptr + 4), out1);
#else
_mm_store_si128((__m128i *)(dst_ptr), *poutput);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
__m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -266,7 +266,7 @@
_mm_storeu_si128((__m128i *)(dst_ptr + 4), out1);
#else
_mm_storeu_si128((__m128i *)(dst_ptr), *poutput);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
}
static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1,
@@ -368,4 +368,4 @@
} // extern "C"
#endif
-#endif // VPX_DSP_X86_FWD_TXFM_SSE2_H_
+#endif // AOM_DSP_X86_FWD_TXFM_SSE2_H_
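
Note: store_output/storeu_output widen because CONFIG_AOM_HIGHBITDEPTH makes tran_low_t 32-bit: _mm_cmplt_epi16 against zero yields the sign mask, and the unpacklo/unpackhi interleave sign-extends eight int16 lanes to int32 (SSE2 lacks a dedicated widening instruction). The scalar meaning:

#include <stdint.h>

/* What the high-bit-depth branch stores: coefficients sign-extended
 * from 16 to 32 bits (tran_low_t is 32-bit in that build). */
static void store_output_scalar(const int16_t coeff[8], int32_t *dst) {
  for (int i = 0; i < 8; ++i) dst[i] = coeff[i]; /* implicit sign extend */
}
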
diff --git a/aom_dsp/x86/halfpix_variance_impl_sse2.asm b/aom_dsp/x86/halfpix_variance_impl_sse2.asm
index b91d1dc..66e752e 100644
--- a/aom_dsp/x86/halfpix_variance_impl_sse2.asm
+++ b/aom_dsp/x86/halfpix_variance_impl_sse2.asm
@@ -10,15 +10,15 @@
%include "aom_ports/x86_abi_support.asm"
-;void vpx_half_horiz_vert_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_horiz_vert_variance16x_h_sse2(unsigned char *ref,
; int ref_stride,
; unsigned char *src,
; int src_stride,
; unsigned int height,
; int *sum,
; unsigned int *sumsquared)
-global sym(vpx_half_horiz_vert_variance16x_h_sse2) PRIVATE
-sym(vpx_half_horiz_vert_variance16x_h_sse2):
+global sym(aom_half_horiz_vert_variance16x_h_sse2) PRIVATE
+sym(aom_half_horiz_vert_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -45,7 +45,7 @@
lea rsi, [rsi + rax]
-vpx_half_horiz_vert_variance16x_h_1:
+aom_half_horiz_vert_variance16x_h_1:
movdqu xmm1, XMMWORD PTR [rsi] ;
movdqu xmm2, XMMWORD PTR [rsi+1] ;
pavgb xmm1, xmm2 ; xmm1 = avg(xmm1,xmm3) horizontal line i+1
@@ -77,7 +77,7 @@
lea rdi, [rdi + rdx]
sub rcx, 1 ;
- jnz vpx_half_horiz_vert_variance16x_h_1 ;
+ jnz aom_half_horiz_vert_variance16x_h_1 ;
pxor xmm1, xmm1
pxor xmm5, xmm5
@@ -123,15 +123,15 @@
ret
-;void vpx_half_vert_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_vert_variance16x_h_sse2(unsigned char *ref,
; int ref_stride,
; unsigned char *src,
; int src_stride,
; unsigned int height,
; int *sum,
; unsigned int *sumsquared)
-global sym(vpx_half_vert_variance16x_h_sse2) PRIVATE
-sym(vpx_half_vert_variance16x_h_sse2):
+global sym(aom_half_vert_variance16x_h_sse2) PRIVATE
+sym(aom_half_vert_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -154,7 +154,7 @@
lea rsi, [rsi + rax ]
pxor xmm0, xmm0
-vpx_half_vert_variance16x_h_1:
+aom_half_vert_variance16x_h_1:
movdqu xmm3, XMMWORD PTR [rsi]
pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3)
@@ -182,7 +182,7 @@
lea rdi, [rdi + rdx]
sub rcx, 1
- jnz vpx_half_vert_variance16x_h_1
+ jnz aom_half_vert_variance16x_h_1
pxor xmm1, xmm1
pxor xmm5, xmm5
@@ -228,15 +228,15 @@
ret
-;void vpx_half_horiz_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_horiz_variance16x_h_sse2(unsigned char *ref,
;                                        int ref_stride,
; unsigned char *src,
; int src_stride,
; unsigned int height,
; int *sum,
; unsigned int *sumsquared)
-global sym(vpx_half_horiz_variance16x_h_sse2) PRIVATE
-sym(vpx_half_horiz_variance16x_h_sse2):
+global sym(aom_half_horiz_variance16x_h_sse2) PRIVATE
+sym(aom_half_horiz_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@@ -257,7 +257,7 @@
pxor xmm0, xmm0 ;
-vpx_half_horiz_variance16x_h_1:
+aom_half_horiz_variance16x_h_1:
movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15
movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
@@ -284,7 +284,7 @@
lea rdi, [rdi + rdx]
sub rcx, 1 ;
- jnz vpx_half_horiz_variance16x_h_1 ;
+ jnz aom_half_horiz_variance16x_h_1 ;
pxor xmm1, xmm1
pxor xmm5, xmm5
@@ -335,7 +335,7 @@
xmm_bi_rd:
times 8 dw 64
align 16
-vpx_bilinear_filters_sse2:
+aom_bilinear_filters_sse2:
dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
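
Note: the half-pel variance kernels above lean on pavgb, which averages two bytes with upward rounding; the horiz+vert path applies it once per axis. Its scalar definition:

#include <stdint.h>

/* pavgb semantics: (a + b + 1) >> 1, i.e. average rounded up. */
static inline uint8_t pavgb(uint8_t a, uint8_t b) {
  return (uint8_t)(((unsigned)a + b + 1) >> 1);
}
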
diff --git a/aom_dsp/x86/halfpix_variance_sse2.c b/aom_dsp/x86/halfpix_variance_sse2.c
index 8d26b75..1de0c43 100644
--- a/aom_dsp/x86/halfpix_variance_sse2.c
+++ b/aom_dsp/x86/halfpix_variance_sse2.c
@@ -10,32 +10,32 @@
#include <assert.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
-void vpx_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
+void aom_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
int ref_stride,
const unsigned char *src,
int src_stride, unsigned int height,
int *sum, unsigned int *sumsquared);
-void vpx_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
const unsigned char *src, int src_stride,
unsigned int height, int *sum,
unsigned int *sumsquared);
-void vpx_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
const unsigned char *src, int src_stride,
unsigned int height, int *sum,
unsigned int *sumsquared);
-uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
int src_stride,
const unsigned char *dst,
int dst_stride, uint32_t *sse) {
int xsum0;
unsigned int xxsum0;
- vpx_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+ aom_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
&xsum0, &xxsum0);
*sse = xxsum0;
@@ -44,13 +44,13 @@
return (xxsum0 - ((uint32_t)((int64_t)xsum0 * xsum0) >> 8));
}
-uint32_t vpx_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
int src_stride,
const unsigned char *dst,
int dst_stride, uint32_t *sse) {
int xsum0;
unsigned int xxsum0;
- vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
+ aom_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
&xxsum0);
*sse = xxsum0;
@@ -59,14 +59,14 @@
return (xxsum0 - ((uint32_t)((int64_t)xsum0 * xsum0) >> 8));
}
-uint32_t vpx_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
int src_stride,
const unsigned char *dst,
int dst_stride, uint32_t *sse) {
int xsum0;
unsigned int xxsum0;
- vpx_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+ aom_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
&xsum0, &xxsum0);
*sse = xxsum0;
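
Note: each halfpixvar16x16 wrapper above returns the textbook identity var = SSE - sum^2 / N with N = 16 * 16 = 256, hence the >> 8:

#include <stdint.h>

/* Variance of a 16x16 block from its sum and sum of squares. */
static uint32_t variance_16x16(uint32_t sse, int sum) {
  return sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}
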
diff --git a/aom_dsp/x86/highbd_loopfilter_sse2.c b/aom_dsp/x86/highbd_loopfilter_sse2.c
index 15b8283..ee4b83e 100644
--- a/aom_dsp/x86/highbd_loopfilter_sse2.c
+++ b/aom_dsp/x86/highbd_loopfilter_sse2.c
@@ -10,7 +10,7 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
#include "aom_ports/emmintrin_compat.h"
@@ -48,7 +48,7 @@
// TODO(debargha, peter): Break up large functions into smaller ones
// in this file.
-void vpx_highbd_lpf_horizontal_edge_8_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_8_sse2(uint16_t *s, int p,
const uint8_t *_blimit,
const uint8_t *_limit,
const uint8_t *_thresh, int bd) {
@@ -475,15 +475,15 @@
_mm_store_si128((__m128i *)(s - 0 * p), q0);
}
-void vpx_highbd_lpf_horizontal_edge_16_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_edge_16_sse2(uint16_t *s, int p,
const uint8_t *_blimit,
const uint8_t *_limit,
const uint8_t *_thresh, int bd) {
- vpx_highbd_lpf_horizontal_edge_8_sse2(s, p, _blimit, _limit, _thresh, bd);
- vpx_highbd_lpf_horizontal_edge_8_sse2(s + 8, p, _blimit, _limit, _thresh, bd);
+ aom_highbd_lpf_horizontal_edge_8_sse2(s, p, _blimit, _limit, _thresh, bd);
+ aom_highbd_lpf_horizontal_edge_8_sse2(s + 8, p, _blimit, _limit, _thresh, bd);
}
-void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
const uint8_t *_blimit,
const uint8_t *_limit,
const uint8_t *_thresh, int bd) {
@@ -640,7 +640,7 @@
filt = _mm_adds_epi16(filt, work_a);
filt = _mm_adds_epi16(filt, work_a);
filt = _mm_adds_epi16(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = signed_char_clamp_bd_sse2(filt, bd);
filt = _mm_and_si128(filt, mask);
@@ -709,15 +709,15 @@
_mm_store_si128((__m128i *)(s + 2 * p), q2);
}
-void vpx_highbd_lpf_horizontal_8_dual_sse2(
+void aom_highbd_lpf_horizontal_8_dual_sse2(
uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
const uint8_t *_thresh1, int bd) {
- vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
- vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+ aom_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
+ aom_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
}
-void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
const uint8_t *_blimit,
const uint8_t *_limit,
const uint8_t *_thresh, int bd) {
@@ -834,7 +834,7 @@
filt = _mm_adds_epi16(filt, work_a);
filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
@@ -879,12 +879,12 @@
_mm_storeu_si128((__m128i *)(s + 1 * p), q1);
}
-void vpx_highbd_lpf_horizontal_4_dual_sse2(
+void aom_highbd_lpf_horizontal_4_dual_sse2(
uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
const uint8_t *_thresh1, int bd) {
- vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
- vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+ aom_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
+ aom_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
}
static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
@@ -999,7 +999,7 @@
highbd_transpose(src1, in_p, dest1, out_p, 1);
}
-void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
@@ -1013,7 +1013,7 @@
highbd_transpose(src, p, dst, 8, 1);
// Loop filtering
- vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
+ aom_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
src[0] = t_dst;
dst[0] = s - 4;
@@ -1022,7 +1022,7 @@
highbd_transpose(src, 8, dst, p, 1);
}
-void vpx_highbd_lpf_vertical_4_dual_sse2(
+void aom_highbd_lpf_vertical_4_dual_sse2(
uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
@@ -1034,7 +1034,7 @@
highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
// Loop filtering
- vpx_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
+ aom_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
thresh0, blimit1, limit1, thresh1, bd);
src[0] = t_dst;
src[1] = t_dst + 8;
@@ -1045,7 +1045,7 @@
highbd_transpose(src, 16, dst, p, 2);
}
-void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
@@ -1059,7 +1059,7 @@
highbd_transpose(src, p, dst, 8, 1);
// Loop filtering
- vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
+ aom_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
src[0] = t_dst;
dst[0] = s - 4;
@@ -1068,7 +1068,7 @@
highbd_transpose(src, 8, dst, p, 1);
}
-void vpx_highbd_lpf_vertical_8_dual_sse2(
+void aom_highbd_lpf_vertical_8_dual_sse2(
uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
@@ -1080,7 +1080,7 @@
highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
// Loop filtering
- vpx_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
+ aom_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
thresh0, blimit1, limit1, thresh1, bd);
src[0] = t_dst;
src[1] = t_dst + 8;
@@ -1092,7 +1092,7 @@
highbd_transpose(src, 16, dst, p, 2);
}
-void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void aom_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
@@ -1108,7 +1108,7 @@
highbd_transpose(src, p, dst, 8, 2);
// Loop filtering
- vpx_highbd_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh,
+ aom_highbd_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh,
bd);
src[0] = t_dst;
src[1] = t_dst + 8 * 8;
@@ -1119,7 +1119,7 @@
highbd_transpose(src, 8, dst, p, 2);
}
-void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
+void aom_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
@@ -1130,7 +1130,7 @@
highbd_transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
// Loop filtering
- vpx_highbd_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ aom_highbd_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit,
thresh, bd);
// Transpose back
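
Note: all the aom_highbd_lpf_vertical_* functions above reuse the horizontal kernels by transposing the edge columns into a scratch block, filtering it as rows, and transposing back. A sketch under those assumptions (the plain transpose and the row-filter callback stand in for highbd_transpose and the real horizontal filter):

#include <stdint.h>

static void transpose8x8(const uint16_t *src, int sstride, uint16_t *dst,
                         int dstride) {
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c) dst[c * dstride + r] = src[r * sstride + c];
}

/* Vertical filter = transpose, horizontal filter, transpose back. */
static void lpf_vertical_via_transpose(uint16_t *s, int pitch,
                                       void (*filter_rows)(uint16_t *, int)) {
  uint16_t t[8 * 8];
  transpose8x8(s - 4, pitch, t, 8); /* 8 columns around the edge -> rows */
  filter_rows(t + 4 * 8, 8);        /* same math as the horizontal path */
  transpose8x8(t, 8, s - 4, pitch); /* write filtered pixels back */
}
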
diff --git a/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index ecde8c2..34028d9 100644
--- a/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -10,13 +10,13 @@
#include <emmintrin.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
+#if CONFIG_AOM_HIGHBITDEPTH
+void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
const int16_t *quant_ptr,
@@ -92,7 +92,7 @@
*eob_ptr = eob_i + 1;
}
-void vpx_highbd_quantize_b_32x32_sse2(
+void aom_highbd_quantize_b_32x32_sse2(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/x86/highbd_sad4d_sse2.asm b/aom_dsp/x86/highbd_sad4d_sse2.asm
index 6c2a61e..54501d1 100644
--- a/aom_dsp/x86/highbd_sad4d_sse2.asm
+++ b/aom_dsp/x86/highbd_sad4d_sse2.asm
@@ -209,7 +209,7 @@
HIGH_PROCESS_32x2x4 0, %4, %5, (%4 + 32), (%5 + 32), %6
%endmacro
-; void vpx_highbd_sadNxNx4d_sse2(uint8_t *src, int src_stride,
+; void aom_highbd_sadNxNx4d_sse2(uint8_t *src, int src_stride,
; uint8_t *ref[4], int ref_stride,
; uint32_t res[4]);
; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16 or 8x8
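
Note: the x4d prototype above amortizes one source read across four references. The scalar meaning of res[4]:

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences of one NxN source block against four
 * reference blocks, accumulated in a single pass. */
static void sad_nxn_x4(const uint8_t *src, int src_stride,
                       const uint8_t *const ref[4], int ref_stride, int n,
                       uint32_t res[4]) {
  res[0] = res[1] = res[2] = res[3] = 0;
  for (int r = 0; r < n; ++r)
    for (int c = 0; c < n; ++c)
      for (int k = 0; k < 4; ++k)
        res[k] += abs(src[r * src_stride + c] - ref[k][r * ref_stride + c]);
}
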
diff --git a/aom_dsp/x86/highbd_sad_sse2.asm b/aom_dsp/x86/highbd_sad_sse2.asm
index bc4b28d..2da8c83 100644
--- a/aom_dsp/x86/highbd_sad_sse2.asm
+++ b/aom_dsp/x86/highbd_sad_sse2.asm
@@ -50,7 +50,7 @@
%endif
%endmacro
-; unsigned int vpx_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro HIGH_SAD64XN 1-2 0
HIGH_SAD_FN 64, %1, 5, %2
@@ -157,7 +157,7 @@
HIGH_SAD64XN 32, 1 ; highbd_sad64x32_avg_sse2
-; unsigned int vpx_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro HIGH_SAD32XN 1-2 0
HIGH_SAD_FN 32, %1, 5, %2
@@ -225,7 +225,7 @@
HIGH_SAD32XN 32, 1 ; highbd_sad32x32_avg_sse2
HIGH_SAD32XN 16, 1 ; highbd_sad32x16_avg_sse2
-; unsigned int vpx_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro HIGH_SAD16XN 1-2 0
HIGH_SAD_FN 16, %1, 5, %2
@@ -294,7 +294,7 @@
HIGH_SAD16XN 8, 1 ; highbd_sad16x8_avg_sse2
-; unsigned int vpx_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro HIGH_SAD8XN 1-2 0
HIGH_SAD_FN 8, %1, 7, %2
diff --git a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
index 30ee81b..1175742 100644
--- a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
+++ b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
@@ -30,7 +30,7 @@
SECTION .text
-; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
+; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
; int x_offset, int y_offset,
; const uint8_t *dst, ptrdiff_t dst_stride,
; int height, unsigned int *sse);
diff --git a/aom_dsp/x86/highbd_subtract_sse2.c b/aom_dsp/x86/highbd_subtract_sse2.c
index e7d5ac2..23d6630 100644
--- a/aom_dsp/x86/highbd_subtract_sse2.c
+++ b/aom_dsp/x86/highbd_subtract_sse2.c
@@ -12,8 +12,8 @@
#include <emmintrin.h>
#include <stddef.h>
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
typedef void (*SubtractWxHFuncType)(int16_t *diff, ptrdiff_t diff_stride,
const uint16_t *src, ptrdiff_t src_stride,
@@ -349,7 +349,7 @@
return ret_func_ptr;
}
-void vpx_highbd_subtract_block_sse2(int rows, int cols, int16_t *diff,
+void aom_highbd_subtract_block_sse2(int rows, int cols, int16_t *diff,
ptrdiff_t diff_stride, const uint8_t *src8,
ptrdiff_t src_stride, const uint8_t *pred8,
ptrdiff_t pred_stride, int bd) {
diff --git a/aom_dsp/x86/highbd_variance_impl_sse2.asm b/aom_dsp/x86/highbd_variance_impl_sse2.asm
index 1bf3abb..3abb44f 100644
--- a/aom_dsp/x86/highbd_variance_impl_sse2.asm
+++ b/aom_dsp/x86/highbd_variance_impl_sse2.asm
@@ -11,7 +11,7 @@
%include "aom_ports/x86_abi_support.asm"
-;unsigned int vpx_highbd_calc16x16var_sse2
+;unsigned int aom_highbd_calc16x16var_sse2
;(
; unsigned char * src_ptr,
; int source_stride,
@@ -20,8 +20,8 @@
; unsigned int * SSE,
; int * Sum
;)
-global sym(vpx_highbd_calc16x16var_sse2) PRIVATE
-sym(vpx_highbd_calc16x16var_sse2):
+global sym(aom_highbd_calc16x16var_sse2) PRIVATE
+sym(aom_highbd_calc16x16var_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@@ -164,7 +164,7 @@
ret
-;unsigned int vpx_highbd_calc8x8var_sse2
+;unsigned int aom_highbd_calc8x8var_sse2
;(
; unsigned char * src_ptr,
; int source_stride,
@@ -173,8 +173,8 @@
; unsigned int * SSE,
; int * Sum
;)
-global sym(vpx_highbd_calc8x8var_sse2) PRIVATE
-sym(vpx_highbd_calc8x8var_sse2):
+global sym(aom_highbd_calc8x8var_sse2) PRIVATE
+sym(aom_highbd_calc8x8var_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
diff --git a/aom_dsp/x86/highbd_variance_sse2.c b/aom_dsp/x86/highbd_variance_sse2.c
index 90ef4d4..d19214c 100644
--- a/aom_dsp/x86/highbd_variance_sse2.c
+++ b/aom_dsp/x86/highbd_variance_sse2.c
@@ -10,8 +10,8 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
@@ -19,11 +19,11 @@
const uint16_t *ref, int ref_stride,
uint32_t *sse, int *sum);
-uint32_t vpx_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride,
+uint32_t aom_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride,
const uint16_t *ref, int ref_stride,
uint32_t *sse, int *sum);
-uint32_t vpx_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride,
+uint32_t aom_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride,
const uint16_t *ref, int ref_stride,
uint32_t *sse, int *sum);
@@ -93,32 +93,32 @@
}
#define HIGH_GET_VAR(S) \
- void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
+ void aom_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
const uint8_t *ref8, int ref_stride, \
uint32_t *sse, int *sum) { \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
- vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+ aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
sum); \
} \
\
- void vpx_highbd_10_get##S##x##S##var_sse2( \
+ void aom_highbd_10_get##S##x##S##var_sse2( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, uint32_t *sse, int *sum) { \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
- vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+ aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
sum); \
*sum = ROUND_POWER_OF_TWO(*sum, 2); \
*sse = ROUND_POWER_OF_TWO(*sse, 4); \
} \
\
- void vpx_highbd_12_get##S##x##S##var_sse2( \
+ void aom_highbd_12_get##S##x##S##var_sse2( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, uint32_t *sse, int *sum) { \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
- vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+ aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
sum); \
*sum = ROUND_POWER_OF_TWO(*sum, 4); \
*sse = ROUND_POWER_OF_TWO(*sse, 8); \
@@ -130,7 +130,7 @@
#undef HIGH_GET_VAR
#define VAR_FN(w, h, block_size, shift) \
- uint32_t vpx_highbd_8_variance##w##x##h##_sse2( \
+ uint32_t aom_highbd_8_variance##w##x##h##_sse2( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, uint32_t *sse) { \
int sum; \
@@ -138,11 +138,11 @@
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
highbd_8_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
- vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+ aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
return *sse - (((int64_t)sum * sum) >> shift); \
} \
\
- uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
+ uint32_t aom_highbd_10_variance##w##x##h##_sse2( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, uint32_t *sse) { \
int sum; \
@@ -151,12 +151,12 @@
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
highbd_10_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
- vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+ aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
return (var >= 0) ? (uint32_t)var : 0; \
} \
\
- uint32_t vpx_highbd_12_variance##w##x##h##_sse2( \
+ uint32_t aom_highbd_12_variance##w##x##h##_sse2( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, uint32_t *sse) { \
int sum; \
@@ -165,7 +165,7 @@
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
highbd_12_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
- vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+ aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
return (var >= 0) ? (uint32_t)var : 0; \
}
@@ -183,69 +183,69 @@
#undef VAR_FN
-unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
- vpx_highbd_calc16x16var_sse2, 16);
+ aom_highbd_calc16x16var_sse2, 16);
return *sse;
}
-unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
- vpx_highbd_calc16x16var_sse2, 16);
+ aom_highbd_calc16x16var_sse2, 16);
return *sse;
}
-unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
- vpx_highbd_calc16x16var_sse2, 16);
+ aom_highbd_calc16x16var_sse2, 16);
return *sse;
}
-unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
- vpx_highbd_calc8x8var_sse2, 8);
+ aom_highbd_calc8x8var_sse2, 8);
return *sse;
}
-unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
- vpx_highbd_calc8x8var_sse2, 8);
+ aom_highbd_calc8x8var_sse2, 8);
return *sse;
}
-unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
+unsigned int aom_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
const uint8_t *ref8, int ref_stride,
unsigned int *sse) {
int sum;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
- vpx_highbd_calc8x8var_sse2, 8);
+ aom_highbd_calc8x8var_sse2, 8);
return *sse;
}
@@ -253,7 +253,7 @@
// These definitions are for functions defined in
// highbd_subpel_variance_impl_sse2.asm
#define DECL(w, opt) \
- int vpx_highbd_sub_pixel_variance##w##xh_##opt( \
+ int aom_highbd_sub_pixel_variance##w##xh_##opt( \
const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint16_t *dst, ptrdiff_t dst_stride, int height, \
unsigned int *sse, void *unused0, void *unused);
@@ -267,29 +267,29 @@
#undef DECL
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
- uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_8_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ int se = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \
NULL); \
if (w > wf) { \
unsigned int sse2; \
- int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
@@ -300,29 +300,29 @@
return sse - ((cast se * se) >> (wlog2 + hlog2)); \
} \
\
- uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ int se = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \
NULL); \
if (w > wf) { \
uint32_t sse2; \
- int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
@@ -335,7 +335,7 @@
return sse - ((cast se * se) >> (wlog2 + hlog2)); \
} \
\
- uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
int start_row; \
@@ -347,27 +347,27 @@
for (start_row = 0; start_row < h; start_row += 16) { \
uint32_t sse2; \
int height = h - start_row < 16 ? h - start_row : 16; \
- int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + (start_row * src_stride), src_stride, x_offset, y_offset, \
dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL, \
NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf) { \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 16 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \
&sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 32 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \
height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
- se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 48 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \
height, &sse2, NULL, NULL); \
@@ -402,7 +402,7 @@
// The 2 unused parameters are place holders for PIC enabled build.
#define DECL(w, opt) \
- int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt( \
+ int aom_highbd_sub_pixel_avg_variance##w##xh_##opt( \
const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
@@ -416,7 +416,7 @@
#undef DECLS
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
- uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
@@ -424,23 +424,23 @@
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
- int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
NULL, NULL); \
if (w > wf) { \
uint32_t sse2; \
- int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
sec + 16, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
sec + 32, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
sec + 48, w, h, &sse2, NULL, NULL); \
se += se2; \
@@ -451,7 +451,7 @@
return sse - ((cast se * se) >> (wlog2 + hlog2)); \
} \
\
- uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
@@ -459,23 +459,23 @@
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
- int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
NULL, NULL); \
if (w > wf) { \
uint32_t sse2; \
- int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
sec + 16, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
sec + 32, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
sec + 48, w, h, &sse2, NULL, NULL); \
se += se2; \
@@ -488,7 +488,7 @@
return sse - ((cast se * se) >> (wlog2 + hlog2)); \
} \
\
- uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
+ uint32_t aom_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
@@ -502,27 +502,27 @@
for (start_row = 0; start_row < h; start_row += 16) { \
uint32_t sse2; \
int height = h - start_row < 16 ? h - start_row : 16; \
- int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + (start_row * src_stride), src_stride, x_offset, y_offset, \
dst + (start_row * dst_stride), dst_stride, sec + (start_row * w), \
w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf) { \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 16 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 16 + (start_row * dst_stride), dst_stride, \
sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 32 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \
sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
- se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 48 + (start_row * src_stride), src_stride, x_offset, \
y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \
sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \
@@ -555,7 +555,7 @@
#undef FNS
#undef FN
-void vpx_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
+void aom_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
const uint8_t *ref8, int ref_stride) {
int i, j;
int stride = ref_stride << 3;
@@ -612,7 +612,7 @@
}
}
-void vpx_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
+void aom_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
const uint8_t *pred8, int width,
int height, const uint8_t *ref8,
int ref_stride) {
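
For reference, a minimal scalar sketch of what the renamed aom_highbd_*_variance kernels compute: the sum of squared differences minus the squared difference sum scaled by the block size, with 10- and 12-bit data rounded back toward the 8-bit range exactly as the ROUND_POWER_OF_TWO calls above show. This is an illustrative reconstruction, not code from this patch, and the helper names are hypothetical.

    #include <stdint.h>

    /* Rounded right shift, equivalent to ROUND_POWER_OF_TWO for n >= 0. */
    static int64_t round_pow2(int64_t v, int n) {
      return n ? (v + ((int64_t)1 << (n - 1))) >> n : v;
    }

    /* Hypothetical scalar reference for the aom_highbd_{8,10,12}_variance
     * kernels renamed above. */
    static uint32_t highbd_variance_ref(const uint16_t *src, int src_stride,
                                        const uint16_t *ref, int ref_stride,
                                        int w, int h, int bd, uint32_t *sse) {
      int64_t sum = 0, sse64 = 0, var;
      int i, j;
      for (i = 0; i < h; ++i) {
        for (j = 0; j < w; ++j) {
          const int d = src[i * src_stride + j] - ref[i * ref_stride + j];
          sum += d;
          sse64 += (int64_t)d * d;
        }
      }
      /* 10-bit: sum >> 2, sse >> 4; 12-bit: sum >> 4, sse >> 8, as above. */
      sum = round_pow2(sum, bd - 8);
      *sse = (uint32_t)round_pow2(sse64, 2 * (bd - 8));
      /* w * h is a power of two, so this division is the >> (wlog2 + hlog2)
       * in the VAR_FN macro; clamp at zero like the 10/12-bit paths do. */
      var = (int64_t)(*sse) - (sum * sum) / (w * h);
      return var >= 0 ? (uint32_t)var : 0;
    }
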
diff --git a/aom_dsp/x86/highbd_variance_sse4.c b/aom_dsp/x86/highbd_variance_sse4.c
index fb4bd8b..75d7038 100644
--- a/aom_dsp/x86/highbd_variance_sse4.c
+++ b/aom_dsp/x86/highbd_variance_sse4.c
@@ -10,11 +10,11 @@
#include <smmintrin.h> /* SSE4.1 */
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/variance.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
static INLINE void variance4x4_64_sse4_1(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
@@ -65,7 +65,7 @@
*sum = (int64_t)_mm_extract_epi32(y0, 0);
}
-uint32_t vpx_highbd_8_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_8_variance4x4_sse4_1(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
int64_t sum;
@@ -77,7 +77,7 @@
return *sse - (uint32_t)((sum * sum) >> 4);
}
-uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_10_variance4x4_sse4_1(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
int64_t sum;
@@ -90,7 +90,7 @@
return *sse - (uint32_t)((sum * sum) >> 4);
}
-uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a, int a_stride,
+uint32_t aom_highbd_12_variance4x4_sse4_1(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
uint32_t *sse) {
int64_t sum;
@@ -104,54 +104,54 @@
}
// Sub-pixel
-uint32_t vpx_highbd_8_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_8_sub_pixel_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse) {
uint16_t fdata3[(4 + 1) * 4];
uint16_t temp2[4 * 4];
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- return vpx_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst, dst_stride,
+ return aom_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst, dst_stride,
sse);
}
-uint32_t vpx_highbd_10_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_10_sub_pixel_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse) {
uint16_t fdata3[(4 + 1) * 4];
uint16_t temp2[4 * 4];
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- return vpx_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
+ return aom_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
dst_stride, sse);
}
-uint32_t vpx_highbd_12_sub_pixel_variance4x4_sse4_1(
+uint32_t aom_highbd_12_sub_pixel_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse) {
uint16_t fdata3[(4 + 1) * 4];
uint16_t temp2[4 * 4];
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- return vpx_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
+ return aom_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp2), 4, dst,
dst_stride, sse);
}
// Sub-pixel average
-uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_8_sub_pixel_avg_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse,
const uint8_t *second_pred) {
@@ -159,19 +159,19 @@
uint16_t temp2[4 * 4];
DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+ aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
4);
- return vpx_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst, dst_stride,
+ return aom_highbd_8_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst, dst_stride,
sse);
}
-uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_10_sub_pixel_avg_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse,
const uint8_t *second_pred) {
@@ -179,19 +179,19 @@
uint16_t temp2[4 * 4];
DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+ aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
4);
- return vpx_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
+ return aom_highbd_10_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
dst_stride, sse);
}
-uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_sse4_1(
+uint32_t aom_highbd_12_sub_pixel_avg_variance4x4_sse4_1(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, uint32_t *sse,
const uint8_t *second_pred) {
@@ -199,14 +199,14 @@
uint16_t temp2[4 * 4];
DECLARE_ALIGNED(16, uint16_t, temp3[4 * 4]);
- vpx_highbd_var_filter_block2d_bil_first_pass(
+ aom_highbd_var_filter_block2d_bil_first_pass(
src, fdata3, src_stride, 1, 4 + 1, 4, bilinear_filters_2t[xoffset]);
- vpx_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
+ aom_highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4,
bilinear_filters_2t[yoffset]);
- vpx_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
+ aom_highbd_comp_avg_pred(temp3, second_pred, 4, 4, CONVERT_TO_BYTEPTR(temp2),
4);
- return vpx_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
+ return aom_highbd_12_variance4x4(CONVERT_TO_BYTEPTR(temp3), 4, dst,
dst_stride, sse);
}
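
Every sub-pixel variant in this file follows the same two-pass shape seen above: a horizontal bilinear pass into fdata3, a vertical bilinear pass into temp2, then the full-pixel variance. A hedged scalar sketch of one such pass, assuming the bilinear_filters_2t taps sum to 128 (FILTER_BITS == 7) as in aom_dsp/aom_filter.h; the function name is illustrative:

    #include <stdint.h>

    /* One bilinear pass; pixel_step is 1 for the horizontal pass and the row
     * width for the vertical pass, matching the calls above. */
    static void var_filter_block2d_bil_pass(const uint16_t *src, uint16_t *out,
                                            int src_stride, int pixel_step,
                                            int out_h, int out_w,
                                            const uint8_t *filter) {
      int i, j;
      for (i = 0; i < out_h; ++i) {
        for (j = 0; j < out_w; ++j) {
          out[j] = (uint16_t)((src[j] * filter[0] +
                               src[j + pixel_step] * filter[1] + 64) >> 7);
        }
        src += src_stride;
        out += out_w;
      }
    }
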
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 97ae44b..7504b32 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_dsp/x86/inv_txfm_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -21,7 +21,7 @@
*(int *)(dest) = _mm_cvtsi128_si32(d0); \
}
-void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
@@ -152,7 +152,7 @@
}
}
-void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -448,7 +448,7 @@
out7 = _mm_subs_epi16(stp1_0, stp2_7); \
}
-void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -480,7 +480,7 @@
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
+ // 8x8 Transpose is copied from aom_fdct8x8_sse2()
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
@@ -518,7 +518,7 @@
RECON_AND_STORE(dest + 7 * stride, in7);
}
-void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -556,7 +556,7 @@
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
+ // 8x8 Transpose is copied from aom_fdct8x8_sse2()
TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
in1, in2, in3, in4, in5, in6, in7);
@@ -793,7 +793,7 @@
in[7] = _mm_sub_epi16(k__const_0, s1);
}
-void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -1163,7 +1163,7 @@
stp2_12) \
}
-void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1288,7 +1288,7 @@
}
}
-void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -2133,7 +2133,7 @@
iadst16_8col(in1);
}
-void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3007,7 +3007,7 @@
}
// Only upper-left 8x8 has non-zero coeff
-void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3164,7 +3164,7 @@
}
}
-void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3438,7 +3438,7 @@
}
}
-void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
+void aom_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
@@ -3458,7 +3458,7 @@
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
__m128i ubounded, retval;
const __m128i zero = _mm_set1_epi16(0);
@@ -3472,7 +3472,7 @@
return retval;
}
-void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -3535,7 +3535,7 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct4_c(input, outptr, bd);
+ aom_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -3578,7 +3578,7 @@
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- vpx_highbd_idct4_c(temp_in, temp_out, bd);
+ aom_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3587,7 +3587,7 @@
}
}
-void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -3652,7 +3652,7 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 8; ++i) {
- vpx_highbd_idct8_c(input, outptr, bd);
+ aom_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3678,7 +3678,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vpx_highbd_idct8_c(temp_in, temp_out, bd);
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3687,7 +3687,7 @@
}
}
-void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -3755,7 +3755,7 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct8_c(input, outptr, bd);
+ aom_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3781,7 +3781,7 @@
tran_low_t temp_in[8], temp_out[8];
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- vpx_highbd_idct8_c(temp_in, temp_out, bd);
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3790,7 +3790,7 @@
}
}
-void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -3863,7 +3863,7 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 16; ++i) {
- vpx_highbd_idct16_c(input, outptr, bd);
+ aom_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -3894,7 +3894,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vpx_highbd_idct16_c(temp_in, temp_out, bd);
+ aom_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3903,7 +3903,7 @@
}
}
-void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -3981,7 +3981,7 @@
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vpx_highbd_idct16_c(input, outptr, bd);
+ aom_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -4012,7 +4012,7 @@
tran_low_t temp_in[16], temp_out[16];
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
- vpx_highbd_idct16_c(temp_in, temp_out, bd);
+ aom_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4020,4 +4020,4 @@
}
}
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
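
The numeric suffixes above encode how many scan-ordered coefficients may be non-zero (idct8x8_12, idct16x16_10, idct32x32_34 and _135; _64, _256 and _1024 are the full transforms). For the _1 variants the whole 2-D transform collapses to a single DC term; a hedged scalar sketch of that pattern, assuming the usual cospi_16_64 == 11585 and DCT_CONST_BITS == 14 constants:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    static int32_t dct_const_round_shift(int64_t in) {
      return (int32_t)((in + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* Sketch of the *_1_add pattern: scale the DC term once per 1-D pass,
     * round by the reconstruction shift (4, 5 or 6 for 4x4, 8x8, 16x16+),
     * then add it to every pixel with clipping (the highbd variants clamp
     * to (1 << bd) - 1 instead, cf. clamp_high_sse2 above). */
    static void idct_dc_only_add(int dc, uint8_t *dest, int stride, int n,
                                 int shift) {
      int i, j, a1;
      int out = dct_const_round_shift((int64_t)dc * 11585); /* cospi_16_64 */
      out = dct_const_round_shift((int64_t)out * 11585);
      a1 = (out + (1 << (shift - 1))) >> shift;
      for (i = 0; i < n; ++i, dest += stride) {
        for (j = 0; j < n; ++j) {
          const int v = dest[j] + a1;
          dest[j] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
      }
    }
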
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index d7841bb..6edb91d 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
-#define VPX_DSP_X86_INV_TXFM_SSE2_H_
+#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
+#define AOM_DSP_X86_INV_TXFM_SSE2_H_
#include <emmintrin.h> // SSE2
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
@@ -93,7 +93,7 @@
// Function to allow 8-bit optimisations to be used when profile 0 is used with
// highbitdepth enabled
static INLINE __m128i load_input_data(const tran_low_t *data) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
data[6], data[7]);
#else
@@ -193,4 +193,4 @@
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);
-#endif // VPX_DSP_X86_INV_TXFM_SSE2_H_
+#endif // AOM_DSP_X86_INV_TXFM_SSE2_H_
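
load_input_data above bridges the two coefficient layouts: with CONFIG_AOM_HIGHBITDEPTH, tran_low_t is 32 bits wide, so eight coefficients occupy two XMM registers and must be packed down to int16 before the 16-bit IDCT rows can run. The header builds the vector lane by lane with octa_set_epi16; a sketch of the equivalent packed load, the intrinsic spelling of the packssdw used by the assembly below (the function name is illustrative):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Eight 32-bit coefficients packed to eight int16 lanes with saturation. */
    static __m128i load8_coeffs_highbd(const int32_t *data) {
      const __m128i lo = _mm_loadu_si128((const __m128i *)(data + 0));
      const __m128i hi = _mm_loadu_si128((const __m128i *)(data + 4));
      return _mm_packs_epi32(lo, hi);
    }
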
diff --git a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
index 20baf82..3890926 100644
--- a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
+++ b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
@@ -220,7 +220,7 @@
mova m12, [pw_11585x2]
lea r3, [2 * strideq]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
@@ -271,7 +271,7 @@
lea r3, [2 * strideq]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
@@ -793,7 +793,7 @@
lea r4, [rsp + transposed_in]
idct32x32_34_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1223,7 +1223,7 @@
mov r7, 2
idct32x32_135_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1261,7 +1261,7 @@
mova [r4 + 16 * 6], m6
mova [r4 + 16 * 7], m7
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
add r3, 32
%else
add r3, 16
@@ -1272,7 +1272,7 @@
IDCT32X32_135 16*0, 16*32, 16*64, 16*96
lea stp, [stp + 16 * 8]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea inputq, [inputq + 32 * 32]
%else
lea inputq, [inputq + 16 * 32]
@@ -1687,7 +1687,7 @@
mov r7, 4
idct32x32_1024_transpose:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [r3 + 0]
packssdw m0, [r3 + 16]
mova m1, [r3 + 32 * 4]
@@ -1725,7 +1725,7 @@
mova [r4 + 16 * 5], m5
mova [r4 + 16 * 6], m6
mova [r4 + 16 * 7], m7
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
add r3, 32
%else
add r3, 16
@@ -1737,7 +1737,7 @@
IDCT32X32_1024 16*0, 16*32, 16*64, 16*96
lea stp, [stp + 16 * 8]
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea inputq, [inputq + 32 * 32]
%else
lea inputq, [inputq + 16 * 32]
diff --git a/aom_dsp/x86/inv_wht_sse2.asm b/aom_dsp/x86/inv_wht_sse2.asm
index fbbcd76..ee80563 100644
--- a/aom_dsp/x86/inv_wht_sse2.asm
+++ b/aom_dsp/x86/inv_wht_sse2.asm
@@ -82,7 +82,7 @@
INIT_XMM sse2
cglobal iwht4x4_16_add, 3, 3, 7, input, output, stride
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova m0, [inputq + 0]
packssdw m0, [inputq + 16]
mova m1, [inputq + 32]
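
iwht4x4_16_add is the inverse 4x4 Walsh-Hadamard transform used for lossless blocks; as the hunk shows, the highbitdepth build again only widens the coefficient load. For orientation, a hedged C reconstruction of the 1-D butterfly each pass applies, following the familiar VP9-style lossless transform (UNIT_QUANT_SHIFT == 2 is an assumption carried over from that code):

    /* One 4-point inverse WHT butterfly; the initial >> 2 (UNIT_QUANT_SHIFT)
     * applies only on the first (row) pass of the full transform. */
    static void iwht4_1d_row(const int *in, int *out) {
      int a1 = in[0] >> 2;
      int c1 = in[1] >> 2;
      int d1 = in[2] >> 2;
      int b1 = in[3] >> 2;
      int e1;
      a1 += c1;
      d1 -= b1;
      e1 = (a1 - d1) >> 1;
      b1 = e1 - b1;
      c1 = e1 - c1;
      a1 -= b1;
      d1 += c1;
      out[0] = a1;
      out[1] = b1;
      out[2] = c1;
      out[3] = d1;
    }
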
diff --git a/aom_dsp/x86/loopfilter_avx2.c b/aom_dsp/x86/loopfilter_avx2.c
index f444c5d..fd73def 100644
--- a/aom_dsp/x86/loopfilter_avx2.c
+++ b/aom_dsp/x86/loopfilter_avx2.c
@@ -10,10 +10,10 @@
#include <immintrin.h> /* AVX2 */
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
-void vpx_lpf_horizontal_edge_8_avx2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_8_avx2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh) {
@@ -101,7 +101,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+ /* (aom_filter + 3 * (qs0 - ps0)) & mask */
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
@@ -367,7 +367,7 @@
8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128
};
-void vpx_lpf_horizontal_edge_16_avx2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_16_avx2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh) {
@@ -480,7 +480,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+ /* (aom_filter + 3 * (qs0 - ps0)) & mask */
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
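
The recurring "(aom_filter + 3 * (qs0 - ps0)) & mask" comment refers to the classic four-tap loop-filter update that these saturating-add sequences vectorize. A scalar sketch of that logic, assuming the standard filter4 formulation (pixels are XORed with 0x80 so signed saturating arithmetic models the unsigned range; mask and hev are 0x00/0xff selectors computed beforehand):

    #include <stdint.h>

    static int8_t sclamp(int t) { /* saturate to int8, like _mm_adds_epi8 */
      return (int8_t)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    static void filter4(int8_t mask, int8_t hev, uint8_t *op1, uint8_t *op0,
                        uint8_t *oq0, uint8_t *oq1) {
      const int8_t ps1 = (int8_t)(*op1 ^ 0x80), ps0 = (int8_t)(*op0 ^ 0x80);
      const int8_t qs0 = (int8_t)(*oq0 ^ 0x80), qs1 = (int8_t)(*oq1 ^ 0x80);
      int8_t filt = (int8_t)(sclamp(ps1 - qs1) & hev);
      int8_t filter1, filter2;
      filt = (int8_t)(sclamp(filt + 3 * (qs0 - ps0)) & mask); /* the step above */
      filter1 = (int8_t)(sclamp(filt + 4) >> 3);
      filter2 = (int8_t)(sclamp(filt + 3) >> 3);
      *oq0 = (uint8_t)(sclamp(qs0 - filter1) ^ 0x80);
      *op0 = (uint8_t)(sclamp(ps0 + filter2) ^ 0x80);
      filt = (int8_t)(((filter1 + 1) >> 1) & ~hev); /* outer taps skip hev pixels */
      *oq1 = (uint8_t)(sclamp(qs1 - filt) ^ 0x80);
      *op1 = (uint8_t)(sclamp(ps1 + filt) ^ 0x80);
    }
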
diff --git a/aom_dsp/x86/loopfilter_sse2.c b/aom_dsp/x86/loopfilter_sse2.c
index aaa42f3..3260a7e 100644
--- a/aom_dsp/x86/loopfilter_sse2.c
+++ b/aom_dsp/x86/loopfilter_sse2.c
@@ -10,7 +10,7 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
#include "aom_ports/emmintrin_compat.h"
@@ -103,7 +103,7 @@
ps1ps0 = _mm_xor_si128(ps1ps0, t80); /* ^ 0x80 */ \
} while (0)
-void vpx_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
+void aom_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
const uint8_t *_blimit, const uint8_t *_limit,
const uint8_t *_thresh) {
const __m128i zero = _mm_set1_epi16(0);
@@ -138,7 +138,7 @@
_mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(qs1qs0)); // *oq1
}
-void vpx_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
+void aom_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
const uint8_t *_blimit, const uint8_t *_limit,
const uint8_t *_thresh) {
const __m128i zero = _mm_set1_epi16(0);
@@ -229,7 +229,7 @@
*(int *)(s + 7 * p - 2) = _mm_cvtsi128_si32(qs1qs0);
}
-void vpx_lpf_horizontal_edge_8_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_8_sse2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh) {
@@ -309,7 +309,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
@@ -591,7 +591,7 @@
return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result);
}
-void vpx_lpf_horizontal_edge_16_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_edge_16_sse2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh) {
@@ -702,7 +702,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
filter2 = _mm_adds_epi8(filt, t3);
@@ -923,7 +923,7 @@
}
}
-void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_8_sse2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh) {
@@ -1066,7 +1066,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
@@ -1135,7 +1135,7 @@
}
}
-void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
+void aom_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
const uint8_t *_limit0,
const uint8_t *_thresh0,
const uint8_t *_blimit1,
@@ -1302,7 +1302,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
@@ -1377,7 +1377,7 @@
}
}
-void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
+void aom_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
const unsigned char *_blimit0,
const unsigned char *_limit0,
const unsigned char *_thresh0,
@@ -1471,7 +1471,7 @@
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
filt = _mm_adds_epi8(filt, work_a);
- // (vpx_filter + 3 * (qs0 - ps0)) & mask
+ // (aom_filter + 3 * (qs0 - ps0)) & mask
filt = _mm_and_si128(filt, mask);
filter1 = _mm_adds_epi8(filt, t4);
@@ -1657,7 +1657,7 @@
} while (++idx8x8 < num_8x8_to_transpose);
}
-void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
@@ -1669,7 +1669,7 @@
transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
// Loop filtering
- vpx_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
+ aom_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
blimit1, limit1, thresh1);
src[0] = t_dst;
src[1] = t_dst + 8;
@@ -1680,7 +1680,7 @@
transpose(src, 16, dst, p, 2);
}
-void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_8_sse2(unsigned char *s, int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh) {
@@ -1695,7 +1695,7 @@
transpose(src, p, dst, 8, 1);
// Loop filtering
- vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh);
+ aom_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh);
src[0] = t_dst;
dst[0] = s - 4;
@@ -1704,7 +1704,7 @@
transpose(src, 8, dst, p, 1);
}
-void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void aom_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
@@ -1716,7 +1716,7 @@
transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
// Loop filtering
- vpx_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
+ aom_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
blimit1, limit1, thresh1);
src[0] = t_dst;
src[1] = t_dst + 8;
@@ -1728,7 +1728,7 @@
transpose(src, 16, dst, p, 2);
}
-void vpx_lpf_vertical_16_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_16_sse2(unsigned char *s, int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh) {
@@ -1745,7 +1745,7 @@
transpose(src, p, dst, 8, 2);
// Loop filtering
- vpx_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh);
+ aom_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh);
src[0] = t_dst;
src[1] = t_dst + 8 * 8;
@@ -1756,7 +1756,7 @@
transpose(src, 8, dst, p, 2);
}
-void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
+void aom_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
DECLARE_ALIGNED(16, unsigned char, t_dst[256]);
@@ -1766,7 +1766,7 @@
transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
// Loop filtering
- vpx_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh);
+ aom_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh);
// Transpose back
transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
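
All of the aom_lpf_vertical_* kernels in this file reuse their horizontal counterparts through a transpose, as the hunks above show: the columns straddling the edge are gathered into a row-major scratch buffer, filtered as a horizontal edge, and scattered back. A minimal sketch of that shape, with a scalar transpose for clarity (the function pointer stands in for e.g. aom_lpf_horizontal_8_sse2):

    #include <stdint.h>

    typedef void (*lpf_horiz_fn)(uint8_t *s, int pitch, const uint8_t *blimit,
                                 const uint8_t *limit, const uint8_t *thresh);

    static void transpose8x8_u8(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride) {
      int i, j;
      for (i = 0; i < 8; ++i)
        for (j = 0; j < 8; ++j)
          dst[j * dst_stride + i] = src[i * src_stride + j];
    }

    static void lpf_vertical_8_via_transpose(uint8_t *s, int pitch,
                                             const uint8_t *blimit,
                                             const uint8_t *limit,
                                             const uint8_t *thresh,
                                             lpf_horiz_fn horiz) {
      uint8_t t[8 * 8];
      transpose8x8_u8(s - 4, pitch, t, 8);  /* 4 pixels each side of the edge */
      horiz(t + 4 * 8, 8, blimit, limit, thresh); /* edge is now horizontal */
      transpose8x8_u8(t, 8, s - 4, pitch);  /* write filtered pixels back */
    }
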
diff --git a/aom_dsp/x86/masked_sad_intrin_ssse3.c b/aom_dsp/x86/masked_sad_intrin_ssse3.c
index cf1fd76..44d5011 100644
--- a/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -13,8 +13,8 @@
#include <tmmintrin.h>
#include "aom_ports/mem.h"
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
__m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
@@ -46,7 +46,7 @@
const uint8_t *m_ptr, int m_stride, int height);
#define MASKSADMXN_SSSE3(m, n) \
- unsigned int vpx_masked_sad##m##x##n##_ssse3( \
+ unsigned int aom_masked_sad##m##x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
@@ -68,7 +68,7 @@
MASKSADMXN_SSSE3(16, 8)
#define MASKSAD8XN_SSSE3(n) \
- unsigned int vpx_masked_sad8x##n##_ssse3( \
+ unsigned int aom_masked_sad8x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk, \
@@ -80,7 +80,7 @@
MASKSAD8XN_SSSE3(4)
#define MASKSAD4XN_SSSE3(n) \
- unsigned int vpx_masked_sad4x##n##_ssse3( \
+ unsigned int aom_masked_sad4x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
@@ -203,7 +203,7 @@
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
int stride) {
__m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
@@ -220,7 +220,7 @@
const uint8_t *m_ptr, int m_stride, int height);
#define HIGHBD_MASKSADMXN_SSSE3(m, n) \
- unsigned int vpx_highbd_masked_sad##m##x##n##_ssse3( \
+ unsigned int aom_highbd_masked_sad##m##x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, \
@@ -245,7 +245,7 @@
HIGHBD_MASKSADMXN_SSSE3(8, 4)
#define HIGHBD_MASKSAD4XN_SSSE3(n) \
- unsigned int vpx_highbd_masked_sad4x##n##_ssse3( \
+ unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
@@ -330,4 +330,4 @@
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
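
The renamed masked SAD kernels weight each absolute difference by a per-pixel mask before accumulating, and the trailing (sad + 31) >> 6 implies the mask carries weights scaled by 64. A scalar reference sketch under that assumption (illustrative only):

    #include <stdint.h>
    #include <stdlib.h>

    static unsigned int masked_sad_ref(const uint8_t *src, int src_stride,
                                       const uint8_t *ref, int ref_stride,
                                       const uint8_t *msk, int msk_stride,
                                       int w, int h) {
      unsigned int sad = 0;
      int i, j;
      for (i = 0; i < h; ++i) {
        for (j = 0; j < w; ++j)
          sad += msk[j] * (unsigned int)abs(src[j] - ref[j]);
        src += src_stride;
        ref += ref_stride;
        msk += msk_stride;
      }
      /* Undo the 6-bit mask scale with rounding, as the SIMD tail does. */
      return (sad + 31) >> 6;
    }
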
diff --git a/aom_dsp/x86/masked_variance_intrin_ssse3.c b/aom_dsp/x86/masked_variance_intrin_ssse3.c
index c18f870..2a838a6 100644
--- a/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -13,10 +13,10 @@
#include <emmintrin.h>
#include <tmmintrin.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
// Half pixel shift
#define HALF_PIXEL_OFFSET (BIL_SUBPEL_SHIFTS / 2)
@@ -44,14 +44,14 @@
#endif
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
return hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
uint32_t *sse, const int w,
@@ -144,7 +144,7 @@
}
#define MASKED_VARWXH(W, H) \
- unsigned int vpx_masked_variance##W##x##H##_ssse3( \
+ unsigned int aom_masked_variance##W##x##H##_ssse3( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
return masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, m_stride, W, \
@@ -219,7 +219,7 @@
}
#define MASKED_VAR8XH(H) \
- unsigned int vpx_masked_variance8x##H##_ssse3( \
+ unsigned int aom_masked_variance8x##H##_ssse3( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
return masked_variance8xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
@@ -294,7 +294,7 @@
}
#define MASKED_VAR4XH(H) \
- unsigned int vpx_masked_variance4x##H##_ssse3( \
+ unsigned int aom_masked_variance4x##H##_ssse3( \
const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
return masked_variance4xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
@@ -304,7 +304,7 @@
MASKED_VAR4XH(4)
MASKED_VAR4XH(8)
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
// Main calculation for n*8 wide blocks
static INLINE void highbd_masked_variance64_ssse3(
@@ -517,7 +517,7 @@
}
#define HIGHBD_MASKED_VARWXH(W, H) \
- unsigned int vpx_highbd_masked_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_masked_variance##W##x##H##_ssse3( \
const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
@@ -526,7 +526,7 @@
m_stride, W, H, sse); \
} \
\
- unsigned int vpx_highbd_10_masked_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_10_masked_variance##W##x##H##_ssse3( \
const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
@@ -535,7 +535,7 @@
m_stride, W, H, sse); \
} \
\
- unsigned int vpx_highbd_12_masked_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_12_masked_variance##W##x##H##_ssse3( \
const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
const uint8_t *m, int m_stride, unsigned int *sse) { \
uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
@@ -644,7 +644,7 @@
}
// Functions for width (W) >= 16
-unsigned int vpx_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
int yoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -689,7 +689,7 @@
}
return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
}
-unsigned int vpx_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
int xoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -720,7 +720,7 @@
}
return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
}
-unsigned int vpx_masked_subpel_varWxH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_varWxH_xnonzero_ynonzero(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse, int w, int h, filter_fn_t xfilter_fn,
@@ -780,7 +780,7 @@
// Note the order in which rows are loaded: xmm[127:96] = row 1, xmm[95:64] = row 2,
// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
int yoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -849,7 +849,7 @@
}
// Note the order in which rows are loaded: xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int vpx_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
int yoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -904,7 +904,7 @@
// Note the order in which rows are loaded: xmm[127:96] = row 1, xmm[95:64] = row 2,
// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
int xoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -969,7 +969,7 @@
return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int vpx_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
+unsigned int aom_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
int xoffset, const uint8_t *dst,
int dst_stride, const uint8_t *msk,
int msk_stride, unsigned int *sse,
@@ -1018,7 +1018,7 @@
// Note the order in which rows are loaded: xmm[127:96] = row 1, xmm[95:64] = row 2,
// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int vpx_masked_subpel_var4xH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_var4xH_xnonzero_ynonzero(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse, int h) {
@@ -1117,7 +1117,7 @@
return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int vpx_masked_subpel_var8xH_xnonzero_ynonzero(
+unsigned int aom_masked_subpel_var8xH_xnonzero_ynonzero(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse, int h) {
@@ -1226,49 +1226,49 @@
// For W >=16
#define MASK_SUBPIX_VAR_LARGE(W, H) \
- unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
assert(W % 16 == 0); \
if (xoffset == 0) { \
if (yoffset == 0) \
- return vpx_masked_variance##W##x##H##_ssse3( \
+ return aom_masked_variance##W##x##H##_ssse3( \
src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
else if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_masked_subpel_varWxH_xzero( \
+ return aom_masked_subpel_varWxH_xzero( \
src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, apply_filter_avg); \
else \
- return vpx_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst, \
+ return aom_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst, \
dst_stride, msk, msk_stride, \
sse, W, H, apply_filter); \
} else if (yoffset == 0) { \
if (xoffset == HALF_PIXEL_OFFSET) \
- return vpx_masked_subpel_varWxH_yzero( \
+ return aom_masked_subpel_varWxH_yzero( \
src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, apply_filter_avg); \
else \
- return vpx_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst, \
+ return aom_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst, \
dst_stride, msk, msk_stride, \
sse, W, H, apply_filter); \
} else if (xoffset == HALF_PIXEL_OFFSET) { \
if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
dst_stride, msk, msk_stride, sse, W, H, apply_filter_avg, \
apply_filter_avg); \
else \
- return vpx_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
msk_stride, sse, W, H, apply_filter_avg, apply_filter); \
} else { \
if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, apply_filter, apply_filter_avg); \
else \
- return vpx_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
msk_stride, sse, W, H, apply_filter, apply_filter); \
} \
@@ -1276,22 +1276,22 @@
// For W < 16
#define MASK_SUBPIX_VAR_SMALL(W, H) \
- unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
assert(W == 4 || W == 8); \
if (xoffset == 0 && yoffset == 0) \
- return vpx_masked_variance##W##x##H##_ssse3( \
+ return aom_masked_variance##W##x##H##_ssse3( \
src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
else if (xoffset == 0) \
- return vpx_masked_subpel_var##W##xH_xzero( \
+ return aom_masked_subpel_var##W##xH_xzero( \
src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H); \
else if (yoffset == 0) \
- return vpx_masked_subpel_var##W##xH_yzero( \
+ return aom_masked_subpel_var##W##xH_yzero( \
src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H); \
else \
- return vpx_masked_subpel_var##W##xH_xnonzero_ynonzero( \
+ return aom_masked_subpel_var##W##xH_xnonzero_ynonzero( \
src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
sse, H); \
}
@@ -1315,7 +1315,7 @@
MASK_SUBPIX_VAR_LARGE(128, 128)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
typedef uint32_t (*highbd_calc_masked_var_t)(__m128i v_sum_d, __m128i v_sse_q,
uint32_t *sse, const int w,
const int h);
@@ -1446,7 +1446,7 @@
}
// High bit depth functions for width (W) >= 8
-unsigned int vpx_highbd_masked_subpel_varWxH_xzero(
+unsigned int aom_highbd_masked_subpel_varWxH_xzero(
const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
int w, int h, highbd_filter_fn_t filter_fn,
@@ -1491,7 +1491,7 @@
}
return calc_var(v_sum_d, v_sse_q, sse, w, h);
}
-unsigned int vpx_highbd_masked_subpel_varWxH_yzero(
+unsigned int aom_highbd_masked_subpel_varWxH_yzero(
const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
int w, int h, highbd_filter_fn_t filter_fn,
@@ -1523,7 +1523,7 @@
return calc_var(v_sum_d, v_sse_q, sse, w, h);
}
-unsigned int vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
+unsigned int aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
const uint16_t *src, int src_stride, int xoffset, int yoffset,
const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse, int w, int h, highbd_filter_fn_t xfilter_fn,
@@ -1584,7 +1584,7 @@
}
// Note the order in which rows are loaded: xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int vpx_highbd_masked_subpel_var4xH_xzero(
+unsigned int aom_highbd_masked_subpel_var4xH_xzero(
const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
int h, highbd_calc_masked_var_t calc_var) {
@@ -1635,7 +1635,7 @@
return calc_var(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int vpx_highbd_masked_subpel_var4xH_yzero(
+unsigned int aom_highbd_masked_subpel_var4xH_yzero(
const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
int h, highbd_calc_masked_var_t calc_var) {
@@ -1683,7 +1683,7 @@
return calc_var(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
+unsigned int aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
const uint16_t *src, int src_stride, int xoffset, int yoffset,
const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
@@ -1811,41 +1811,41 @@
return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
msk_stride, sse); \
else if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_highbd_masked_subpel_varWxH_xzero( \
+ return aom_highbd_masked_subpel_varWxH_xzero( \
src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
else \
- return vpx_highbd_masked_subpel_varWxH_xzero( \
+ return aom_highbd_masked_subpel_varWxH_xzero( \
src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, \
W, H, highbd_apply_filter, calc_var); \
} else if (yoffset == 0) { \
if (xoffset == HALF_PIXEL_OFFSET) \
- return vpx_highbd_masked_subpel_varWxH_yzero( \
+ return aom_highbd_masked_subpel_varWxH_yzero( \
src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
else \
- return vpx_highbd_masked_subpel_varWxH_yzero( \
+ return aom_highbd_masked_subpel_varWxH_yzero( \
src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, \
W, H, highbd_apply_filter, calc_var); \
} else if (xoffset == HALF_PIXEL_OFFSET) { \
if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
dst_stride, msk, msk_stride, sse, W, H, highbd_apply_filter_avg, \
highbd_apply_filter_avg, calc_var); \
else \
- return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
msk_stride, sse, W, H, highbd_apply_filter_avg, \
highbd_apply_filter, calc_var); \
} else { \
if (yoffset == HALF_PIXEL_OFFSET) \
- return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
msk_stride, sse, W, H, highbd_apply_filter, \
highbd_apply_filter_avg, calc_var); \
else \
- return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
+ return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
msk_stride, sse, W, H, highbd_apply_filter, highbd_apply_filter, \
calc_var); \
@@ -1866,46 +1866,46 @@
return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
msk_stride, sse); \
else if (xoffset == 0) \
- return vpx_highbd_masked_subpel_var4xH_xzero( \
+ return aom_highbd_masked_subpel_var4xH_xzero( \
src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H, \
calc_var); \
else if (yoffset == 0) \
- return vpx_highbd_masked_subpel_var4xH_yzero( \
+ return aom_highbd_masked_subpel_var4xH_yzero( \
src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H, \
calc_var); \
else \
- return vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero( \
+ return aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero( \
src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
sse, H, calc_var); \
}
#define HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(W, H) \
- unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
sse, calc_masked_variance, \
- vpx_highbd_masked_variance##W##x##H##_ssse3); \
+ aom_highbd_masked_variance##W##x##H##_ssse3); \
} \
- unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
sse, highbd_10_calc_masked_variance, \
- vpx_highbd_10_masked_variance##W##x##H##_ssse3); \
+ aom_highbd_10_masked_variance##W##x##H##_ssse3); \
} \
- unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
unsigned int *sse) { \
return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
sse, highbd_12_calc_masked_variance, \
- vpx_highbd_12_masked_variance##W##x##H##_ssse3); \
+ aom_highbd_12_masked_variance##W##x##H##_ssse3); \
}
HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 4)
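Note on the wrapper macros above: they dispatch on the sub-pixel phase of each axis. An offset of 0 skips filtering on that axis, HALF_PIXEL_OFFSET selects the cheap two-tap average kernel, and any other phase uses the general bilinear kernel. A minimal C sketch of that dispatch, with hypothetical kernel names (masked_var_copy etc. are stand-ins, not functions from this tree):

    #include <stdint.h>

    /* Sketch of the offset-based dispatch performed by the
     * MASK_SUBPIX_VAR macros; the four kernels are hypothetical
     * stand-ins for the specialized paths. */
    typedef unsigned int (*masked_var_fn)(const uint8_t *src, int src_stride,
                                          int xoff, int yoff,
                                          const uint8_t *dst, int dst_stride,
                                          const uint8_t *msk, int msk_stride,
                                          unsigned int *sse);

    static unsigned int masked_subpel_var_dispatch(
        const uint8_t *src, int src_stride, int xoffset, int yoffset,
        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
        unsigned int *sse, masked_var_fn var_copy, masked_var_fn var_xzero,
        masked_var_fn var_yzero, masked_var_fn var_general) {
      if (xoffset == 0 && yoffset == 0)  /* integer-pel: no filtering at all */
        return var_copy(src, src_stride, 0, 0, dst, dst_stride, msk,
                        msk_stride, sse);
      if (xoffset == 0)                  /* vertical-only filtering */
        return var_xzero(src, src_stride, 0, yoffset, dst, dst_stride, msk,
                         msk_stride, sse);
      if (yoffset == 0)                  /* horizontal-only filtering */
        return var_yzero(src, src_stride, xoffset, 0, dst, dst_stride, msk,
                         msk_stride, sse);
      return var_general(src, src_stride, xoffset, yoffset, dst, dst_stride,
                         msk, msk_stride, sse);
    }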
diff --git a/aom_dsp/x86/obmc_sad_sse4.c b/aom_dsp/x86/obmc_sad_sse4.c
index b4c839b..97943f8 100644
--- a/aom_dsp/x86/obmc_sad_sse4.c
+++ b/aom_dsp/x86/obmc_sad_sse4.c
@@ -11,11 +11,11 @@
#include <assert.h>
#include <immintrin.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/synonyms.h"
////////////////////////////////////////////////////////////////////////////////
@@ -107,7 +107,7 @@
}
#define OBMCSADWXH(w, h) \
- unsigned int vpx_obmc_sad##w##x##h##_sse4_1( \
+ unsigned int aom_obmc_sad##w##x##h##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *msk) { \
if (w == 4) { \
@@ -140,7 +140,7 @@
// High bit-depth
////////////////////////////////////////////////////////////////////////////////
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE unsigned int hbd_obmc_sad_w4(const uint8_t *pre8,
const int pre_stride,
const int32_t *wsrc,
@@ -230,7 +230,7 @@
}
#define HBD_OBMCSADWXH(w, h) \
- unsigned int vpx_highbd_obmc_sad##w##x##h##_sse4_1( \
+ unsigned int aom_highbd_obmc_sad##w##x##h##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask) { \
if (w == 4) { \
@@ -258,4 +258,4 @@
HBD_OBMCSADWXH(8, 4)
HBD_OBMCSADWXH(4, 8)
HBD_OBMCSADWXH(4, 4)
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
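For reference, the scalar form of the OBMC SAD these kernels vectorize: wsrc holds a pre-weighted source and mask the predictor weights, both with 12 fractional bits, so each term is rounded back down by 12 bits. A sketch assumed to mirror the generic C path, using the ROUND_POWER_OF_TWO convention from aom_ports/mem.h:

    #include <stdint.h>
    #include <stdlib.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Scalar sketch of the OBMC SAD: |wsrc - pre * mask|, Q12 weights. */
    static unsigned int obmc_sad_sketch(const uint8_t *pre, int pre_stride,
                                        const int32_t *wsrc,
                                        const int32_t *mask,
                                        int width, int height) {
      unsigned int sad = 0;
      for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x)
          sad += ROUND_POWER_OF_TWO(abs(wsrc[x] - pre[x] * mask[x]), 12);
        pre += pre_stride;
        wsrc += width;
        mask += width;
      }
      return sad;
    }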
diff --git a/aom_dsp/x86/obmc_variance_sse4.c b/aom_dsp/x86/obmc_variance_sse4.c
index 71c3c7e..0142551 100644
--- a/aom_dsp/x86/obmc_variance_sse4.c
+++ b/aom_dsp/x86/obmc_variance_sse4.c
@@ -11,13 +11,13 @@
#include <assert.h>
#include <immintrin.h>
-#include "./vpx_config.h"
+#include "./aom_config.h"
#include "aom_ports/mem.h"
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/synonyms.h"
-#include "aom_dsp/vpx_filter.h"
+#include "aom_dsp/aom_filter.h"
////////////////////////////////////////////////////////////////////////////////
// 8 bit
@@ -114,7 +114,7 @@
}
#define OBMCVARWXH(W, H) \
- unsigned int vpx_obmc_variance##W##x##H##_sse4_1( \
+ unsigned int aom_obmc_variance##W##x##H##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -149,7 +149,7 @@
// High bit-depth
////////////////////////////////////////////////////////////////////////////////
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
static INLINE void hbd_obmc_variance_w4(
const uint8_t *pre8, const int pre_stride, const int32_t *wsrc,
const int32_t *mask, uint64_t *const sse, int64_t *const sum, const int h) {
@@ -305,7 +305,7 @@
}
#define HBD_OBMCVARWXH(W, H) \
- unsigned int vpx_highbd_obmc_variance##W##x##H##_sse4_1( \
+ unsigned int aom_highbd_obmc_variance##W##x##H##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -313,7 +313,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_10_obmc_variance##W##x##H##_sse4_1( \
+ unsigned int aom_highbd_10_obmc_variance##W##x##H##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -321,7 +321,7 @@
return *sse - (((int64_t)sum * sum) / (W * H)); \
} \
\
- unsigned int vpx_highbd_12_obmc_variance##W##x##H##_sse4_1( \
+ unsigned int aom_highbd_12_obmc_variance##W##x##H##_sse4_1( \
const uint8_t *pre, int pre_stride, const int32_t *wsrc, \
const int32_t *mask, unsigned int *sse) { \
int sum; \
@@ -347,4 +347,4 @@
HBD_OBMCVARWXH(8, 4)
HBD_OBMCVARWXH(4, 8)
HBD_OBMCVARWXH(4, 4)
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
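The OBMC variance wrappers finish with the usual identity variance = sse - sum^2 / (W * H); the kernels only accumulate the masked sum and sum of squares. A scalar sketch under the same Q12 weighting assumption as the SAD sketch above (obmc_variance_sketch is a hypothetical name):

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    #define ROUND_POWER_OF_TWO_SIGNED(value, n) \
      (((value) < 0) ? -ROUND_POWER_OF_TWO(-(value), (n)) \
                     : ROUND_POWER_OF_TWO((value), (n)))

    /* Accumulate masked sum and sum-of-squares, then apply
     * variance = sse - sum^2 / (w * h), as the wrappers above do. */
    static unsigned int obmc_variance_sketch(const uint8_t *pre,
                                             int pre_stride,
                                             const int32_t *wsrc,
                                             const int32_t *mask, int w, int h,
                                             unsigned int *sse) {
      int64_t sum = 0;
      uint64_t sse64 = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          const int diff =
              ROUND_POWER_OF_TWO_SIGNED(wsrc[x] - pre[x] * mask[x], 12);
          sum += diff;
          sse64 += (int64_t)diff * diff;
        }
        pre += pre_stride;
        wsrc += w;
        mask += w;
      }
      *sse = (unsigned int)sse64;
      return *sse - (unsigned int)((sum * sum) / (w * h));
    }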
diff --git a/aom_dsp/x86/quantize_avx_x86_64.asm b/aom_dsp/x86/quantize_avx_x86_64.asm
index 01c4129..b74d6ea 100644
--- a/aom_dsp/x86/quantize_avx_x86_64.asm
+++ b/aom_dsp/x86/quantize_avx_x86_64.asm
@@ -41,7 +41,7 @@
mova m0, [zbinq] ; m0 = zbin
; Get DC and first 15 AC coeffs - in this special case, that is all.
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers but we process them as 16 bit numbers
mova m9, [coeffq]
packssdw m9, [coeffq+16] ; m9 = c[i]
@@ -73,7 +73,7 @@
ptest m14, m14
jnz .single_nonzero
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [r1 ], ymm5
mova [r1+32], ymm5
mova [r2 ], ymm5
@@ -121,7 +121,7 @@
pand m8, m7
pand m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -142,7 +142,7 @@
punpckhqdq m3, m3
pmullw m13, m3 ; dqc[i] = qc[i] * q
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -226,7 +226,7 @@
DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea coeffq, [ coeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -239,7 +239,7 @@
neg ncoeffq
; get DC and first 15 AC coeffs
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers & require 16bit numbers
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
@@ -261,7 +261,7 @@
ptest m14, m14
jnz .first_nonzero
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4 ], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4 ], ymm5
@@ -299,7 +299,7 @@
pand m8, m7
pand m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -330,7 +330,7 @@
psignw m13, m10
%endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
@@ -360,7 +360,7 @@
.ac_only_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; pack coeff from 32bit to 16bit array
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
@@ -382,7 +382,7 @@
ptest m14, m14
jnz .rest_nonzero
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4+ 0], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4+ 0], ymm5
@@ -421,7 +421,7 @@
pand m14, m7
pand m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
@@ -451,7 +451,7 @@
psignw m13, m10
%endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
@@ -507,7 +507,7 @@
DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
%else
@@ -519,7 +519,7 @@
pxor m7, m7
.blank_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [dqcoeffq+ncoeffq*4+ 0], ymm7
mova [dqcoeffq+ncoeffq*4+32], ymm7
mova [qcoeffq+ncoeffq*4+ 0], ymm7
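The quantizer this asm vectorizes follows the usual zbin/round/quant/shift pipeline, with separate DC and AC parameters selected by rc != 0. A scalar sketch, assumed to match the generic C quantizer (quantize_b_sketch and clamp16 are illustrative names):

    #include <stdint.h>
    #include <stdlib.h>

    typedef int32_t tran_low_t; /* 32-bit when CONFIG_AOM_HIGHBITDEPTH */

    static int16_t clamp16(int v) {
      return (int16_t)(v < -32768 ? -32768 : (v > 32767 ? 32767 : v));
    }

    /* Dead-zone check against zbin, then a two-stage multiply by quant and
     * quant_shift, and dequantization by dequant; eob tracks the last
     * nonzero coefficient in scan order. */
    static void quantize_b_sketch(const tran_low_t *coeff, intptr_t n,
                                  const int16_t zbin[2], const int16_t round[2],
                                  const int16_t quant[2],
                                  const int16_t quant_shift[2],
                                  const int16_t dequant[2], tran_low_t *qcoeff,
                                  tran_low_t *dqcoeff, const int16_t *scan,
                                  uint16_t *eob_ptr) {
      int eob = -1;
      for (intptr_t i = 0; i < n; ++i) {
        const int rc = scan[i];
        const int sign = coeff[rc] < 0 ? -1 : 1;
        const int abs_coeff = abs(coeff[rc]);
        qcoeff[rc] = dqcoeff[rc] = 0;
        if (abs_coeff >= zbin[rc != 0]) {
          int tmp = clamp16(abs_coeff + round[rc != 0]);
          tmp = ((((tmp * quant[rc != 0]) >> 16) + tmp) *
                 quant_shift[rc != 0]) >> 16;
          qcoeff[rc] = sign * tmp;
          dqcoeff[rc] = qcoeff[rc] * dequant[rc != 0];
          if (tmp) eob = (int)i;
        }
      }
      *eob_ptr = (uint16_t)(eob + 1);
    }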
diff --git a/aom_dsp/x86/quantize_sse2.c b/aom_dsp/x86/quantize_sse2.c
index c6ff06d..f320e4e 100644
--- a/aom_dsp/x86/quantize_sse2.c
+++ b/aom_dsp/x86/quantize_sse2.c
@@ -11,11 +11,11 @@
#include <emmintrin.h>
#include <xmmintrin.h>
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
static INLINE __m128i load_coefficients(const tran_low_t *coeff_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1],
(int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3],
(int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5],
@@ -27,7 +27,7 @@
static INLINE void store_coefficients(__m128i coeff_vals,
tran_low_t *coeff_ptr) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
__m128i one = _mm_set1_epi16(1);
__m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
__m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one);
@@ -40,7 +40,7 @@
#endif
}
-void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void aom_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
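Under CONFIG_AOM_HIGHBITDEPTH, tran_low_t is 32 bits wide, so every SIMD path here narrows coefficient pairs to 16-bit lanes on load and sign-extends them back on store; that is exactly what load_coefficients/store_coefficients above do. An intrinsics sketch of the same round trip (hypothetical helper names; assumes values fit in 16 bits, which the quantizer guarantees):

    #include <emmintrin.h>
    #include <stdint.h>

    typedef int32_t tran_low_t;

    /* Narrow 8 x int32 coefficients to 8 x int16 (packssdw, saturating). */
    static __m128i load8_coeffs(const tran_low_t *p) {
      const __m128i lo = _mm_load_si128((const __m128i *)(p + 0));
      const __m128i hi = _mm_load_si128((const __m128i *)(p + 4));
      return _mm_packs_epi32(lo, hi);
    }

    /* Widen 8 x int16 back to 8 x int32 by interleaving with a sign mask. */
    static void store8_coeffs(__m128i v, tran_low_t *p) {
      const __m128i sign = _mm_srai_epi16(v, 15);
      _mm_store_si128((__m128i *)(p + 0), _mm_unpacklo_epi16(v, sign));
      _mm_store_si128((__m128i *)(p + 4), _mm_unpackhi_epi16(v, sign));
    }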
diff --git a/aom_dsp/x86/quantize_ssse3_x86_64.asm b/aom_dsp/x86/quantize_ssse3_x86_64.asm
index ca21539..4503370 100644
--- a/aom_dsp/x86/quantize_ssse3_x86_64.asm
+++ b/aom_dsp/x86/quantize_ssse3_x86_64.asm
@@ -53,7 +53,7 @@
%endif
pxor m5, m5 ; m5 = dedicated zero
DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea coeffq, [ coeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -66,7 +66,7 @@
neg ncoeffq
; get DC and first 15 AC coeffs
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; coeff stored as 32bit numbers & require 16bit numbers
mova m9, [ coeffq+ncoeffq*4+ 0]
packssdw m9, [ coeffq+ncoeffq*4+16]
@@ -96,7 +96,7 @@
psignw m13, m10 ; m13 = reinsert sign
pand m8, m7
pand m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m8
mova m6, m8
@@ -131,7 +131,7 @@
psignw m8, m9
psignw m13, m10
%endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m8
mova m6, m8
@@ -166,7 +166,7 @@
jz .accumulate_eob
.ac_only_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; pack coeff from 32bit to 16bit array
mova m9, [ coeffq+ncoeffq*4+ 0]
packssdw m9, [ coeffq+ncoeffq*4+16]
@@ -198,7 +198,7 @@
psignw m13, m10 ; m13 = reinsert sign
pand m14, m7
pand m13, m12
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pxor m11, m11
mova m11, m14
@@ -233,7 +233,7 @@
psignw m14, m9
psignw m13, m10
%endif
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
mova m11, m14
mova m6, m14
@@ -271,7 +271,7 @@
%ifidn %1, b_32x32
jmp .accumulate_eob
.skip_iter:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4+ 0], m5
mova [qcoeffq+ncoeffq*4+16], m5
mova [qcoeffq+ncoeffq*4+32], m5
@@ -310,7 +310,7 @@
mov r2, qcoeffmp
mov r3, eobmp
DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
%else
@@ -320,7 +320,7 @@
neg ncoeffq
pxor m7, m7
.blank_loop:
-%if CONFIG_VP9_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
mova [dqcoeffq+ncoeffq*4+ 0], m7
mova [dqcoeffq+ncoeffq*4+16], m7
mova [dqcoeffq+ncoeffq*4+32], m7
diff --git a/aom_dsp/x86/sad4d_avx2.c b/aom_dsp/x86/sad4d_avx2.c
index 585d473..9609a94 100644
--- a/aom_dsp/x86/sad4d_avx2.c
+++ b/aom_dsp/x86/sad4d_avx2.c
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <immintrin.h> // AVX2
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
-void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
+void aom_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
const uint8_t *const ref[4], int ref_stride,
uint32_t res[4]) {
__m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
@@ -79,7 +79,7 @@
}
}
-void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
+void aom_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
const uint8_t *const ref[4], int ref_stride,
uint32_t res[4]) {
__m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
diff --git a/aom_dsp/x86/sad4d_sse2.asm b/aom_dsp/x86/sad4d_sse2.asm
index 6d49869..4f4b799 100644
--- a/aom_dsp/x86/sad4d_sse2.asm
+++ b/aom_dsp/x86/sad4d_sse2.asm
@@ -181,7 +181,7 @@
PROCESS_64x2x4 0, %4, %5, %4 + 64, %5 + 64, %6
%endmacro
-; void vpx_sadNxNx4d_sse2(uint8_t *src, int src_stride,
+; void aom_sadNxNx4d_sse2(uint8_t *src, int src_stride,
; uint8_t *ref[4], int ref_stride,
; uint32_t res[4]);
; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 and 4x4
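The x4d kernels amortize one pass over the source block across four reference candidates, the hot path of motion search. The SIMD versions interleave the four references inside the pixel loops; semantically they compute the scalar form below (sad_nxn_x4d_sketch is an illustrative name):

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar sketch of the NxNx4d SAD: one SAD per reference candidate,
     * all sharing a single pass over src. */
    static void sad_nxn_x4d_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *const ref[4], int ref_stride,
                                   int n, uint32_t res[4]) {
      for (int k = 0; k < 4; ++k) {
        uint32_t sad = 0;
        for (int y = 0; y < n; ++y)
          for (int x = 0; x < n; ++x)
            sad += (uint32_t)abs(src[y * src_stride + x] -
                                 ref[k][y * ref_stride + x]);
        res[k] = sad;
      }
    }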
diff --git a/aom_dsp/x86/sad_avx2.c b/aom_dsp/x86/sad_avx2.c
index c66ab7c..34d8c0d 100644
--- a/aom_dsp/x86/sad_avx2.c
+++ b/aom_dsp/x86/sad_avx2.c
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <immintrin.h>
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
#define FSAD64_H(h) \
- unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride, \
+ unsigned int aom_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride, \
const uint8_t *ref_ptr, int ref_stride) { \
int i, res; \
__m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
@@ -40,7 +40,7 @@
}
#define FSAD32_H(h) \
- unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride, \
+ unsigned int aom_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride, \
const uint8_t *ref_ptr, int ref_stride) { \
int i, res; \
__m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
@@ -89,7 +89,7 @@
#undef FSAD32_H
#define FSADAVG64_H(h) \
- unsigned int vpx_sad64x##h##_avg_avx2( \
+ unsigned int aom_sad64x##h##_avg_avx2( \
const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
int ref_stride, const uint8_t *second_pred) { \
int i, res; \
@@ -123,7 +123,7 @@
}
#define FSADAVG32_H(h) \
- unsigned int vpx_sad32x##h##_avg_avx2( \
+ unsigned int aom_sad32x##h##_avg_avx2( \
const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
int ref_stride, const uint8_t *second_pred) { \
int i, res; \
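The _avg variants measure SAD against the average of the reference and a second predictor, i.e. the compound-prediction signal. A scalar sketch, assuming round-to-nearest averaging as in the generic C reference:

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar sketch of the _avg SAD: the reference is first averaged with
     * second_pred (rounding up), then compared against src. */
    static uint32_t sad_avg_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   const uint8_t *second_pred, int w, int h) {
      uint32_t sad = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          const int avg = (ref[x] + second_pred[x] + 1) >> 1;
          sad += (uint32_t)abs(src[x] - avg);
        }
        src += src_stride;
        ref += ref_stride;
        second_pred += w;
      }
      return sad;
    }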
diff --git a/aom_dsp/x86/sad_sse2.asm b/aom_dsp/x86/sad_sse2.asm
index edef2a7..01ae405 100644
--- a/aom_dsp/x86/sad_sse2.asm
+++ b/aom_dsp/x86/sad_sse2.asm
@@ -45,7 +45,7 @@
%endmacro
%if CONFIG_EXT_PARTITION
-; unsigned int vpx_sad128x128_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad128x128_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD128XN 1-2 0
SAD_FN 128, %1, 5, %2
@@ -114,7 +114,7 @@
%endif
-; unsigned int vpx_sad64x64_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad64x64_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD64XN 1-2 0
SAD_FN 64, %1, 5, %2
@@ -161,7 +161,7 @@
SAD64XN 64, 1 ; sad64x64_avg_sse2
SAD64XN 32, 1 ; sad64x32_avg_sse2
-; unsigned int vpx_sad32x32_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad32x32_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD32XN 1-2 0
SAD_FN 32, %1, 5, %2
@@ -206,7 +206,7 @@
SAD32XN 32, 1 ; sad32x32_avg_sse2
SAD32XN 16, 1 ; sad32x16_avg_sse2
-; unsigned int vpx_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD16XN 1-2 0
SAD_FN 16, %1, 7, %2
@@ -252,7 +252,7 @@
SAD16XN 16, 1 ; sad16x16_avg_sse2
SAD16XN 8, 1 ; sad16x8_avg_sse2
-; unsigned int vpx_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD8XN 1-2 0
SAD_FN 8, %1, 7, %2
@@ -296,7 +296,7 @@
SAD8XN 8, 1 ; sad8x8_avg_sse2
SAD8XN 4, 1 ; sad8x4_avg_sse2
-; unsigned int vpx_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
+; unsigned int aom_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
%macro SAD4XN 1-2 0
SAD_FN 4, %1, 7, %2
diff --git a/aom_dsp/x86/sad_sse3.asm b/aom_dsp/x86/sad_sse3.asm
index 4665fb9..1de346e 100644
--- a/aom_dsp/x86/sad_sse3.asm
+++ b/aom_dsp/x86/sad_sse3.asm
@@ -165,14 +165,14 @@
paddw mm7, mm3
%endmacro
-;void int vpx_sad16x16x3_sse3(
+;void aom_sad16x16x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad16x16x3_sse3) PRIVATE
-sym(vpx_sad16x16x3_sse3):
+global sym(aom_sad16x16x3_sse3) PRIVATE
+sym(aom_sad16x16x3_sse3):
STACK_FRAME_CREATE_X3
@@ -207,14 +207,14 @@
STACK_FRAME_DESTROY_X3
-;void int vpx_sad16x8x3_sse3(
+;void aom_sad16x8x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad16x8x3_sse3) PRIVATE
-sym(vpx_sad16x8x3_sse3):
+global sym(aom_sad16x8x3_sse3) PRIVATE
+sym(aom_sad16x8x3_sse3):
STACK_FRAME_CREATE_X3
@@ -245,14 +245,14 @@
STACK_FRAME_DESTROY_X3
-;void int vpx_sad8x16x3_sse3(
+;void aom_sad8x16x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad8x16x3_sse3) PRIVATE
-sym(vpx_sad8x16x3_sse3):
+global sym(aom_sad8x16x3_sse3) PRIVATE
+sym(aom_sad8x16x3_sse3):
STACK_FRAME_CREATE_X3
@@ -274,14 +274,14 @@
STACK_FRAME_DESTROY_X3
-;void int vpx_sad8x8x3_sse3(
+;void aom_sad8x8x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad8x8x3_sse3) PRIVATE
-sym(vpx_sad8x8x3_sse3):
+global sym(aom_sad8x8x3_sse3) PRIVATE
+sym(aom_sad8x8x3_sse3):
STACK_FRAME_CREATE_X3
@@ -299,14 +299,14 @@
STACK_FRAME_DESTROY_X3
-;void int vpx_sad4x4x3_sse3(
+;void aom_sad4x4x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad4x4x3_sse3) PRIVATE
-sym(vpx_sad4x4x3_sse3):
+global sym(aom_sad4x4x3_sse3) PRIVATE
+sym(aom_sad4x4x3_sse3):
STACK_FRAME_CREATE_X3
diff --git a/aom_dsp/x86/sad_sse4.asm b/aom_dsp/x86/sad_sse4.asm
index 07e28b4..fd2c70b 100644
--- a/aom_dsp/x86/sad_sse4.asm
+++ b/aom_dsp/x86/sad_sse4.asm
@@ -165,14 +165,14 @@
movdqa [rdi + 16], xmm2
%endmacro
-;void vpx_sad16x16x8_sse4_1(
+;void aom_sad16x16x8_sse4_1(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array);
-global sym(vpx_sad16x16x8_sse4_1) PRIVATE
-sym(vpx_sad16x16x8_sse4_1):
+global sym(aom_sad16x16x8_sse4_1) PRIVATE
+sym(aom_sad16x16x8_sse4_1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -205,15 +205,15 @@
ret
-;void vpx_sad16x8x8_sse4_1(
+;void aom_sad16x8x8_sse4_1(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
-global sym(vpx_sad16x8x8_sse4_1) PRIVATE
-sym(vpx_sad16x8x8_sse4_1):
+global sym(aom_sad16x8x8_sse4_1) PRIVATE
+sym(aom_sad16x8x8_sse4_1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -242,15 +242,15 @@
ret
-;void vpx_sad8x8x8_sse4_1(
+;void aom_sad8x8x8_sse4_1(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
-global sym(vpx_sad8x8x8_sse4_1) PRIVATE
-sym(vpx_sad8x8x8_sse4_1):
+global sym(aom_sad8x8x8_sse4_1) PRIVATE
+sym(aom_sad8x8x8_sse4_1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -279,15 +279,15 @@
ret
-;void vpx_sad8x16x8_sse4_1(
+;void aom_sad8x16x8_sse4_1(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
-global sym(vpx_sad8x16x8_sse4_1) PRIVATE
-sym(vpx_sad8x16x8_sse4_1):
+global sym(aom_sad8x16x8_sse4_1) PRIVATE
+sym(aom_sad8x16x8_sse4_1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -320,15 +320,15 @@
ret
-;void vpx_sad4x4x8_sse4_1(
+;void aom_sad4x4x8_sse4_1(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
-global sym(vpx_sad4x4x8_sse4_1) PRIVATE
-sym(vpx_sad4x4x8_sse4_1):
+global sym(aom_sad4x4x8_sse4_1) PRIVATE
+sym(aom_sad4x4x8_sse4_1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
diff --git a/aom_dsp/x86/sad_ssse3.asm b/aom_dsp/x86/sad_ssse3.asm
index 8315f97..b1c97ea 100644
--- a/aom_dsp/x86/sad_ssse3.asm
+++ b/aom_dsp/x86/sad_ssse3.asm
@@ -146,14 +146,14 @@
%endmacro
-;void int vpx_sad16x16x3_ssse3(
+;void aom_sad16x16x3_ssse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad16x16x3_ssse3) PRIVATE
-sym(vpx_sad16x16x3_ssse3):
+global sym(aom_sad16x16x3_ssse3) PRIVATE
+sym(aom_sad16x16x3_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -169,31 +169,31 @@
mov rdx, 0xf
and rdx, rdi
- jmp .vpx_sad16x16x3_ssse3_skiptable
-.vpx_sad16x16x3_ssse3_jumptable:
- dd .vpx_sad16x16x3_ssse3_aligned_by_0 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_1 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_2 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_3 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_4 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_5 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_6 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_7 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_8 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_9 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_10 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_11 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_12 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_13 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_14 - .vpx_sad16x16x3_ssse3_do_jump
- dd .vpx_sad16x16x3_ssse3_aligned_by_15 - .vpx_sad16x16x3_ssse3_do_jump
-.vpx_sad16x16x3_ssse3_skiptable:
+ jmp .aom_sad16x16x3_ssse3_skiptable
+.aom_sad16x16x3_ssse3_jumptable:
+ dd .aom_sad16x16x3_ssse3_aligned_by_0 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_1 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_2 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_3 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_4 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_5 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_6 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_7 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_8 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_9 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_10 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_11 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_12 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_13 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_14 - .aom_sad16x16x3_ssse3_do_jump
+ dd .aom_sad16x16x3_ssse3_aligned_by_15 - .aom_sad16x16x3_ssse3_do_jump
+.aom_sad16x16x3_ssse3_skiptable:
- call .vpx_sad16x16x3_ssse3_do_jump
-.vpx_sad16x16x3_ssse3_do_jump:
+ call .aom_sad16x16x3_ssse3_do_jump
+.aom_sad16x16x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
- mov rax, .vpx_sad16x16x3_ssse3_jumptable - .vpx_sad16x16x3_ssse3_do_jump
- add rax, rcx ; get the absolute address of vpx_sad16x16x3_ssse3_jumptable
+ mov rax, .aom_sad16x16x3_ssse3_jumptable - .aom_sad16x16x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of aom_sad16x16x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax
@@ -203,23 +203,23 @@
jmp rcx
- PROCESS_16X16X3_OFFSET 0, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 1, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 2, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 3, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 4, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 5, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 6, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 7, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 8, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 9, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 10, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 11, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 12, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 13, .vpx_sad16x16x3_ssse3
- PROCESS_16X16X3_OFFSET 14, .vpx_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 0, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 1, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 2, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 3, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 4, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 5, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 6, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 7, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 8, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 9, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 10, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 11, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 12, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 13, .aom_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 14, .aom_sad16x16x3_ssse3
-.vpx_sad16x16x3_ssse3_aligned_by_15:
+.aom_sad16x16x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
@@ -229,7 +229,7 @@
PROCESS_16X2X3 0
PROCESS_16X2X3 0
-.vpx_sad16x16x3_ssse3_store_off:
+.aom_sad16x16x3_ssse3_store_off:
mov rdi, arg(4) ;Results
movq xmm0, xmm5
@@ -259,14 +259,14 @@
pop rbp
ret
-;void int vpx_sad16x8x3_ssse3(
+;void aom_sad16x8x3_ssse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
-global sym(vpx_sad16x8x3_ssse3) PRIVATE
-sym(vpx_sad16x8x3_ssse3):
+global sym(aom_sad16x8x3_ssse3) PRIVATE
+sym(aom_sad16x8x3_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@@ -282,31 +282,31 @@
mov rdx, 0xf
and rdx, rdi
- jmp .vpx_sad16x8x3_ssse3_skiptable
-.vpx_sad16x8x3_ssse3_jumptable:
- dd .vpx_sad16x8x3_ssse3_aligned_by_0 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_1 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_2 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_3 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_4 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_5 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_6 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_7 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_8 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_9 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_10 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_11 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_12 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_13 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_14 - .vpx_sad16x8x3_ssse3_do_jump
- dd .vpx_sad16x8x3_ssse3_aligned_by_15 - .vpx_sad16x8x3_ssse3_do_jump
-.vpx_sad16x8x3_ssse3_skiptable:
+ jmp .aom_sad16x8x3_ssse3_skiptable
+.aom_sad16x8x3_ssse3_jumptable:
+ dd .aom_sad16x8x3_ssse3_aligned_by_0 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_1 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_2 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_3 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_4 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_5 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_6 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_7 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_8 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_9 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_10 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_11 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_12 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_13 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_14 - .aom_sad16x8x3_ssse3_do_jump
+ dd .aom_sad16x8x3_ssse3_aligned_by_15 - .aom_sad16x8x3_ssse3_do_jump
+.aom_sad16x8x3_ssse3_skiptable:
- call .vpx_sad16x8x3_ssse3_do_jump
-.vpx_sad16x8x3_ssse3_do_jump:
+ call .aom_sad16x8x3_ssse3_do_jump
+.aom_sad16x8x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
- mov rax, .vpx_sad16x8x3_ssse3_jumptable - .vpx_sad16x8x3_ssse3_do_jump
- add rax, rcx ; get the absolute address of vpx_sad16x8x3_ssse3_jumptable
+ mov rax, .aom_sad16x8x3_ssse3_jumptable - .aom_sad16x8x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of aom_sad16x8x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax
@@ -316,30 +316,30 @@
jmp rcx
- PROCESS_16X8X3_OFFSET 0, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 1, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 2, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 3, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 4, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 5, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 6, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 7, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 8, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 9, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 10, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 11, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 12, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 13, .vpx_sad16x8x3_ssse3
- PROCESS_16X8X3_OFFSET 14, .vpx_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 0, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 1, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 2, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 3, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 4, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 5, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 6, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 7, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 8, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 9, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 10, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 11, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 12, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 13, .aom_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 14, .aom_sad16x8x3_ssse3
-.vpx_sad16x8x3_ssse3_aligned_by_15:
+.aom_sad16x8x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
-.vpx_sad16x8x3_ssse3_store_off:
+.aom_sad16x8x3_ssse3_store_off:
mov rdi, arg(4) ;Results
movq xmm0, xmm5
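The jump tables above exist because these SSSE3 kernels are specialized on the 16-byte misalignment of ref_ptr (aligned_by_0 through aligned_by_15; the x3 variants appear to return SADs at ref, ref+1 and ref+2 in one pass). The offsets-plus-call/pop dance keeps the table position-independent. The PIC trick itself has no direct C equivalent, but the dispatch does; a portable sketch with a hypothetical function table:

    #include <stdint.h>

    typedef unsigned int (*sad_fn_t)(const unsigned char *src, int src_stride,
                                     const unsigned char *ref, int ref_stride);

    /* Sketch: pick one of 16 specializations from the low four bits of the
     * reference pointer, i.e. its misalignment within a 16-byte line. */
    static unsigned int sad_dispatch_on_alignment(
        const unsigned char *src, int src_stride, const unsigned char *ref,
        int ref_stride, const sad_fn_t table[16]) {
      const unsigned misalign = (unsigned)((uintptr_t)ref & 0xf);
      return table[misalign](src, src_stride, ref, ref_stride);
    }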
diff --git a/aom_dsp/x86/ssim_opt_x86_64.asm b/aom_dsp/x86/ssim_opt_x86_64.asm
index fc49c30..ebc3703 100644
--- a/aom_dsp/x86/ssim_opt_x86_64.asm
+++ b/aom_dsp/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vpx_ssim_parms_16x16_sse2) PRIVATE
-sym(vpx_ssim_parms_16x16_sse2):
+global sym(aom_ssim_parms_16x16_sse2) PRIVATE
+sym(aom_ssim_parms_16x16_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
-global sym(vpx_ssim_parms_8x8_sse2) PRIVATE
-sym(vpx_ssim_parms_8x8_sse2):
+global sym(aom_ssim_parms_8x8_sse2) PRIVATE
+sym(aom_ssim_parms_8x8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
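These ssim_parms kernels appear to accumulate the five window sums an SSIM evaluation needs: sum_s, sum_r, sum_sq_s, sum_sq_r and sum_sxr (hence the nine shadowed arguments). From those, means, variances and the covariance follow, feeding the standard SSIM form:

    mu_s = sum_s / N                sigma_s^2  = sum_sq_s / N - mu_s^2
    mu_r = sum_r / N                sigma_sr   = sum_sxr / N - mu_s * mu_r

    SSIM(s, r) = ((2 * mu_s * mu_r + C1) * (2 * sigma_sr + C2))
                 / ((mu_s^2 + mu_r^2 + C1) * (sigma_s^2 + sigma_r^2 + C2))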
diff --git a/aom_dsp/x86/subpel_variance_sse2.asm b/aom_dsp/x86/subpel_variance_sse2.asm
index cee4468..899167a 100644
--- a/aom_dsp/x86/subpel_variance_sse2.asm
+++ b/aom_dsp/x86/subpel_variance_sse2.asm
@@ -39,7 +39,7 @@
SECTION .text
-; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
+; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
; int x_offset, int y_offset,
; const uint8_t *dst, ptrdiff_t dst_stride,
; int height, unsigned int *sse);
diff --git a/aom_dsp/x86/subtract_sse2.asm b/aom_dsp/x86/subtract_sse2.asm
index 2225b7c..fe2830e 100644
--- a/aom_dsp/x86/subtract_sse2.asm
+++ b/aom_dsp/x86/subtract_sse2.asm
@@ -12,7 +12,7 @@
SECTION .text
-; void vpx_subtract_block(int rows, int cols,
+; void aom_subtract_block(int rows, int cols,
; int16_t *diff, ptrdiff_t diff_stride,
; const uint8_t *src, ptrdiff_t src_stride,
; const uint8_t *pred, ptrdiff_t pred_stride)
diff --git a/aom_dsp/x86/sum_squares_sse2.c b/aom_dsp/x86/sum_squares_sse2.c
index 958493c..eb1d912 100644
--- a/aom_dsp/x86/sum_squares_sse2.c
+++ b/aom_dsp/x86/sum_squares_sse2.c
@@ -14,9 +14,9 @@
#include "aom_dsp/x86/synonyms.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
-static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
+static uint64_t aom_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
int stride) {
const __m128i v_val_0_w =
_mm_loadl_epi64((const __m128i *)(src + 0 * stride));
@@ -44,12 +44,12 @@
#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
-// vpx_sum_squares_2d_i16_sse2, which in turn saves some stack
+// aom_sum_squares_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
static uint64_t
-vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
+aom_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
int r, c;
const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
@@ -118,15 +118,15 @@
#endif
}
-uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int size) {
+uint64_t aom_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int size) {
// 4 elements per row only requires half an XMM register, so this
// must be a special case, but also note that over 75% of all calls
// are with size == 4, so it is also the common case.
if (LIKELY(size == 4)) {
- return vpx_sum_squares_2d_i16_4x4_sse2(src, stride);
+ return aom_sum_squares_2d_i16_4x4_sse2(src, stride);
} else {
// Generic case
- return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size);
+ return aom_sum_squares_2d_i16_nxn_sse2(src, stride, size);
}
}
@@ -134,7 +134,7 @@
// 1D version
//////////////////////////////////////////////////////////////////////////////
-static uint64_t vpx_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
+static uint64_t aom_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
__m128i v_acc0_q = _mm_setzero_si128();
__m128i v_acc1_q = _mm_setzero_si128();
@@ -192,14 +192,14 @@
#endif
}
-uint64_t vpx_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
+uint64_t aom_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
if (n % 64 == 0) {
- return vpx_sum_squares_i16_64n_sse2(src, n);
+ return aom_sum_squares_i16_64n_sse2(src, n);
} else if (n > 64) {
int k = n & ~(64 - 1);
- return vpx_sum_squares_i16_64n_sse2(src, k) +
- vpx_sum_squares_i16_c(src + k, n - k);
+ return aom_sum_squares_i16_64n_sse2(src, k) +
+ aom_sum_squares_i16_c(src + k, n - k);
} else {
- return vpx_sum_squares_i16_c(src, n);
+ return aom_sum_squares_i16_c(src, n);
}
}
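A note on v_zext_mask_q above: _mm_madd_epi16 produces packed 32-bit partial sums, which would overflow if accumulated in 32-bit lanes over a large block, so the kernels widen them into 64-bit lanes. A sketch of that step (assuming v_sq_d holds the packed 32-bit sums of squares; accumulate_u32_to_u64 is an illustrative name):

    #include <emmintrin.h>

    /* Add the four 32-bit lanes of v_sq_d into the two 64-bit lanes of
     * v_acc_q without overflow: mask keeps the even lanes zero-extended,
     * the shift brings the odd lanes down into the low half. */
    static __m128i accumulate_u32_to_u64(__m128i v_acc_q, __m128i v_sq_d) {
      const __m128i v_zext_mask_q =
          _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
      v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_sq_d, v_zext_mask_q));
      return _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_sq_d, 32));
    }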
diff --git a/aom_dsp/x86/synonyms.h b/aom_dsp/x86/synonyms.h
index e815f7e..b38bb35 100644
--- a/aom_dsp/x86/synonyms.h
+++ b/aom_dsp/x86/synonyms.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_SYNONYS_H_
-#define VPX_DSP_X86_SYNONYS_H_
+#ifndef AOM_DSP_X86_SYNONYS_H_
+#define AOM_DSP_X86_SYNONYS_H_
#include <immintrin.h>
-#include "./vpx_config.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "aom/aom_integer.h"
/**
* Various reusable shorthands for x86 SIMD intrinsics.
@@ -108,4 +108,4 @@
}
#endif // __SSSE3__
-#endif // VPX_DSP_X86_SYNONYS_H_
+#endif // AOM_DSP_X86_SYNONYS_H_
diff --git a/aom_dsp/x86/txfm_common_sse2.h b/aom_dsp/x86/txfm_common_sse2.h
index aed7d4e..6f32d09 100644
--- a/aom_dsp/x86/txfm_common_sse2.h
+++ b/aom_dsp/x86/txfm_common_sse2.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_DSP_X86_TXFM_COMMON_SSE2_H_
-#define VPX_DSP_X86_TXFM_COMMON_SSE2_H_
+#ifndef AOM_DSP_X86_TXFM_COMMON_SSE2_H_
+#define AOM_DSP_X86_TXFM_COMMON_SSE2_H_
#include <emmintrin.h>
-#include "aom/vpx_integer.h"
+#include "aom/aom_integer.h"
#define pair_set_epi16(a, b) \
_mm_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
@@ -33,4 +33,4 @@
return _mm_shuffle_epi32(b, 0x4e);
}
-#endif // VPX_DSP_X86_TXFM_COMMON_SSE2_H_
+#endif // AOM_DSP_X86_TXFM_COMMON_SSE2_H_
diff --git a/aom_dsp/x86/variance_avx2.c b/aom_dsp/x86/variance_avx2.c
index 7bc2693..e603711 100644
--- a/aom_dsp/x86/variance_avx2.c
+++ b/aom_dsp/x86/variance_avx2.c
@@ -7,13 +7,13 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
typedef void (*get_var_avx2)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse, int *sum);
-void vpx_get32x32var_avx2(const uint8_t *src, int src_stride,
+void aom_get32x32var_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride, unsigned int *sse,
int *sum);
@@ -38,104 +38,104 @@
}
}
-unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance16x16_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
- vpx_get16x16var_avx2, 16);
+ aom_get16x16var_avx2, 16);
return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
}
-unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x16_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
- vpx_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum);
+ aom_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum);
return *sse;
}
-unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x16_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
- vpx_get32x32var_avx2, 32);
+ aom_get32x32var_avx2, 32);
return *sse - (((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x32_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
- vpx_get32x32var_avx2, 32);
+ aom_get32x32var_avx2, 32);
return *sse - (((int64_t)sum * sum) >> 10);
}
-unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x64_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
- vpx_get32x32var_avx2, 32);
+ aom_get32x32var_avx2, 32);
return *sse - (((int64_t)sum * sum) >> 12);
}
-unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x32_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
- vpx_get32x32var_avx2, 32);
+ aom_get32x32var_avx2, 32);
return *sse - (((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
int x_offset, int y_offset,
const uint8_t *dst, int dst_stride,
int height, unsigned int *sse);
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+unsigned int aom_sub_pixel_avg_variance32xh_avx2(
const uint8_t *src, int src_stride, int x_offset, int y_offset,
const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
int height, unsigned int *sseptr);
-unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
+unsigned int aom_sub_pixel_variance64x64_avx2(const uint8_t *src,
int src_stride, int x_offset,
int y_offset, const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
unsigned int sse1;
- const int se1 = vpx_sub_pixel_variance32xh_avx2(
+ const int se1 = aom_sub_pixel_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);
unsigned int sse2;
const int se2 =
- vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
+ aom_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
dst + 32, dst_stride, 64, &sse2);
const int se = se1 + se2;
*sse = sse1 + sse2;
return *sse - (((int64_t)se * se) >> 12);
}
-unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
+unsigned int aom_sub_pixel_variance32x32_avx2(const uint8_t *src,
int src_stride, int x_offset,
int y_offset, const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
- const int se = vpx_sub_pixel_variance32xh_avx2(
+ const int se = aom_sub_pixel_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
return *sse - (((int64_t)se * se) >> 10);
}
-unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
+unsigned int aom_sub_pixel_avg_variance64x64_avx2(
const uint8_t *src, int src_stride, int x_offset, int y_offset,
const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
unsigned int sse1;
- const int se1 = vpx_sub_pixel_avg_variance32xh_avx2(
+ const int se1 = aom_sub_pixel_avg_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);
unsigned int sse2;
- const int se2 = vpx_sub_pixel_avg_variance32xh_avx2(
+ const int se2 = aom_sub_pixel_avg_variance32xh_avx2(
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32,
64, 64, &sse2);
const int se = se1 + se2;
@@ -145,11 +145,11 @@
return *sse - (((int64_t)se * se) >> 12);
}
-unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
+unsigned int aom_sub_pixel_avg_variance32x32_avx2(
const uint8_t *src, int src_stride, int x_offset, int y_offset,
const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
// Process 32 elements in parallel.
- const int se = vpx_sub_pixel_avg_variance32xh_avx2(
+ const int se = aom_sub_pixel_avg_variance32xh_avx2(
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
return *sse - (((int64_t)se * se) >> 10);
}
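The final line of every wrapper above is the identity variance = sse - sum^2 / N with N = W * H, and since N is a power of two the division is a shift by log2(W * H):

    16x16: N = 256  -> >> 8        32x16: N = 512  -> >> 9
    32x32: N = 1024 -> >> 10       64x32: N = 2048 -> >> 11
    64x64: N = 4096 -> >> 12

The 64-wide sub-pixel kernels split the block into two 32-wide halves, sum the partial results (se = se1 + se2, sse = sse1 + sse2), and apply the one final reduction, sse - (se * se >> 12).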
diff --git a/aom_dsp/x86/variance_impl_avx2.c b/aom_dsp/x86/variance_impl_avx2.c
index 3166025..0e2d145 100644
--- a/aom_dsp/x86/variance_impl_avx2.c
+++ b/aom_dsp/x86/variance_impl_avx2.c
@@ -10,7 +10,7 @@
#include <immintrin.h> // AVX2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
/* clang-format off */
@@ -34,7 +34,7 @@
};
/* clang-format on */
-void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
+void aom_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
const unsigned char *ref_ptr, int recon_stride,
unsigned int *SSE, int *Sum) {
__m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
@@ -140,7 +140,7 @@
}
}
-void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride,
+void aom_get32x32var_avx2(const unsigned char *src_ptr, int source_stride,
const unsigned char *ref_ptr, int recon_stride,
unsigned int *SSE, int *Sum) {
__m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
@@ -297,7 +297,7 @@
sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \
_mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
-unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
+unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
int x_offset, int y_offset,
const uint8_t *dst, int dst_stride,
int height, unsigned int *sse) {
@@ -484,7 +484,7 @@
return sum;
}
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+unsigned int aom_sub_pixel_avg_variance32xh_avx2(
const uint8_t *src, int src_stride, int x_offset, int y_offset,
const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
int height, unsigned int *sse) {
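The sub-pixel variance kernels run a two-pass bilinear filter before accumulating, with the filter phase chosen by x_offset/y_offset from an 8-entry two-tap table (the epi8 table behind the clang-format guard above; the C reference appears to use {128,0}, {112,16}, ..., {16,112} with a 7-bit rounding shift). A sketch of the horizontal first pass under that assumption (the name is illustrative):

    #include <stdint.h>

    #define FILTER_BITS 7

    /* First-pass horizontal bilinear filter: out is an intermediate
     * 16-bit buffer that the vertical pass consumes. */
    static void var_filter_bil_h_sketch(const uint8_t *src, int src_stride,
                                        uint16_t *out, int w, int h,
                                        const uint8_t filter[2]) {
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x)
          out[x] = (uint16_t)((src[x] * filter[0] + src[x + 1] * filter[1] +
                               (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
        src += src_stride;
        out += w;
      }
    }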
diff --git a/aom_dsp/x86/variance_sse2.c b/aom_dsp/x86/variance_sse2.c
index 0788850..e0397d3 100644
--- a/aom_dsp/x86/variance_sse2.c
+++ b/aom_dsp/x86/variance_sse2.c
@@ -10,8 +10,8 @@
#include <emmintrin.h> // SSE2
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
@@ -19,7 +19,7 @@
const unsigned char *ref, int ref_stride,
unsigned int *sse, int *sum);
-unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
+unsigned int aom_get_mb_ss_sse2(const int16_t *src) {
__m128i vsum = _mm_setzero_si128();
int i;
@@ -65,7 +65,7 @@
*sse = _mm_cvtsi128_si32(vsum);
}
-void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
+void aom_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
int ref_stride, unsigned int *sse, int *sum) {
const __m128i zero = _mm_setzero_si128();
__m128i vsum = _mm_setzero_si128();
@@ -103,7 +103,7 @@
*sse = _mm_cvtsi128_si32(vsse);
}
-void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
+void aom_get16x16var_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride, unsigned int *sse,
int *sum) {
const __m128i zero = _mm_setzero_si128();
@@ -165,7 +165,7 @@
}
}
-unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance4x4_sse2(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse) {
int sum;
@@ -173,7 +173,7 @@
return *sse - ((sum * sum) >> 4);
}
-unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance8x4_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
@@ -182,7 +182,7 @@
return *sse - ((sum * sum) >> 5);
}
-unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance4x8_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
@@ -191,126 +191,126 @@
return *sse - ((sum * sum) >> 5);
}
-unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance8x8_sse2(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse) {
int sum;
- vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
+ aom_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
return *sse - ((sum * sum) >> 6);
}
-unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance16x8_sse2(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 16, 8, sse, &sum,
- vpx_get8x8var_sse2, 8);
+ aom_get8x8var_sse2, 8);
return *sse - ((sum * sum) >> 7);
}
-unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance8x16_sse2(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 8, 16, sse, &sum,
- vpx_get8x8var_sse2, 8);
+ aom_get8x8var_sse2, 8);
return *sse - ((sum * sum) >> 7);
}
-unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride,
+unsigned int aom_variance16x16_sse2(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse) {
int sum;
- vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
+ aom_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
}
-unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x32_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 10);
}
-unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x16_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance16x32_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 16, 32, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x64_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 12);
}
-unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance64x32_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_variance32x64_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 32, 64, sse, &sum,
- vpx_get16x16var_sse2, 16);
+ aom_get16x16var_sse2, 16);
return *sse - (((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse8x8_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
- vpx_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
+ aom_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse8x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse8x16_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
- vpx_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
+ aom_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse16x8_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x8_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
- vpx_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
+ aom_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride,
+unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) {
- vpx_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
+ aom_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
return *sse;
}
// The 2 unused parameters are place holders for PIC enabled build.
// These definitions are for functions defined in subpel_variance.asm
#define DECL(w, opt) \
- int vpx_sub_pixel_variance##w##xh_##opt( \
+ int aom_sub_pixel_variance##w##xh_##opt( \
const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
void *unused0, void *unused)
@@ -325,27 +325,27 @@
#undef DECL
#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
- unsigned int vpx_sub_pixel_variance##w##x##h##_##opt( \
+ unsigned int aom_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) { \
unsigned int sse; \
- int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
+ int se = aom_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
y_offset, dst, dst_stride, \
h, &sse, NULL, NULL); \
if (w > wf) { \
unsigned int sse2; \
- int se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ int se2 = aom_sub_pixel_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_sub_pixel_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ se2 = aom_sub_pixel_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
@@ -379,7 +379,7 @@
// The 2 unused parameters are place holders for PIC enabled build.
#define DECL(w, opt) \
- int vpx_sub_pixel_avg_variance##w##xh_##opt( \
+ int aom_sub_pixel_avg_variance##w##xh_##opt( \
const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
@@ -395,28 +395,28 @@
#undef DECLS
#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
- unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt( \
+ unsigned int aom_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst, int dst_stride, unsigned int *sseptr, \
const uint8_t *sec) { \
unsigned int sse; \
- int se = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se = aom_sub_pixel_avg_variance##wf##xh_##opt( \
src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
NULL, NULL); \
if (w > wf) { \
unsigned int sse2; \
- int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ int se2 = aom_sub_pixel_avg_variance##wf##xh_##opt( \
src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
sec + 16, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
- se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_sub_pixel_avg_variance##wf##xh_##opt( \
src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
sec + 32, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
- se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ se2 = aom_sub_pixel_avg_variance##wf##xh_##opt( \
src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
sec + 48, w, h, &sse2, NULL, NULL); \
se += se2; \
@@ -448,7 +448,7 @@
#undef FNS
#undef FN
-void vpx_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
+void aom_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
const uint8_t *ref, int ref_stride) {
int i, j;
int stride = ref_stride << 3;
@@ -536,7 +536,7 @@
}
}
-void vpx_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
+void aom_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
int width, int height, const uint8_t *ref,
int ref_stride) {
const __m128i zero = _mm_set1_epi16(0);
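Finally, the FN macros above stitch wide blocks out of 16-wide column strips: for w > wf the strip kernel is re-run at src + 16, + 32 and + 48, and the partial se/sse values are summed before the single variance reduction, so one asm kernel per strip width covers every block size. A C sketch of that composition with a hypothetical 16-wide kernel type:

    #include <stdint.h>

    typedef int (*subpel_var16xh_fn)(const uint8_t *src, int src_stride,
                                     int x_offset, int y_offset,
                                     const uint8_t *dst, int dst_stride,
                                     int height, unsigned int *sse);

    /* Apply a 16-wide kernel to each 16-column strip of a w x h block and
     * combine the partial sums before the final reduction. */
    static unsigned int subpel_variance_wxh_sketch(
        subpel_var16xh_fn kernel, const uint8_t *src, int src_stride,
        int x_offset, int y_offset, const uint8_t *dst, int dst_stride,
        int w, int h, unsigned int *sse_ptr) {
      unsigned int sse = 0;
      int se = 0;
      for (int c = 0; c < w; c += 16) { /* one strip per 16 columns */
        unsigned int sse2;
        se += kernel(src + c, src_stride, x_offset, y_offset, dst + c,
                     dst_stride, h, &sse2);
        sse += sse2;
      }
      *sse_ptr = sse;
      /* final reduction: variance = sse - se^2 / (w * h) */
      return sse - (unsigned int)(((int64_t)se * se) / (w * h));
    }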