AVX2: Port av1_convolve_(x|y|2d)_sr from SVT-AV1
This commit also adds the following optimizations:
4-tap filter: widths 8, 16, 32, 64, 128
2-tap filter: widths 2, 4
Fixed a typo: convolve_8tap_unapck_avx2 -> convolve_8tap_unpack_avx2
The new convolve functions are 30% to 100% faster than the old code, but
the old code is kept in place to support the encoder-side-only 12-tap
filtering. As a result, there is some redundancy that can be removed in
later commits.
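
The dispatch keys off get_filter_tap() (added to av1/common/filter.h in
this change), which derives the effective tap count from the outermost
kernel coefficients. A minimal standalone sketch of that logic (the kernel
below is hypothetical, not an actual AV1 filter; the 12-tap case is
omitted):

  #include <stdint.h>
  #include <stdio.h>

  // Mirrors the 8-tap portion of get_filter_tap(): zero outermost
  // coefficients shrink the effective tap count.
  static int effective_taps(const int16_t f[8]) {
    if (f[0] | f[7]) return 8;
    if (f[1] | f[6]) return 6;
    if (f[2] | f[5]) return 4;
    return 2;
  }

  int main(void) {
    // Hypothetical symmetric kernel; AV1 kernels sum to 128 (1 << FILTER_BITS).
    const int16_t kernel[8] = { 0, 0, -12, 76, 76, -12, 0, 0 };
    printf("%d-tap\n", effective_taps(kernel));  // prints "4-tap"
    return 0;
  }
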
RTC Performance:
| SPD_SET | TESTSET | AVG_PSNR | OVR_PSNR | SSIM | ENC_T |
|---------|----------|----------|----------|---------|-------|
| 7 | rtc | +0.000% | +0.000% | +0.000% | -1.6% |
| 7 | rtc_derf | +0.000% | +0.000% | +0.000% | -1.5% |
|---------|----------|----------|----------|---------|-------|
| 8 | rtc | +0.000% | +0.000% | +0.000% | -1.1% |
| 8 | rtc_derf | +0.000% | +0.000% | +0.000% | -0.8% |
|---------|----------|----------|----------|---------|-------|
| 9 | rtc | +0.000% | +0.000% | +0.000% | -1.1% |
| 9 | rtc_derf | +0.000% | +0.000% | +0.000% | -0.8% |
|---------|----------|----------|----------|---------|-------|
| 10 | rtc | +0.000% | +0.000% | +0.000% | -0.9% |
| 10 | rtc_derf | +0.000% | +0.000% | +0.000% | -0.7% |
VOD Performance:
| SPD_SET | TESTSET | AVG_PSNR | OVR_PSNR | SSIM | ENC_T |
|---------|----------|----------|----------|---------|-------|
| 1 | lowres2 | +0.000% | +0.000% | +0.000% | -0.4% |
| 1 | midres2 | +0.000% | +0.000% | +0.000% | -0.4% |
|---------|----------|----------|----------|---------|-------|
| 6 | hdres2 | +0.000% | +0.000% | +0.000% | -0.5% |
| 6 | lowres2 | +0.000% | +0.000% | +0.000% | -0.5% |
| 6 | midres2 | +0.000% | +0.000% | +0.000% | -0.5% |
Change-Id: Ia75a3f286de712ec8152404fde847aaea4bc1f47
diff --git a/aom_dsp/x86/convolve_avx2.h b/aom_dsp/x86/convolve_avx2.h
index a00ede2..a709008 100644
--- a/aom_dsp/x86/convolve_avx2.h
+++ b/aom_dsp/x86/convolve_avx2.h
@@ -12,6 +12,13 @@
#ifndef AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_
#define AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_
+#include <immintrin.h>
+
+#include "aom_ports/mem.h"
+
+#include "av1/common/convolve.h"
+#include "av1/common/filter.h"
+
// filters for 16
DECLARE_ALIGNED(32, static const uint8_t, filt_global_avx2[]) = {
0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 0, 1, 1,
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 7511c88..cca538e 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -300,6 +300,25 @@
return (allow_interp_mask >> filt_type) & 1;
}
+static AOM_INLINE int get_filter_tap(
+ const InterpFilterParams *const filter_params, int subpel_qn) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_qn & SUBPEL_MASK);
+ if (filter_params->taps == 12) {
+ return 12;
+ }
+ if (filter[0] | filter[7]) {
+ return 8;
+ }
+ if (filter[1] | filter[6]) {
+ return 6;
+ }
+ if (filter[2] | filter[5]) {
+ return 4;
+ }
+ return 2;
+}
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/av1/common/x86/convolve_2d_avx2.c b/av1/common/x86/convolve_2d_avx2.c
index 04112ff..1b39a0a 100644
--- a/av1/common/x86/convolve_2d_avx2.c
+++ b/av1/common/x86/convolve_2d_avx2.c
@@ -13,19 +13,21 @@
#include "config/av1_rtcd.h"
+#include "third_party/SVT-AV1/convolve_2d_avx2.h"
+
#include "aom_dsp/x86/convolve_avx2.h"
-#include "aom_dsp/x86/convolve_common_intrin.h"
-#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/synonyms.h"
+
#include "av1/common/convolve.h"
-void av1_convolve_2d_sr_avx2(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const InterpFilterParams *filter_params_y,
- const int subpel_x_qn, const int subpel_y_qn,
- ConvolveParams *conv_params) {
+void av1_convolve_2d_sr_general_avx2(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w, int h,
+ const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y,
+ const int subpel_x_qn,
+ const int subpel_y_qn,
+ ConvolveParams *conv_params) {
if (filter_params_x->taps > 8) {
const int bd = 8;
int im_stride = 8, i;
@@ -92,29 +94,11 @@
__m256i filt[4], coeffs_h[4], coeffs_v[4];
- filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
- filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
-
prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs_h);
prepare_coeffs(filter_params_y, subpel_y_qn, coeffs_v);
- const int16_t *const filter_x = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const int16_t *const filter_y = av1_get_interp_filter_subpel_kernel(
- filter_params_y, subpel_y_qn & SUBPEL_MASK);
-
- int horiz_tap = SUBPEL_TAPS;
- int vert_tap = SUBPEL_TAPS;
-
- if (!(filter_x[0] | filter_x[1] | filter_x[6] | filter_x[7]))
- horiz_tap = 4;
- else if (!(filter_x[0] | filter_x[7]))
- horiz_tap = 6;
-
- if (!(filter_y[0] | filter_y[1] | filter_y[6] | filter_y[7]))
- vert_tap = 4;
- else if (!(filter_y[0] | filter_y[7]))
- vert_tap = 6;
+ int horiz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
+ int vert_tap = get_filter_tap(filter_params_y, subpel_y_qn);
if (horiz_tap == 6)
prepare_coeffs_6t_lowbd(filter_params_x, subpel_x_qn, coeffs_h);
@@ -131,8 +115,10 @@
const int fo_horiz = horiz_tap / 2 - 1;
const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
- filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
- filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));
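+  // The filt[1-4]_global_avx2 shuffle masks come from the SVT-AV1 port; they
+  // play the same role as the four 32-byte rows of filt_global_avx2 above.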
+ filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
+ filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
+ filt[2] = _mm256_load_si256((__m256i const *)filt3_global_avx2);
+ filt[3] = _mm256_load_si256((__m256i const *)filt4_global_avx2);
for (int j = 0; j < w; j += 8) {
if (horiz_tap == 4) {
@@ -153,3 +139,23 @@
}
}
}
+
+void av1_convolve_2d_sr_avx2(
+ const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+ int32_t w, int32_t h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int32_t subpel_x_q4,
+ const int32_t subpel_y_q4, ConvolveParams *conv_params) {
+ const int32_t tap_x = get_filter_tap(filter_params_x, subpel_x_q4);
+ const int32_t tap_y = get_filter_tap(filter_params_y, subpel_y_q4);
+
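+  // 12-tap filters are encoder-side only; they keep using the old general
+  // path, while everything else goes to the new SVT-AV1 port.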
+  const int use_general = (tap_x == 12 || tap_y == 12);
+ if (use_general) {
+ av1_convolve_2d_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y,
+ subpel_x_q4, subpel_y_q4, conv_params);
+ } else {
+ av1_convolve_2d_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y,
+ subpel_x_q4, subpel_y_q4, conv_params);
+ }
+}
diff --git a/av1/common/x86/convolve_avx2.c b/av1/common/x86/convolve_avx2.c
index 89e0a4c..30de982 100644
--- a/av1/common/x86/convolve_avx2.c
+++ b/av1/common/x86/convolve_avx2.c
@@ -13,16 +13,16 @@
#include "config/av1_rtcd.h"
+#include "third_party/SVT-AV1/convolve_avx2.h"
+
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/x86/convolve_common_intrin.h"
#include "aom_dsp/x86/convolve_avx2.h"
+#include "aom_dsp/x86/convolve_common_intrin.h"
#include "aom_dsp/x86/synonyms.h"
-void av1_convolve_y_sr_avx2(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_y,
- const int subpel_y_qn) {
- int i, j, vert_tap = SUBPEL_TAPS;
+static AOM_INLINE void av1_convolve_y_sr_general_avx2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn) {
// right shift is F-1 because we are already dividing
// filter co-efficients by 2
const int right_shift_bits = (FILTER_BITS - 1);
@@ -32,16 +32,7 @@
__m256i coeffs[6], s[12];
__m128i d[10];
- // Condition for checking valid vert_filt taps
- const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
- filter_params_y, subpel_y_qn & SUBPEL_MASK);
- if (filter_params_y->taps == 12) {
- vert_tap = 12;
- } else if (!(filter[0] | filter[1] | filter[6] | filter[7])) {
- vert_tap = 4;
- } else if (!(filter[0] | filter[7])) {
- vert_tap = 6;
- }
+ int i, vert_tap = get_filter_tap(filter_params_y, subpel_y_qn);
if (vert_tap == 6)
prepare_coeffs_6t_lowbd(filter_params_y, subpel_y_qn, coeffs);
@@ -55,7 +46,7 @@
if (vert_tap == 4) {
const int fo_vert = 1;
const uint8_t *const src_ptr = src - fo_vert * src_stride;
- for (j = 0; j < w; j += 16) {
+ for (int j = 0; j < w; j += 16) {
const uint8_t *data = &src_ptr[j];
d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
@@ -150,7 +141,7 @@
const int fo_vert = vert_tap / 2 - 1;
const uint8_t *const src_ptr = src - fo_vert * src_stride;
- for (j = 0; j < w; j += 16) {
+ for (int j = 0; j < w; j += 16) {
const uint8_t *data = &src_ptr[j];
__m256i src6;
@@ -255,7 +246,7 @@
right_shift = _mm_cvtsi32_si128(FILTER_BITS);
right_shift_const = _mm256_set1_epi32((1 << FILTER_BITS) >> 1);
- for (j = 0; j < w; j += 8) {
+ for (int j = 0; j < w; j += 8) {
const uint8_t *data = &src_ptr[j];
__m256i src10;
@@ -403,7 +394,7 @@
const int fo_vert = filter_params_y->taps / 2 - 1;
const uint8_t *const src_ptr = src - fo_vert * src_stride;
- for (j = 0; j < w; j += 16) {
+ for (int j = 0; j < w; j += 16) {
const uint8_t *data = &src_ptr[j];
__m256i src6;
@@ -517,18 +508,33 @@
}
}
-void av1_convolve_x_sr_avx2(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, int w, int h,
- const InterpFilterParams *filter_params_x,
- const int subpel_x_qn,
- ConvolveParams *conv_params) {
+void av1_convolve_y_sr_avx2(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride, int32_t w,
+ int32_t h,
+ const InterpFilterParams *filter_params_y,
+ const int32_t subpel_y_q4) {
+ const int vert_tap = get_filter_tap(filter_params_y, subpel_y_q4);
+
+ if (vert_tap == 12) {
+ av1_convolve_y_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_q4);
+ } else {
+ av1_convolve_y_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_y, subpel_y_q4);
+ }
+}
+
+static AOM_INLINE void av1_convolve_x_sr_general_avx2(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
const int bits = FILTER_BITS - conv_params->round_0;
const __m128i round_shift = _mm_cvtsi32_si128(bits);
__m256i round_0_const =
_mm256_set1_epi16((1 << (conv_params->round_0 - 1)) >> 1);
__m128i round_0_shift = _mm_cvtsi32_si128(conv_params->round_0 - 1);
__m256i round_const = _mm256_set1_epi16((1 << bits) >> 1);
- int i, horiz_tap = SUBPEL_TAPS;
+ int i, horiz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
assert(bits >= 0);
assert((FILTER_BITS - conv_params->round_1) >= 0 ||
@@ -539,16 +545,6 @@
filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
- const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- if (filter_params_x->taps == 12) {
- horiz_tap = 12;
- } else if (!(filter[0] | filter[1] | filter[6] | filter[7])) {
- horiz_tap = 4;
- } else if (!(filter[0] | filter[7])) {
- horiz_tap = 6;
- }
-
if (horiz_tap == 6)
prepare_coeffs_6t_lowbd(filter_params_x, subpel_x_qn, coeffs);
else if (horiz_tap == 12) {
@@ -900,3 +896,21 @@
}
}
}
+
+void av1_convolve_x_sr_avx2(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride, int32_t w,
+ int32_t h,
+ const InterpFilterParams *filter_params_x,
+ const int32_t subpel_x_q4,
+ ConvolveParams *conv_params) {
+  const int horiz_tap = get_filter_tap(filter_params_x, subpel_x_q4);
+
+  if (horiz_tap == 12) {
+ av1_convolve_x_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_q4, conv_params);
+ } else {
+ av1_convolve_x_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, subpel_x_q4,
+ conv_params);
+ }
+}
diff --git a/third_party/SVT-AV1/EbMemory_AVX2.h b/third_party/SVT-AV1/EbMemory_AVX2.h
new file mode 100644
index 0000000..0d0ea10
--- /dev/null
+++ b/third_party/SVT-AV1/EbMemory_AVX2.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at https://www.aomedia.org/license/software-license. If the
+ * Alliance for Open Media Patent License 1.0 was not distributed with this
+ * source code in the PATENTS file, you can obtain it at
+ * https://www.aomedia.org/license/patent-license.
+ */
+
+#ifndef AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_AVX2_H_
+#define AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_AVX2_H_
+
+#include <immintrin.h>
+
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+
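+// Fallback definitions for toolchains whose <immintrin.h> does not provide
+// these 128-bit lane composition helpers.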
+#ifndef _mm256_set_m128i
+#define _mm256_set_m128i(/* __m128i */ hi, /* __m128i */ lo) \
+ _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 0x1)
+#endif
+
+#ifndef _mm256_setr_m128i
+#define _mm256_setr_m128i(/* __m128i */ lo, /* __m128i */ hi) \
+ _mm256_set_m128i((hi), (lo))
+#endif
+
+static INLINE __m256i load_u8_4x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ __m128i src01;
+ src01 = _mm_cvtsi32_si128(*(int32_t *)(src + 0 * stride));
+ src01 = _mm_insert_epi32(src01, *(int32_t *)(src + 1 * stride), 1);
+ return _mm256_setr_m128i(src01, _mm_setzero_si128());
+}
+
+static INLINE __m256i load_u8_4x4_avx2(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ __m128i src01, src23;
+ src01 = _mm_cvtsi32_si128(*(int32_t *)(src + 0 * stride));
+ src01 = _mm_insert_epi32(src01, *(int32_t *)(src + 1 * stride), 1);
+ src23 = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * stride));
+ src23 = _mm_insert_epi32(src23, *(int32_t *)(src + 3 * stride), 1);
+ return _mm256_setr_m128i(src01, src23);
+}
+
+static INLINE __m256i load_u8_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ const __m128i src0 = _mm_loadl_epi64((__m128i *)(src + 0 * stride));
+ const __m128i src1 = _mm_loadl_epi64((__m128i *)(src + 1 * stride));
+ return _mm256_setr_m128i(src0, src1);
+}
+
+static INLINE __m256i load_u8_8x4_avx2(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ __m128i src01, src23;
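+  // _mm_loadh_pd() loads 8 bytes directly into the upper half of the
+  // register, avoiding a separate load and unpack.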
+ src01 = _mm_loadl_epi64((__m128i *)(src + 0 * stride));
+ src01 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(src01),
+ (double *)(void *)(src + 1 * stride)));
+ src23 = _mm_loadl_epi64((__m128i *)(src + 2 * stride));
+ src23 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(src23),
+ (double *)(void *)(src + 3 * stride)));
+ return _mm256_setr_m128i(src01, src23);
+}
+
+static INLINE __m256i loadu_8bit_16x2_avx2(const void *const src,
+ const ptrdiff_t strideInByte) {
+ const __m128i src0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i src1 =
+ _mm_loadu_si128((__m128i *)((uint8_t *)src + strideInByte));
+ return _mm256_setr_m128i(src0, src1);
+}
+
+static INLINE __m256i loadu_u8_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ return loadu_8bit_16x2_avx2(src, sizeof(*src) * stride);
+}
+
+static INLINE __m256i loadu_u16_8x2_avx2(const uint16_t *const src,
+ const ptrdiff_t stride) {
+ return loadu_8bit_16x2_avx2(src, sizeof(*src) * stride);
+}
+
+static INLINE void storeu_8bit_16x2_avx2(const __m256i src, void *const dst,
+ const ptrdiff_t strideInByte) {
+ const __m128i d0 = _mm256_castsi256_si128(src);
+ const __m128i d1 = _mm256_extracti128_si256(src, 1);
+ _mm_storeu_si128((__m128i *)dst, d0);
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + strideInByte), d1);
+}
+
+static INLINE void storeu_u8_16x2_avx2(const __m256i src, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ storeu_8bit_16x2_avx2(src, dst, sizeof(*dst) * stride);
+}
+
+static INLINE void storeu_s16_8x2_avx2(const __m256i src, int16_t *const dst,
+ const ptrdiff_t stride) {
+ storeu_8bit_16x2_avx2(src, dst, sizeof(*dst) * stride);
+}
+
+static INLINE void storeu_u16_8x2_avx2(const __m256i src, uint16_t *const dst,
+ const ptrdiff_t stride) {
+ storeu_8bit_16x2_avx2(src, dst, sizeof(*dst) * stride);
+}
+
+#endif // AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_AVX2_H_
diff --git a/third_party/SVT-AV1/EbMemory_SSE4_1.h b/third_party/SVT-AV1/EbMemory_SSE4_1.h
new file mode 100644
index 0000000..d821d9a
--- /dev/null
+++ b/third_party/SVT-AV1/EbMemory_SSE4_1.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at https://www.aomedia.org/license/software-license. If the
+ * Alliance for Open Media Patent License 1.0 was not distributed with this
+ * source code in the PATENTS file, you can obtain it at
+ * https://www.aomedia.org/license/patent-license.
+ */
+
+#ifndef AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_SSE4_1_H_
+#define AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_SSE4_1_H_
+
+#include <smmintrin.h>
+
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+
+static INLINE __m128i load8bit_4x2_sse4_1(const void *const src,
+ const ptrdiff_t strideInByte) {
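+  // Pack two 4-byte rows into 32-bit lanes 0 and 1 of one XMM register.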
+ const __m128i s = _mm_cvtsi32_si128(*(int32_t *)((uint8_t *)src));
+ return _mm_insert_epi32(s, *(int32_t *)((uint8_t *)src + strideInByte), 1);
+}
+
+static INLINE __m128i load_u8_4x2_sse4_1(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ return load8bit_4x2_sse4_1(src, sizeof(*src) * stride);
+}
+
+static INLINE __m128i load_u16_2x2_sse4_1(const uint16_t *const src,
+ const ptrdiff_t stride) {
+ return load8bit_4x2_sse4_1(src, sizeof(*src) * stride);
+}
+
+#endif // AOM_THIRD_PARTY_SVT_AV1_EBMEMORY_SSE4_1_H_
diff --git a/third_party/SVT-AV1/LICENSE.md b/third_party/SVT-AV1/LICENSE.md
new file mode 100644
index 0000000..aff96d1
--- /dev/null
+++ b/third_party/SVT-AV1/LICENSE.md
@@ -0,0 +1,32 @@
+BSD 3-Clause Clear License
+The Clear BSD License
+
+Copyright (c) 2021, Alliance for Open Media
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the disclaimer below)
+provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the Alliance for Open Media nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/SVT-AV1/PATENTS.md b/third_party/SVT-AV1/PATENTS.md
new file mode 100644
index 0000000..1de4dd7
--- /dev/null
+++ b/third_party/SVT-AV1/PATENTS.md
@@ -0,0 +1,107 @@
+**Alliance for Open Media Patent License 1.0**
+
+ 1. **License Terms.**
+
+ **Patent License.** Subject to the terms and conditions of this License, each
+ Licensor, on behalf of itself and successors in interest and assigns,
+ grants Licensee a non-sublicensable, perpetual, worldwide, non-exclusive,
+ no-charge, royalty-free, irrevocable (except as expressly stated in this
+ License) patent license to its Necessary Claims to make, use, sell, offer
+ for sale, import or distribute any Implementation.
+
+ **Conditions.**
+
+ *Availability.* As a condition to the grant of rights to Licensee to make,
+ sell, offer for sale, import or distribute an Implementation under
+ Section 1.1, Licensee must make its Necessary Claims available under
+ this License, and must reproduce this License with any Implementation
+ as follows:
+
+ a. For distribution in source code, by including this License in the
+ root directory of the source code with its Implementation.
+
+ b. For distribution in any other form (including binary, object form,
+ and/or hardware description code (e.g., HDL, RTL, Gate Level Netlist,
+ GDSII, etc.)), by including this License in the documentation, legal
+ notices, and/or other written materials provided with the
+ Implementation.
+
+ *Additional Conditions.* This license is directly from Licensor to
+ Licensee. Licensee acknowledges as a condition of benefiting from it
+ that no rights from Licensor are received from suppliers, distributors,
+ or otherwise in connection with this License.
+
+ **Defensive Termination**. If any Licensee, its Affiliates, or its agents
+ initiates patent litigation or files, maintains, or voluntarily
+ participates in a lawsuit against another entity or any person asserting
+ that any Implementation infringes Necessary Claims, any patent licenses
+ granted under this License directly to the Licensee are immediately
+ terminated as of the date of the initiation of action unless 1) that suit
+ was in response to a corresponding suit regarding an Implementation first
+ brought against an initiating entity, or 2) that suit was brought to
+ enforce the terms of this License (including intervention in a third-party
+ action by a Licensee).
+
+ **Disclaimers.** The Reference Implementation and Specification are provided
+ "AS IS" and without warranty. The entire risk as to implementing or
+ otherwise using the Reference Implementation or Specification is assumed
+ by the implementer and user. Licensor expressly disclaims any warranties
+ (express, implied, or otherwise), including implied warranties of
+ merchantability, non-infringement, fitness for a particular purpose, or
+ title, related to the material. IN NO EVENT WILL LICENSOR BE LIABLE TO
+ ANY OTHER PARTY FOR LOST PROFITS OR ANY FORM OF INDIRECT, SPECIAL,
+ INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER FROM ANY CAUSES OF
+ ACTION OF ANY KIND WITH RESPECT TO THIS LICENSE, WHETHER BASED ON BREACH
+ OF CONTRACT, TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, AND WHETHER OR
+  NOT THE OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+2. **Definitions.**
+
+ **Affiliate.** "Affiliate" means an entity that directly or indirectly
+ Controls, is Controlled by, or is under common Control of that party.
+
+ **Control.** "Control" means direct or indirect control of more than 50% of
+ the voting power to elect directors of that corporation, or for any other
+ entity, the power to direct management of such entity.
+
+ **Decoder.** "Decoder" means any decoder that conforms fully with all
+ non-optional portions of the Specification.
+
+ **Encoder.** "Encoder" means any encoder that produces a bitstream that can
+ be decoded by a Decoder only to the extent it produces such a bitstream.
+
+ **Final Deliverable.** "Final Deliverable" means the final version of a
+ deliverable approved by the Alliance for Open Media as a Final
+ Deliverable.
+
+ **Implementation.** "Implementation" means any implementation, including the
+ Reference Implementation, that is an Encoder and/or a Decoder. An
+ Implementation also includes components of an Implementation only to the
+ extent they are used as part of an Implementation.
+
+ **License.** "License" means this license.
+
+ **Licensee.** "Licensee" means any person or entity who exercises patent
+ rights granted under this License.
+
+ **Licensor.** "Licensor" means (i) any Licensee that makes, sells, offers
+ for sale, imports or distributes any Implementation, or (ii) a person
+ or entity that has a licensing obligation to the Implementation as a
+ result of its membership and/or participation in the Alliance for Open
+ Media working group that developed the Specification.
+
+ **Necessary Claims.** "Necessary Claims" means all claims of patents or
+ patent applications, (a) that currently or at any time in the future,
+ are owned or controlled by the Licensor, and (b) (i) would be an
+ Essential Claim as defined by the W3C Policy as of February 5, 2004
+ (https://www.w3.org/Consortium/Patent-Policy-20040205/#def-essential)
+ as if the Specification was a W3C Recommendation; or (ii) are infringed
+ by the Reference Implementation.
+
+ **Reference Implementation.** "Reference Implementation" means an Encoder
+ and/or Decoder released by the Alliance for Open Media as a Final
+ Deliverable.
+
+ **Specification.** "Specification" means the specification designated by
+ the Alliance for Open Media as a Final Deliverable for which this
+ License was issued.
diff --git a/third_party/SVT-AV1/README.libaom b/third_party/SVT-AV1/README.libaom
new file mode 100644
index 0000000..ff36505
--- /dev/null
+++ b/third_party/SVT-AV1/README.libaom
@@ -0,0 +1,14 @@
+URL: https://gitlab.com/AOMediaCodec/SVT-AV1
+
+Version: 8ff99c90359330d2e807757c9425560bbc452ff3
+License: BSD-3-clause clear
+License File: LICENSE.md
+
+Description:
+Port the x86 intrinsics used for single reference convolve reconstructions.
+
+Local Changes:
+Only ported the functions pertinent to single reference convolves.
+All functions are made static inline to avoid function call overhead.
+References to some arrays are changed to libaom version when applicable.
+Some extra intrinsic functions are added to support missing block sizes.
diff --git a/third_party/SVT-AV1/convolve_2d_avx2.h b/third_party/SVT-AV1/convolve_2d_avx2.h
new file mode 100644
index 0000000..64cd810
--- /dev/null
+++ b/third_party/SVT-AV1/convolve_2d_avx2.h
@@ -0,0 +1,1199 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_
+#define THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_
+
+#include "convolve_avx2.h"
+
+static void convolve_2d_sr_hor_2tap_avx2(
+ const uint8_t *const src, const int32_t src_stride, const int32_t w,
+ const int32_t h, const InterpFilterParams *const filter_params_x,
+ const int32_t subpel_x_q4, int16_t *const im_block) {
+ const uint8_t *src_ptr = src;
+ int32_t y = h;
+ int16_t *im = im_block;
+
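+  // Widths up to 16 produce two rows of the intermediate buffer per
+  // iteration (heights here are even); widths of 32 and above fill one
+  // full row per iteration.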
+ if (w <= 8) {
+ __m128i coeffs_128;
+
+ prepare_half_coeffs_2tap_ssse3(filter_params_x, subpel_x_q4, &coeffs_128);
+
+ if (w == 2) {
+ do {
+ const __m128i r =
+ x_convolve_2tap_2x2_sse4_1(src_ptr, src_stride, &coeffs_128);
+ xy_x_round_store_2x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 2;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ const __m128i r =
+ x_convolve_2tap_4x2_ssse3(src_ptr, src_stride, &coeffs_128);
+ xy_x_round_store_4x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 4;
+ y -= 2;
+ } while (y);
+ } else {
+ assert(w == 8);
+
+ do {
+ __m128i r[2];
+
+ x_convolve_2tap_8x2_ssse3(src_ptr, src_stride, &coeffs_128, r);
+ xy_x_round_store_8x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 8;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ __m256i coeffs_256;
+
+ prepare_half_coeffs_2tap_avx2(filter_params_x, subpel_x_q4, &coeffs_256);
+
+ if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_2tap_16x2_avx2(src_ptr, src_stride, &coeffs_256, r);
+ xy_x_round_store_32_avx2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 16;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ xy_x_2tap_32_avx2(src_ptr, &coeffs_256, im);
+ src_ptr += src_stride;
+ im += 32;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32);
+ xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32);
+ src_ptr += src_stride;
+ im += 64;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32);
+ xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32);
+ xy_x_2tap_32_avx2(src_ptr + 2 * 32, &coeffs_256, im + 2 * 32);
+ xy_x_2tap_32_avx2(src_ptr + 3 * 32, &coeffs_256, im + 3 * 32);
+ src_ptr += src_stride;
+ im += 128;
+ } while (--y);
+ }
+ }
+}
+
+static void convolve_2d_sr_hor_4tap_ssse3(
+ const uint8_t *const src, const int32_t src_stride, const int32_t w,
+ const int32_t h, const InterpFilterParams *const filter_params_x,
+ const int32_t subpel_x_q4, int16_t *const im_block) {
+ const uint8_t *src_ptr = src - 1;
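+  // 4-tap horizontal filtering reads taps/2 - 1 = 1 pixel to the left.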
+ int32_t y = h;
+ int16_t *im = im_block;
+
+ if (w <= 4) {
+ __m128i coeffs_128[2];
+
+ prepare_half_coeffs_4tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128);
+ if (w == 2) {
+ do {
+ const __m128i r =
+ x_convolve_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
+ xy_x_round_store_2x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 2;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ const __m128i r =
+ x_convolve_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ xy_x_round_store_4x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 4;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ // TODO(chiyotsai@google.com): Add better optimization
+ __m256i coeffs_256[2], filt_256[2];
+
+ prepare_half_coeffs_4tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+ filt_256[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
+
+ if (w == 8) {
+ do {
+ __m256i res =
+ x_convolve_4tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256);
+ xy_x_round_store_8x2_avx2(res, im);
+
+ src_ptr += 2 * src_stride;
+ im += 2 * 8;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_4tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r);
+ xy_x_round_store_32_avx2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 16;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+
+ src_ptr += src_stride;
+ im += 32;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ src_ptr += src_stride;
+ im += 64;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ xy_x_4tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64);
+ xy_x_4tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96);
+ src_ptr += src_stride;
+ im += 128;
+ } while (--y);
+ }
+ }
+}
+
+static void convolve_2d_sr_hor_6tap_avx2(
+ const uint8_t *const src, const int32_t src_stride, const int32_t w,
+ const int32_t h, const InterpFilterParams *const filter_params_x,
+ const int32_t subpel_x_q4, int16_t *const im_block) {
+ const uint8_t *src_ptr = src - 2;
+ int32_t y = h;
+ int16_t *im = im_block;
+
+ if (w <= 4) {
+ __m128i coeffs_128[3];
+
+ prepare_half_coeffs_6tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128);
+ if (w == 2) {
+ do {
+ const __m128i r =
+ x_convolve_6tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
+ xy_x_round_store_2x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 2;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ const __m128i r =
+ x_convolve_6tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ xy_x_round_store_4x2_sse2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 4;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ __m256i coeffs_256[3], filt_256[3];
+
+ filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
+ filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2);
+
+ prepare_half_coeffs_6tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+
+ if (w == 8) {
+ do {
+ const __m256i res =
+ x_convolve_6tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256);
+ xy_x_round_store_8x2_avx2(res, im);
+
+ src_ptr += 2 * src_stride;
+ im += 2 * 8;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r);
+ xy_x_round_store_32_avx2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 16;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ src_ptr += src_stride;
+ im += 32;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ src_ptr += src_stride;
+ im += 64;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ xy_x_6tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64);
+ xy_x_6tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96);
+ src_ptr += src_stride;
+ im += 128;
+ } while (--y);
+ }
+ }
+}
+
+static void convolve_2d_sr_hor_8tap_avx2(
+ const uint8_t *const src, const int32_t src_stride, const int32_t w,
+ const int32_t h, const InterpFilterParams *const filter_params_x,
+ const int32_t subpel_x_q4, int16_t *const im_block) {
+ const uint8_t *src_ptr = src - 3;
+ int32_t y = h;
+ int16_t *im = im_block;
+ __m256i coeffs_256[4], filt_256[4];
+
+ filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
+ filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2);
+ filt_256[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2);
+
+ prepare_half_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+
+ if (w == 8) {
+ do {
+ const __m256i res =
+ x_convolve_8tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256);
+ xy_x_round_store_8x2_avx2(res, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 8;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r);
+ xy_x_round_store_32_avx2(r, im);
+ src_ptr += 2 * src_stride;
+ im += 2 * 16;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ src_ptr += src_stride;
+ im += 32;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ src_ptr += src_stride;
+ im += 64;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
+ xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
+ xy_x_8tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64);
+ xy_x_8tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96);
+ src_ptr += src_stride;
+ im += 128;
+ } while (--y);
+ }
+}
+
+static void convolve_2d_sr_ver_2tap_avx2(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride) {
+ const int16_t *im = im_block;
+ int32_t y = h;
+
+ if (w <= 4) {
+ __m128i coeffs_128;
+
+ prepare_coeffs_2tap_sse2(filter_params_y, subpel_y_q4, &coeffs_128);
+
+ if (w == 2) {
+ __m128i s_32[2];
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im);
+
+ do {
+ const __m128i res = xy_y_convolve_2tap_2x2_sse2(im, s_32, &coeffs_128);
+ xy_y_round_store_2x2_sse2(res, dst, dst_stride);
+ im += 2 * 2;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_64[2], r[2];
+
+ assert(w == 4);
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)im);
+
+ do {
+ xy_y_convolve_2tap_4x2_sse2(im, s_64, &coeffs_128, r);
+ r[0] = xy_y_round_sse2(r[0]);
+ r[1] = xy_y_round_sse2(r[1]);
+ const __m128i rr = _mm_packs_epi32(r[0], r[1]);
+ pack_store_4x2_sse2(rr, dst, dst_stride);
+ im += 2 * 4;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ __m256i coeffs_256;
+
+ prepare_coeffs_2tap_avx2(filter_params_y, subpel_y_q4, &coeffs_256);
+
+ if (w == 8) {
+ __m128i s_128[2];
+ __m256i r[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)im);
+
+ do {
+ xy_y_convolve_2tap_8x2_avx2(im, s_128, &coeffs_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ __m256i s_256[2], r[4];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)im);
+
+ do {
+ xy_y_convolve_2tap_16x2_avx2(im, s_256, &coeffs_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ __m256i s_256[2][2];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+
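+      // s_256[0] and s_256[1] alternate as the previous/current rows, so
+      // each call loads only one new row of the intermediate buffer.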
+ do {
+ xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[0], s_256[1], &coeffs_256,
+ dst);
+ im += 2 * 32;
+ xy_y_convolve_2tap_32_all_avx2(im, s_256[1], s_256[0], &coeffs_256,
+ dst + dst_stride);
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 64) {
+ __m256i s_256[2][4];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+ s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
+ s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));
+
+ do {
+ xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[0] + 0, s_256[1] + 0,
+ &coeffs_256, dst);
+ xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[0] + 2, s_256[1] + 2,
+ &coeffs_256, dst + 32);
+ im += 2 * 64;
+ xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
+ &coeffs_256, dst + dst_stride);
+ xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2,
+ &coeffs_256, dst + dst_stride + 32);
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[2][8];
+
+ assert(w == 128);
+
+ load_16bit_8rows_avx2(im, 16, s_256[0]);
+
+ do {
+ xy_y_convolve_2tap_32_all_avx2(im + 128, s_256[0] + 0, s_256[1] + 0,
+ &coeffs_256, dst);
+ xy_y_convolve_2tap_32_all_avx2(im + 160, s_256[0] + 2, s_256[1] + 2,
+ &coeffs_256, dst + 1 * 32);
+ xy_y_convolve_2tap_32_all_avx2(im + 192, s_256[0] + 4, s_256[1] + 4,
+ &coeffs_256, dst + 2 * 32);
+ xy_y_convolve_2tap_32_all_avx2(im + 224, s_256[0] + 6, s_256[1] + 6,
+ &coeffs_256, dst + 3 * 32);
+ im += 2 * 128;
+ xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
+ &coeffs_256, dst + dst_stride);
+ xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2,
+ &coeffs_256, dst + dst_stride + 1 * 32);
+ xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[1] + 4, s_256[0] + 4,
+ &coeffs_256, dst + dst_stride + 2 * 32);
+ xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[1] + 6, s_256[0] + 6,
+ &coeffs_256, dst + dst_stride + 3 * 32);
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ }
+}
+
+static void convolve_2d_sr_ver_2tap_half_avx2(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride) {
+ const int16_t *im = im_block;
+ int32_t y = h;
+
+ (void)filter_params_y;
+ (void)subpel_y_q4;
+
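+  // At the half-pel position the 2-tap kernel is (64, 64), i.e. a plain
+  // average of two rows, so the filter parameters are unused here.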
+ if (w == 2) {
+ __m128i s_32[2];
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im);
+
+ do {
+ const __m128i res = xy_y_convolve_2tap_2x2_half_pel_sse2(im, s_32);
+ const __m128i r = xy_y_round_half_pel_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+ im += 2 * 2;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ __m128i s_64[2];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)im);
+
+ do {
+ const __m128i res = xy_y_convolve_2tap_4x2_half_pel_sse2(im, s_64);
+ const __m128i r = xy_y_round_half_pel_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+ im += 2 * 4;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ __m128i s_128[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)im);
+
+ do {
+ const __m256i res = xy_y_convolve_2tap_8x2_half_pel_avx2(im, s_128);
+ const __m256i r = xy_y_round_half_pel_avx2(res);
+ pack_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ __m256i s_256[2], r[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)im);
+
+ do {
+ xy_y_convolve_2tap_16x2_half_pel_avx2(im, s_256, r);
+ r[0] = xy_y_round_half_pel_avx2(r[0]);
+ r[1] = xy_y_round_half_pel_avx2(r[1]);
+ xy_y_pack_store_16x2_avx2(r[0], r[1], dst, dst_stride);
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ __m256i s_256[2][2];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+
+ do {
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 32, s_256[0], s_256[1], dst);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 2 * 32, s_256[1], s_256[0],
+ dst + dst_stride);
+ im += 2 * 32;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 64) {
+ __m256i s_256[2][4];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+ s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
+ s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));
+
+ do {
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 64, s_256[0] + 0,
+ s_256[1] + 0, dst);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 96, s_256[0] + 2,
+ s_256[1] + 2, dst + 32);
+ im += 2 * 64;
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
+ dst + dst_stride);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(
+ im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 32);
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[2][8];
+
+ assert(w == 128);
+
+ load_16bit_8rows_avx2(im, 16, s_256[0]);
+
+ do {
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 128, s_256[0] + 0,
+ s_256[1] + 0, dst);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 160, s_256[0] + 2,
+ s_256[1] + 2, dst + 1 * 32);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 192, s_256[0] + 4,
+ s_256[1] + 4, dst + 2 * 32);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im + 224, s_256[0] + 6,
+ s_256[1] + 6, dst + 3 * 32);
+ im += 2 * 128;
+ xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
+ dst + dst_stride);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(
+ im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 1 * 32);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(
+ im + 64, s_256[1] + 4, s_256[0] + 4, dst + dst_stride + 2 * 32);
+ xy_y_convolve_2tap_half_pel_32_all_avx2(
+ im + 96, s_256[1] + 6, s_256[0] + 6, dst + dst_stride + 3 * 32);
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+}
+
+static void convolve_2d_sr_ver_4tap_avx2(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride) {
+ const int16_t *im = im_block;
+ int32_t y = h;
+
+ if (w == 2) {
+ __m128i coeffs_128[2], s_32[4], ss_128[2];
+
+ prepare_coeffs_4tap_sse2(filter_params_y, subpel_y_q4, coeffs_128);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+
+ ss_128[0] = _mm_unpacklo_epi16(src01, src12);
+
+ do {
+ const __m128i res =
+ xy_y_convolve_4tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
+ xy_y_round_store_2x2_sse2(res, dst, dst_stride);
+ im += 2 * 2;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i coeffs_256[2];
+
+ prepare_coeffs_4tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 4) {
+ __m128i s_64[4];
+ __m256i s_256[2], ss_256[2];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
+ s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+
+ do {
+ const __m256i res =
+ xy_y_convolve_4tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
+ xy_y_round_store_4x2_avx2(res, dst, dst_stride);
+ im += 2 * 4;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ __m256i s_256[4], r[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));
+
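+      // subpel_y_q4 == 8 is the half-pel position, where the kernel is
+      // symmetric; the else branch uses the cheaper half-pel variants.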
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[4];
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+
+ do {
+ xy_y_convolve_4tap_8x2_avx2(im, ss_256, coeffs_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ do {
+ xy_y_convolve_4tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else if (w == 16) {
+ __m256i s_256[5];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
+
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[4], tt_256[4], r[4];
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+
+ tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
+ tt_256[2] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
+
+ do {
+ xy_y_convolve_4tap_16x2_avx2(im, s_256, ss_256, tt_256, coeffs_256,
+ r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i r[4];
+
+ do {
+          xy_y_convolve_4tap_16x2_half_pel_avx2(im, s_256, coeffs_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+      /* This is a special case for OBMC. Per the AV1 spec, 4-tap filters
+         are not used for width (w) > 16, but OBMC halves the block height
+         when predicting from the above block, e.g. a 32x8 above block is
+         predicted as 32x4, so 4-tap filtering can still be needed here. */
+ int32_t x = 0;
+
+ assert(!(w % 32));
+
+ __m256i s_256[2][4], ss_256[2][4], tt_256[2][4], r0[4], r1[4];
+ do {
+ const int16_t *s = im + x;
+ uint8_t *d = dst + x;
+
+ loadu_unpack_16bit_3rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]);
+ loadu_unpack_16bit_3rows_avx2(s + 16, w, s_256[1], ss_256[1],
+ tt_256[1]);
+
+ y = h;
+ do {
+ xy_y_convolve_4tap_32x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0],
+ coeffs_256, r0);
+ xy_y_convolve_4tap_32x2_avx2(s + 16, w, s_256[1], ss_256[1],
+ tt_256[1], coeffs_256, r1);
+
+ xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
+ xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);
+
+ s += 2 * w;
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+}
+
+static void convolve_2d_sr_ver_6tap_avx2(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride) {
+ const int16_t *im = im_block;
+ int32_t y;
+
+ if (w == 2) {
+ __m128i coeffs_128[3], s_32[6], ss_128[3];
+
+ prepare_coeffs_6tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2));
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);
+
+ ss_128[0] = _mm_unpacklo_epi16(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi16(src23, src34);
+
+ y = h;
+ do {
+ const __m128i res =
+ xy_y_convolve_6tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
+ xy_y_round_store_2x2_sse2(res, dst, dst_stride);
+ im += 2 * 2;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i coeffs_256[3];
+
+ prepare_coeffs_6tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 4) {
+ __m128i s_64[6];
+ __m256i s_256[6], ss_256[3];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));
+ s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4));
+ s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
+ s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);
+ s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]);
+ s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]);
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+
+ y = h;
+ do {
+ const __m256i res =
+ xy_y_convolve_6tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
+ xy_y_round_store_4x2_avx2(res, dst, dst_stride);
+ im += 2 * 4;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ __m256i s_256[6], r[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8));
+ y = h;
+
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[6];
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+
+ do {
+ xy_y_convolve_6tap_8x2_avx2(im, ss_256, coeffs_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ do {
+ xy_y_convolve_6tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else if (w == 16) {
+ __m256i s_256[6];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 16));
+ y = h;
+
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[6], tt_256[6], r[4];
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+
+ tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
+ tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[4]);
+ tt_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
+ tt_256[4] = _mm256_unpackhi_epi16(s_256[3], s_256[4]);
+
+ do {
+ xy_y_convolve_6tap_16x2_avx2(im, 16, s_256, ss_256, tt_256,
+ coeffs_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i ss_256[4], r[4];
+
+ do {
+ xy_y_convolve_6tap_16x2_half_pel_avx2(im, 16, s_256, ss_256,
+ coeffs_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ int32_t x = 0;
+
+ assert(!(w % 32));
+
+ __m256i s_256[2][6], ss_256[2][6], tt_256[2][6], r0[4], r1[4];
+
+ do {
+ const int16_t *s = im + x;
+ uint8_t *d = dst + x;
+
+ loadu_unpack_16bit_5rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]);
+ loadu_unpack_16bit_5rows_avx2(s + 16, w, s_256[1], ss_256[1],
+ tt_256[1]);
+
+ y = h;
+ do {
+ xy_y_convolve_6tap_16x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0],
+ coeffs_256, r0);
+ xy_y_convolve_6tap_16x2_avx2(s + 16, w, s_256[1], ss_256[1],
+ tt_256[1], coeffs_256, r1);
+
+ xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
+ xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);
+
+ s += 2 * w;
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+}
+
+static void convolve_2d_sr_ver_8tap_avx2(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride) {
+ const int16_t *im = im_block;
+ int32_t y;
+
+ if (w == 2) {
+ __m128i coeffs_128[4], s_32[8], ss_128[4];
+
+ prepare_coeffs_8tap_sse2(filter_params_y, subpel_y_q4, coeffs_128);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2));
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2));
+ s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(im + 5 * 2));
+ s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(im + 6 * 2));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);
+ const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]);
+ const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[6]);
+
+ ss_128[0] = _mm_unpacklo_epi16(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi16(src23, src34);
+ ss_128[2] = _mm_unpacklo_epi16(src45, src56);
+
+ y = h;
+ do {
+ const __m128i res =
+ xy_y_convolve_8tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
+ xy_y_round_store_2x2_sse2(res, dst, dst_stride);
+ im += 2 * 2;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i coeffs_256[4];
+
+ prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 4) {
+ __m128i s_64[8];
+ __m256i s_256[8], ss_256[4];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));
+ s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4));
+ s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4));
+ s_64[5] = _mm_loadl_epi64((__m128i *)(im + 5 * 4));
+ s_64[6] = _mm_loadl_epi64((__m128i *)(im + 6 * 4));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
+ s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);
+ s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]);
+ s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]);
+ s_256[4] = _mm256_setr_m128i(s_64[4], s_64[5]);
+ s_256[5] = _mm256_setr_m128i(s_64[5], s_64[6]);
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]);
+
+ y = h;
+ do {
+ const __m256i res =
+ xy_y_convolve_8tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
+ xy_y_round_store_4x2_avx2(res, dst, dst_stride);
+ im += 2 * 4;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ __m256i s_256[8], r[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 8));
+ s_256[5] = _mm256_loadu_si256((__m256i *)(im + 5 * 8));
+ y = h;
+
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[8];
+
+ convolve_8tap_unpack_avx2(s_256, ss_256);
+
+ do {
+ xy_y_convolve_8tap_8x2_avx2(im, ss_256, coeffs_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ do {
+ xy_y_convolve_8tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
+ xy_y_round_store_8x2_avx2(r, dst, dst_stride);
+ im += 2 * 8;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else if (w == 16) {
+ __m256i s_256[8], r[4];
+
+ load_16bit_7rows_avx2(im, 16, s_256);
+ y = h;
+
+ if (subpel_y_q4 != 8) {
+ __m256i ss_256[8], tt_256[8];
+
+ convolve_8tap_unpack_avx2(s_256, ss_256);
+ convolve_8tap_unpack_avx2(s_256 + 1, tt_256);
+
+ do {
+ xy_y_convolve_8tap_16x2_avx2(im, 16, coeffs_256, s_256, ss_256,
+ tt_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ do {
+ xy_y_convolve_8tap_16x2_half_pel_avx2(im, 16, coeffs_256, s_256, r);
+ xy_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ im += 2 * 16;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ int32_t x = 0;
+ __m256i s_256[2][8], r0[4], r1[4];
+
+ assert(!(w % 32));
+
+ __m256i ss_256[2][8], tt_256[2][8];
+
+ do {
+ const int16_t *s = im + x;
+ uint8_t *d = dst + x;
+
+ load_16bit_7rows_avx2(s, w, s_256[0]);
+ convolve_8tap_unpack_avx2(s_256[0], ss_256[0]);
+ convolve_8tap_unpack_avx2(s_256[0] + 1, tt_256[0]);
+
+ load_16bit_7rows_avx2(s + 16, w, s_256[1]);
+ convolve_8tap_unpack_avx2(s_256[1], ss_256[1]);
+ convolve_8tap_unpack_avx2(s_256[1] + 1, tt_256[1]);
+
+ y = h;
+ do {
+ xy_y_convolve_8tap_16x2_avx2(s, w, coeffs_256, s_256[0], ss_256[0],
+ tt_256[0], r0);
+ xy_y_convolve_8tap_16x2_avx2(s + 16, w, coeffs_256, s_256[1],
+ ss_256[1], tt_256[1], r1);
+ xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
+ xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);
+
+ s += 2 * w;
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+}
+
+typedef void (*Convolve2dSrHorTapFunc)(
+ const uint8_t *const src, const int32_t src_stride, const int32_t w,
+ const int32_t h, const InterpFilterParams *const filter_params_x,
+ const int32_t subpel_x_q4, int16_t *const im_block);
+
+typedef void (*Convolve2dSrVerTapFunc)(
+ const int16_t *const im_block, const int32_t w, const int32_t h,
+ const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
+ uint8_t *dst, const int32_t dst_stride);
+
+static AOM_FORCE_INLINE void av1_convolve_2d_sr_specialized_avx2(
+ const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+ int32_t w, int32_t h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int32_t subpel_x_q4,
+ const int32_t subpel_y_q4, ConvolveParams *conv_params) {
+ static const Convolve2dSrHorTapFunc
+ convolve_2d_sr_hor_tap_func_table[MAX_FILTER_TAP + 1] = {
+ NULL,
+ NULL,
+ convolve_2d_sr_hor_2tap_avx2,
+ NULL,
+ convolve_2d_sr_hor_4tap_ssse3,
+ NULL,
+ convolve_2d_sr_hor_6tap_avx2,
+ NULL,
+ convolve_2d_sr_hor_8tap_avx2
+ };
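+  // The vertical table is indexed by tap count, or tap count - 1 when
+  // subpel_y_q4 == 8 (half-pel); only the 2-tap filter has a dedicated
+  // half-pel entry, the 4/6/8-tap functions handle half-pel internally.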
+ static const Convolve2dSrVerTapFunc
+ convolve_2d_sr_ver_tap_func_table[MAX_FILTER_TAP + 1] = {
+ NULL,
+ convolve_2d_sr_ver_2tap_half_avx2,
+ convolve_2d_sr_ver_2tap_avx2,
+ convolve_2d_sr_ver_4tap_avx2,
+ convolve_2d_sr_ver_4tap_avx2,
+ convolve_2d_sr_ver_6tap_avx2,
+ convolve_2d_sr_ver_6tap_avx2,
+ convolve_2d_sr_ver_8tap_avx2,
+ convolve_2d_sr_ver_8tap_avx2
+ };
+ const int32_t tap_x = get_filter_tap(filter_params_x, subpel_x_q4);
+ const int32_t tap_y = get_filter_tap(filter_params_y, subpel_y_q4);
+
+ assert(tap_x != 12 && tap_y != 12);
+
+ const uint8_t *src_ptr = src - ((tap_y >> 1) - 1) * src_stride;
+ // Note: im_block is 8-pixel interlaced for width 32 and up, to avoid data
+ // permutation.
+ DECLARE_ALIGNED(32, int16_t,
+ im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+
+  (void)conv_params;  // only read by the asserts below
+
+ assert(conv_params->round_0 == 3);
+ assert(conv_params->round_1 == 11);
+
+ // horizontal filter
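+  // The tap_y-tap vertical pass consumes h + tap_y - 1 intermediate rows;
+  // producing h + tap_y keeps the row count even for the loops that emit
+  // two rows per iteration.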
+ int32_t hh = h + tap_y;
+ assert(!(hh % 2));
+
+ convolve_2d_sr_hor_tap_func_table[tap_x](
+ src_ptr, src_stride, w, hh, filter_params_x, subpel_x_q4, im_block);
+
+ // vertical filter
+ convolve_2d_sr_ver_tap_func_table[tap_y - (subpel_y_q4 == 8)](
+ im_block, w, h, filter_params_y, subpel_y_q4, dst, dst_stride);
+}
+
+#endif // THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_
diff --git a/third_party/SVT-AV1/convolve_avx2.h b/third_party/SVT-AV1/convolve_avx2.h
new file mode 100644
index 0000000..31d9790
--- /dev/null
+++ b/third_party/SVT-AV1/convolve_avx2.h
@@ -0,0 +1,3336 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef THIRD_PARTY_SVT_AV1_CONVOLVE_AVX2_H_
+#define THIRD_PARTY_SVT_AV1_CONVOLVE_AVX2_H_
+
+#include "EbMemory_AVX2.h"
+#include "EbMemory_SSE4_1.h"
+#include "synonyms.h"
+
+#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/x86/convolve_avx2.h"
+
+static INLINE void populate_coeffs_4tap_avx2(const __m128i coeffs_128,
+ __m256i coeffs[2]) {
+ const __m256i coeffs_256 = _mm256_broadcastsi128_si256(coeffs_128);
+
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[0] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0604u));
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[1] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0a08u));
+}
+
+static INLINE void populate_coeffs_6tap_avx2(const __m128i coeffs_128,
+ __m256i coeffs[3]) {
+ const __m256i coeffs_256 = _mm256_broadcastsi128_si256(coeffs_128);
+
+ // coeffs 1 2 1 2 1 2 1 2
+ coeffs[0] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0402u));
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[1] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0806u));
+ // coeffs 5 6 5 6 5 6 5 6
+ coeffs[2] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0C0Au));
+}
+
+static INLINE void populate_coeffs_8tap_avx2(const __m128i coeffs_128,
+ __m256i coeffs[4]) {
+ const __m256i coeffs_256 = _mm256_broadcastsi128_si256(coeffs_128);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ coeffs[0] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0200u));
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[1] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0604u));
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[2] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0a08u));
+ // coeffs 6 7 6 7 6 7 6 7
+ coeffs[3] = _mm256_shuffle_epi8(coeffs_256, _mm256_set1_epi16(0x0e0cu));
+}
+
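+// The prepare_half_coeffs_* helpers build halved 8-bit coefficient pairs for
+// the maddubs (u8 * s8) kernels; the prepare_coeffs_* helpers further below
+// keep full 16-bit precision for the madd kernels that process 16-bit
+// intermediate data.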
+static INLINE void prepare_half_coeffs_2tap_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [1] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_cvtsi32_si128(*(const int32_t *)(filter + 3));
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 3 4 3 4 3 4 3 4
+ *coeffs = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0200u));
+}
+
+static INLINE void prepare_half_coeffs_4tap_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [2] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[0] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0604u));
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0a08u));
+}
+
+static INLINE void prepare_half_coeffs_6tap_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [3] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 1 2 1 2 1 2 1 2
+ coeffs[0] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0402u));
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0806u));
+ // coeffs 5 6 5 6 5 6 5 6
+ coeffs[2] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0C0Au));
+}
+
+static INLINE void prepare_half_coeffs_8tap_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [4] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ coeffs[0] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0200u));
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0604u));
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[2] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0a08u));
+ // coeffs 6 7 6 7 6 7 6 7
+ coeffs[3] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0e0cu));
+}
+
+static INLINE void prepare_half_coeffs_2tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [1] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_cvtsi32_si128(*(const int32_t *)(filter + 3));
+ const __m256i filter_coeffs = _mm256_broadcastsi128_si256(coeffs_8);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);
+
+ // coeffs 3 4 3 4 3 4 3 4
+ *coeffs = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0200u));
+}
+
+static INLINE void prepare_half_coeffs_4tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [2] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+ populate_coeffs_4tap_avx2(coeffs_1, coeffs);
+}
+
+static INLINE void prepare_half_coeffs_6tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [3] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+ populate_coeffs_6tap_avx2(coeffs_1, coeffs);
+}
+
+static INLINE void prepare_half_coeffs_8tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [4] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+  // Right-shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change does not affect the
+  // end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+ populate_coeffs_8tap_avx2(coeffs_1, coeffs);
+}
+
+static INLINE void prepare_coeffs_2tap_sse2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [1] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff = _mm_cvtsi32_si128(*(const int32_t *)(filter + 3));
+
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[0] = _mm_shuffle_epi32(coeff, 0x00);
+}
+
+static INLINE void prepare_coeffs_4tap_sse2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [2] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff = _mm_loadu_si128((__m128i *)filter);
+
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[0] = _mm_shuffle_epi32(coeff, 0x55);
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[1] = _mm_shuffle_epi32(coeff, 0xaa);
+}
+
+static INLINE void prepare_coeffs_6tap_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [3] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeff = _mm_loadu_si128((__m128i *)filter);
+
+ // coeffs 1 2 1 2 1 2 1 2
+ coeffs[0] = _mm_shuffle_epi8(coeff, _mm_set1_epi32(0x05040302u));
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[1] = _mm_shuffle_epi8(coeff, _mm_set1_epi32(0x09080706u));
+ // coeffs 5 6 5 6 5 6 5 6
+ coeffs[2] = _mm_shuffle_epi8(coeff, _mm_set1_epi32(0x0D0C0B0Au));
+}
+
+static INLINE void prepare_coeffs_8tap_sse2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [4] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff = _mm_loadu_si128((__m128i *)filter);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ coeffs[0] = _mm_shuffle_epi32(coeff, 0x00);
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[1] = _mm_shuffle_epi32(coeff, 0x55);
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[2] = _mm_shuffle_epi32(coeff, 0xaa);
+ // coeffs 6 7 6 7 6 7 6 7
+ coeffs[3] = _mm_shuffle_epi32(coeff, 0xff);
+}
+
+static INLINE void prepare_coeffs_2tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [1] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff_8 = _mm_cvtsi32_si128(*(const int32_t *)(filter + 3));
+ const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
+
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
+}
+
+static INLINE void prepare_coeffs_4tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [2] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
+ const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
+
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[0] = _mm256_shuffle_epi32(coeff, 0x55);
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[1] = _mm256_shuffle_epi32(coeff, 0xaa);
+}
+
+static INLINE void prepare_coeffs_6tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+    __m256i *const coeffs /* [3] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+ const __m256i coeff = _mm256_broadcastsi128_si256(coeffs_8);
+
+ // coeffs 1 2 1 2 1 2 1 2
+ coeffs[0] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x05040302u));
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[1] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x09080706u));
+ // coeffs 5 6 5 6 5 6 5 6
+ coeffs[2] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x0D0C0B0Au));
+}
+
+static INLINE void prepare_coeffs_8tap_avx2(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m256i *const coeffs /* [4] */) {
+ const int16_t *filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+
+ const __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
+ const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[1] = _mm256_shuffle_epi32(coeff, 0x55);
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[2] = _mm256_shuffle_epi32(coeff, 0xaa);
+ // coeffs 6 7 6 7 6 7 6 7
+ coeffs[3] = _mm256_shuffle_epi32(coeff, 0xff);
+}
+
+static INLINE void load_16bit_5rows_avx2(const int16_t *const src,
+ const ptrdiff_t stride,
+ __m256i dst[5]) {
+ dst[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
+ dst[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ dst[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+ dst[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ dst[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+}
+
+static INLINE void load_16bit_7rows_avx2(const int16_t *const src,
+ const ptrdiff_t stride,
+ __m256i dst[7]) {
+ dst[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
+ dst[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ dst[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+ dst[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ dst[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+ dst[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
+ dst[6] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
+}
+
+static AOM_FORCE_INLINE void load_16bit_8rows_avx2(const int16_t *const src,
+ const ptrdiff_t stride,
+ __m256i dst[8]) {
+ dst[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
+ dst[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ dst[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+ dst[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ dst[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+ dst[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
+ dst[6] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
+ dst[7] = _mm256_loadu_si256((__m256i *)(src + 7 * stride));
+}
+
+static AOM_FORCE_INLINE void loadu_unpack_16bit_5rows_avx2(
+ const int16_t *const src, const ptrdiff_t stride, __m256i s_256[5],
+ __m256i ss_256[5], __m256i tt_256[5]) {
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+
+ tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
+ tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[4]);
+ tt_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
+ tt_256[4] = _mm256_unpackhi_epi16(s_256[3], s_256[4]);
+}
+
+static AOM_FORCE_INLINE void loadu_unpack_16bit_3rows_avx2(
+ const int16_t *const src, const ptrdiff_t stride, __m256i s_256[3],
+ __m256i ss_256[3], __m256i tt_256[3]) {
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+
+ ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+
+ tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
+ tt_256[2] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
+}
+
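+// Pre-unpacks six of the row pairs an 8-tap vertical filter needs; the
+// newest pair (slot 3, plus slot 7 in the 16-pixel-wide callers) is filled
+// in per two-row iteration by the convolve kernels.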
+static INLINE void convolve_8tap_unpack_avx2(const __m256i s[6],
+ __m256i ss[7]) {
+ ss[0] = _mm256_unpacklo_epi16(s[0], s[1]);
+ ss[1] = _mm256_unpacklo_epi16(s[2], s[3]);
+ ss[2] = _mm256_unpacklo_epi16(s[4], s[5]);
+ ss[4] = _mm256_unpackhi_epi16(s[0], s[1]);
+ ss[5] = _mm256_unpackhi_epi16(s[2], s[3]);
+ ss[6] = _mm256_unpackhi_epi16(s[4], s[5]);
+}
+
+static INLINE __m128i convolve_2tap_ssse3(const __m128i ss[1],
+ const __m128i coeffs[1]) {
+ return _mm_maddubs_epi16(ss[0], coeffs[0]);
+}
+
+static INLINE __m128i convolve_4tap_ssse3(const __m128i ss[2],
+ const __m128i coeffs[2]) {
+ const __m128i res_23 = _mm_maddubs_epi16(ss[0], coeffs[0]);
+ const __m128i res_45 = _mm_maddubs_epi16(ss[1], coeffs[1]);
+ return _mm_add_epi16(res_23, res_45);
+}
+
+static INLINE __m128i convolve_6tap_ssse3(const __m128i ss[3],
+ const __m128i coeffs[3]) {
+ const __m128i res_12 = _mm_maddubs_epi16(ss[0], coeffs[0]);
+ const __m128i res_34 = _mm_maddubs_epi16(ss[1], coeffs[1]);
+ const __m128i res_56 = _mm_maddubs_epi16(ss[2], coeffs[2]);
+ const __m128i res_1256 = _mm_add_epi16(res_12, res_56);
+ return _mm_add_epi16(res_1256, res_34);
+}
+
+static INLINE __m128i convolve_8tap_ssse3(const __m128i ss[4],
+ const __m128i coeffs[4]) {
+ const __m128i res_01 = _mm_maddubs_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_maddubs_epi16(ss[1], coeffs[1]);
+ const __m128i res_45 = _mm_maddubs_epi16(ss[2], coeffs[2]);
+ const __m128i res_67 = _mm_maddubs_epi16(ss[3], coeffs[3]);
+ const __m128i res_0145 = _mm_add_epi16(res_01, res_45);
+ const __m128i res_2367 = _mm_add_epi16(res_23, res_67);
+ return _mm_add_epi16(res_0145, res_2367);
+}
+
+static INLINE __m256i convolve_2tap_avx2(const __m256i ss[1],
+ const __m256i coeffs[1]) {
+ return _mm256_maddubs_epi16(ss[0], coeffs[0]);
+}
+
+static INLINE __m256i convolve_4tap_avx2(const __m256i ss[2],
+ const __m256i coeffs[2]) {
+ const __m256i res_23 = _mm256_maddubs_epi16(ss[0], coeffs[0]);
+ const __m256i res_45 = _mm256_maddubs_epi16(ss[1], coeffs[1]);
+ return _mm256_add_epi16(res_23, res_45);
+}
+
+static INLINE __m256i convolve_6tap_avx2(const __m256i ss[3],
+ const __m256i coeffs[3]) {
+  const __m256i res_12 = _mm256_maddubs_epi16(ss[0], coeffs[0]);
+  const __m256i res_34 = _mm256_maddubs_epi16(ss[1], coeffs[1]);
+  const __m256i res_56 = _mm256_maddubs_epi16(ss[2], coeffs[2]);
+  const __m256i res_1256 = _mm256_add_epi16(res_12, res_56);
+  return _mm256_add_epi16(res_1256, res_34);
+}
+
+static INLINE __m256i convolve_8tap_avx2(const __m256i ss[4],
+ const __m256i coeffs[4]) {
+ const __m256i res_01 = _mm256_maddubs_epi16(ss[0], coeffs[0]);
+ const __m256i res_23 = _mm256_maddubs_epi16(ss[1], coeffs[1]);
+ const __m256i res_45 = _mm256_maddubs_epi16(ss[2], coeffs[2]);
+ const __m256i res_67 = _mm256_maddubs_epi16(ss[3], coeffs[3]);
+ const __m256i res_0145 = _mm256_add_epi16(res_01, res_45);
+ const __m256i res_2367 = _mm256_add_epi16(res_23, res_67);
+ return _mm256_add_epi16(res_0145, res_2367);
+}
+
+static INLINE __m128i convolve16_2tap_sse2(const __m128i ss[1],
+ const __m128i coeffs[1]) {
+ return _mm_madd_epi16(ss[0], coeffs[0]);
+}
+
+static INLINE __m128i convolve16_4tap_sse2(const __m128i ss[2],
+ const __m128i coeffs[2]) {
+ const __m128i res_01 = _mm_madd_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_madd_epi16(ss[1], coeffs[1]);
+ return _mm_add_epi32(res_01, res_23);
+}
+
+static INLINE __m128i convolve16_6tap_sse2(const __m128i ss[3],
+ const __m128i coeffs[3]) {
+ const __m128i res_01 = _mm_madd_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_madd_epi16(ss[1], coeffs[1]);
+ const __m128i res_45 = _mm_madd_epi16(ss[2], coeffs[2]);
+ const __m128i res_0123 = _mm_add_epi32(res_01, res_23);
+ return _mm_add_epi32(res_0123, res_45);
+}
+
+static INLINE __m128i convolve16_8tap_sse2(const __m128i ss[4],
+ const __m128i coeffs[4]) {
+ const __m128i res_01 = _mm_madd_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_madd_epi16(ss[1], coeffs[1]);
+ const __m128i res_45 = _mm_madd_epi16(ss[2], coeffs[2]);
+ const __m128i res_67 = _mm_madd_epi16(ss[3], coeffs[3]);
+ const __m128i res_0123 = _mm_add_epi32(res_01, res_23);
+ const __m128i res_4567 = _mm_add_epi32(res_45, res_67);
+ return _mm_add_epi32(res_0123, res_4567);
+}
+
+static INLINE __m256i convolve16_2tap_avx2(const __m256i ss[1],
+ const __m256i coeffs[1]) {
+ return _mm256_madd_epi16(ss[0], coeffs[0]);
+}
+
+static INLINE __m256i convolve16_4tap_avx2(const __m256i ss[2],
+ const __m256i coeffs[2]) {
+ const __m256i res_1 = _mm256_madd_epi16(ss[0], coeffs[0]);
+ const __m256i res_2 = _mm256_madd_epi16(ss[1], coeffs[1]);
+ return _mm256_add_epi32(res_1, res_2);
+}
+
+static INLINE __m256i convolve16_6tap_avx2(const __m256i ss[3],
+ const __m256i coeffs[3]) {
+ const __m256i res_01 = _mm256_madd_epi16(ss[0], coeffs[0]);
+ const __m256i res_23 = _mm256_madd_epi16(ss[1], coeffs[1]);
+ const __m256i res_45 = _mm256_madd_epi16(ss[2], coeffs[2]);
+ const __m256i res_0123 = _mm256_add_epi32(res_01, res_23);
+ return _mm256_add_epi32(res_0123, res_45);
+}
+
+static INLINE __m256i convolve16_8tap_avx2(const __m256i ss[4],
+ const __m256i coeffs[4]) {
+ const __m256i res_01 = _mm256_madd_epi16(ss[0], coeffs[0]);
+ const __m256i res_23 = _mm256_madd_epi16(ss[1], coeffs[1]);
+ const __m256i res_45 = _mm256_madd_epi16(ss[2], coeffs[2]);
+ const __m256i res_67 = _mm256_madd_epi16(ss[3], coeffs[3]);
+ const __m256i res_0123 = _mm256_add_epi32(res_01, res_23);
+ const __m256i res_4567 = _mm256_add_epi32(res_45, res_67);
+ return _mm256_add_epi32(res_0123, res_4567);
+}
+
+static INLINE __m256i x_convolve_4tap_avx2(const __m256i data,
+ const __m256i coeffs[2],
+ const __m256i filt[2]) {
+ __m256i ss[2];
+
+ ss[0] = _mm256_shuffle_epi8(data, filt[0]);
+ ss[1] = _mm256_shuffle_epi8(data, filt[1]);
+
+ return convolve_4tap_avx2(ss, coeffs);
+}
+
+static INLINE __m256i x_convolve_6tap_avx2(const __m256i data,
+ const __m256i coeffs[3],
+ const __m256i filt[3]) {
+ __m256i ss[3];
+
+ ss[0] = _mm256_shuffle_epi8(data, filt[0]);
+ ss[1] = _mm256_shuffle_epi8(data, filt[1]);
+ ss[2] = _mm256_shuffle_epi8(data, filt[2]);
+
+ return convolve_6tap_avx2(ss, coeffs);
+}
+
+static INLINE __m256i x_convolve_8tap_avx2(const __m256i data,
+ const __m256i coeffs[4],
+ const __m256i filt[4]) {
+ __m256i ss[4];
+
+ ss[0] = _mm256_shuffle_epi8(data, filt[0]);
+ ss[1] = _mm256_shuffle_epi8(data, filt[1]);
+ ss[2] = _mm256_shuffle_epi8(data, filt[2]);
+ ss[3] = _mm256_shuffle_epi8(data, filt[3]);
+
+ return convolve_8tap_avx2(ss, coeffs);
+}
+
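+// Single-pass vertical rounding: the shift is FILTER_BITS - 1 (== 6) because
+// prepare_half_coeffs_* pre-shifted the coefficients right by 1; 32 == 1 << 5
+// is the matching rounding offset.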
+static INLINE __m256i sr_y_round_avx2(const __m256i src) {
+ const __m256i round = _mm256_set1_epi16(32);
+ const __m256i dst = _mm256_add_epi16(src, round);
+ return _mm256_srai_epi16(dst, FILTER_BITS - 1);
+}
+
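+// 2D horizontal pass rounding: round_0 == 3, minus 1 for the pre-shifted
+// half coefficients, leaves a shift of 2 with rounding offset 2.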
+static INLINE __m128i xy_x_round_sse2(const __m128i src) {
+ const __m128i round = _mm_set1_epi16(2);
+ const __m128i dst = _mm_add_epi16(src, round);
+ return _mm_srai_epi16(dst, 2);
+}
+
+static INLINE __m256i xy_x_round_avx2(const __m256i src) {
+ const __m256i round = _mm256_set1_epi16(2);
+ const __m256i dst = _mm256_add_epi16(src, round);
+ return _mm256_srai_epi16(dst, 2);
+}
+
+static INLINE void xy_x_round_store_2x2_sse2(const __m128i res,
+ int16_t *const dst) {
+ const __m128i d = xy_x_round_sse2(res);
+ _mm_storel_epi64((__m128i *)dst, d);
+}
+
+static INLINE void xy_x_round_store_4x2_sse2(const __m128i res,
+ int16_t *const dst) {
+ const __m128i d = xy_x_round_sse2(res);
+ _mm_storeu_si128((__m128i *)dst, d);
+}
+
+static INLINE void xy_x_round_store_8x2_sse2(const __m128i res[2],
+ int16_t *const dst) {
+ __m128i r[2];
+
+ r[0] = xy_x_round_sse2(res[0]);
+ r[1] = xy_x_round_sse2(res[1]);
+ _mm_storeu_si128((__m128i *)dst, r[0]);
+ _mm_storeu_si128((__m128i *)(dst + 8), r[1]);
+}
+
+static INLINE void xy_x_round_store_8x2_avx2(const __m256i res,
+ int16_t *const dst) {
+ const __m256i d = xy_x_round_avx2(res);
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
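+// For the 16-wide two-row case: r[0]/r[1] hold the first/second 8 results of
+// each row split across lanes, and the inserti128 pair regroups them into
+// two contiguous 16-sample rows before storing.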
+static INLINE void xy_x_round_store_32_avx2(const __m256i res[2],
+ int16_t *const dst) {
+ __m256i r[2];
+
+ r[0] = xy_x_round_avx2(res[0]);
+ r[1] = xy_x_round_avx2(res[1]);
+ const __m256i d0 =
+ _mm256_inserti128_si256(r[0], _mm256_castsi256_si128(r[1]), 1);
+ const __m256i d1 =
+ _mm256_inserti128_si256(r[1], _mm256_extracti128_si256(r[0], 1), 0);
+ _mm256_storeu_si256((__m256i *)dst, d0);
+ _mm256_storeu_si256((__m256i *)(dst + 16), d1);
+}
+
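+// 2D vertical pass rounding: shift by round_1 == 11 with rounding offset
+// 1 << 10.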
+static INLINE __m128i xy_y_round_sse2(const __m128i src) {
+ const __m128i round = _mm_set1_epi32(1024);
+ const __m128i dst = _mm_add_epi32(src, round);
+ return _mm_srai_epi32(dst, 11);
+}
+
+static INLINE __m128i xy_y_round_half_pel_sse2(const __m128i src) {
+ const __m128i round = _mm_set1_epi16(16);
+ const __m128i dst = _mm_add_epi16(src, round);
+ return _mm_srai_epi16(dst, 5);
+}
+
+static INLINE __m256i xy_y_round_avx2(const __m256i src) {
+ const __m256i round = _mm256_set1_epi32(1024);
+ const __m256i dst = _mm256_add_epi32(src, round);
+ return _mm256_srai_epi32(dst, 11);
+}
+
+static INLINE __m256i xy_y_round_16_avx2(const __m256i r[2]) {
+ const __m256i r0 = xy_y_round_avx2(r[0]);
+ const __m256i r1 = xy_y_round_avx2(r[1]);
+ return _mm256_packs_epi32(r0, r1);
+}
+
+static INLINE __m256i xy_y_round_half_pel_avx2(const __m256i src) {
+ const __m256i round = _mm256_set1_epi16(16);
+ const __m256i dst = _mm256_add_epi16(src, round);
+ return _mm256_srai_epi16(dst, 5);
+}
+
+static INLINE void pack_store_2x2_sse2(const __m128i res, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m128i d = _mm_packus_epi16(res, res);
+ *(int16_t *)dst = (int16_t)_mm_cvtsi128_si32(d);
+ *(int16_t *)(dst + stride) = (int16_t)_mm_extract_epi16(d, 1);
+}
+
+static INLINE void pack_store_4x2_sse2(const __m128i res, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m128i d = _mm_packus_epi16(res, res);
+ store_u8_4x2_sse2(d, dst, stride);
+}
+
+static INLINE void pack_store_4x2_avx2(const __m256i res, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i d = _mm256_packus_epi16(res, res);
+ const __m128i d0 = _mm256_castsi256_si128(d);
+ const __m128i d1 = _mm256_extracti128_si256(d, 1);
+
+ xx_storel_32(dst, d0);
+ xx_storel_32(dst + stride, d1);
+}
+
+static INLINE void pack_store_8x2_avx2(const __m256i res, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i d = _mm256_packus_epi16(res, res);
+ const __m128i d0 = _mm256_castsi256_si128(d);
+ const __m128i d1 = _mm256_extracti128_si256(d, 1);
+ _mm_storel_epi64((__m128i *)dst, d0);
+ _mm_storel_epi64((__m128i *)(dst + stride), d1);
+}
+
+static INLINE void pack_store_16x2_avx2(const __m256i res0, const __m256i res1,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i d = _mm256_packus_epi16(res0, res1);
+ storeu_u8_16x2_avx2(d, dst, stride);
+}
+
+static INLINE void xy_y_pack_store_16x2_avx2(const __m256i res0,
+ const __m256i res1,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i t = _mm256_packus_epi16(res0, res1);
+ const __m256i d = _mm256_permute4x64_epi64(t, 0xD8);
+ storeu_u8_16x2_avx2(d, dst, stride);
+}
+
+static INLINE void pack_store_32_avx2(const __m256i res0, const __m256i res1,
+ uint8_t *const dst) {
+ const __m256i t = _mm256_packus_epi16(res0, res1);
+ const __m256i d = _mm256_permute4x64_epi64(t, 0xD8);
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
+static INLINE void xy_y_round_store_2x2_sse2(const __m128i res,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m128i r = xy_y_round_sse2(res);
+ const __m128i rr = _mm_packs_epi32(r, r);
+ pack_store_2x2_sse2(rr, dst, stride);
+}
+
+static INLINE void xy_y_round_store_4x2_avx2(const __m256i res,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i r = xy_y_round_avx2(res);
+ const __m256i rr = _mm256_packs_epi32(r, r);
+ pack_store_4x2_avx2(rr, dst, stride);
+}
+
+static INLINE void xy_y_pack_store_32_avx2(const __m256i res0,
+ const __m256i res1,
+ uint8_t *const dst) {
+ const __m256i d = _mm256_packus_epi16(res0, res1);
+  // Unlike pack_store_32_avx2, no permute4x64(0xD8) is needed here: for
+  // widths of 32 and up the intermediate data is 8-pixel interlaced, so the
+  // packus already leaves the pixels in linear order.
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
+static INLINE void xy_y_round_store_32_avx2(const __m256i r0[2],
+ const __m256i r1[2],
+ uint8_t *const dst) {
+ const __m256i ra = xy_y_round_16_avx2(r0);
+ const __m256i rb = xy_y_round_16_avx2(r1);
+ xy_y_pack_store_32_avx2(ra, rb, dst);
+}
+
+static INLINE void convolve_store_32_avx2(const __m256i res0,
+ const __m256i res1,
+ uint8_t *const dst) {
+ const __m256i d = _mm256_packus_epi16(res0, res1);
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
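+// Single-pass horizontal rounding: the two rounding steps (round_0 == 3,
+// then FILTER_BITS - round_0) fold into one shift of 6 with offset
+// 34 == (4 + 64) / 2, halved for the pre-shifted coefficients.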
+static INLINE __m128i sr_x_round_sse2(const __m128i src) {
+ const __m128i round = _mm_set1_epi16(34);
+ const __m128i dst = _mm_add_epi16(src, round);
+ return _mm_srai_epi16(dst, 6);
+}
+
+static INLINE __m256i sr_x_round_avx2(const __m256i src) {
+ const __m256i round = _mm256_set1_epi16(34);
+ const __m256i dst = _mm256_add_epi16(src, round);
+ return _mm256_srai_epi16(dst, 6);
+}
+
+static INLINE __m128i sr_y_round_sse2(const __m128i src) {
+ const __m128i round = _mm_set1_epi16(32);
+ const __m128i dst = _mm_add_epi16(src, round);
+ return _mm_srai_epi16(dst, FILTER_BITS - 1);
+}
+
+static INLINE void sr_x_round_store_8x2_avx2(const __m256i res,
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ const __m256i r = sr_x_round_avx2(res);
+ pack_store_8x2_avx2(r, dst, dst_stride);
+}
+
+static INLINE void sr_x_round_store_16x2_avx2(const __m256i res[2],
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ __m256i r[2];
+
+ r[0] = sr_x_round_avx2(res[0]);
+ r[1] = sr_x_round_avx2(res[1]);
+ pack_store_16x2_avx2(r[0], r[1], dst, dst_stride);
+}
+
+static INLINE void sr_x_round_store_32_avx2(const __m256i res[2],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ r[0] = sr_x_round_avx2(res[0]);
+ r[1] = sr_x_round_avx2(res[1]);
+ convolve_store_32_avx2(r[0], r[1], dst);
+}
+
+static INLINE void sr_y_round_store_8x2_avx2(const __m256i res,
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ const __m256i r = sr_y_round_avx2(res);
+ pack_store_8x2_avx2(r, dst, dst_stride);
+}
+
+static INLINE void sr_y_round_store_16x2_avx2(const __m256i res[2],
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ __m256i r[2];
+
+ r[0] = sr_y_round_avx2(res[0]);
+ r[1] = sr_y_round_avx2(res[1]);
+ pack_store_16x2_avx2(r[0], r[1], dst, dst_stride);
+}
+
+static INLINE void sr_y_2tap_32_avg_avx2(const uint8_t *const src,
+ const __m256i s0, __m256i *const s1,
+ uint8_t *const dst) {
+ *s1 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i d = _mm256_avg_epu8(s0, *s1);
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
+static INLINE void sr_x_2tap_32_avg_avx2(const uint8_t *const src,
+ uint8_t *const dst) {
+ const __m256i s0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1 = _mm256_loadu_si256((__m256i *)(src + 1));
+ const __m256i d = _mm256_avg_epu8(s0, s1);
+ _mm256_storeu_si256((__m256i *)dst, d);
+}
+
+static INLINE __m128i x_convolve_2tap_2x2_sse4_1(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[1]) {
+ const __m128i sfl =
+ _mm_setr_epi8(0, 1, 1, 2, 4, 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i s_128 = load_u8_4x2_sse4_1(src, stride);
+ const __m128i ss = _mm_shuffle_epi8(s_128, sfl);
+ return convolve_2tap_ssse3(&ss, coeffs);
+}
+
+static INLINE __m128i x_convolve_2tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[1]) {
+ const __m128i sfl =
+ _mm_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 8, 9, 9, 10, 10, 11, 11, 12);
+ const __m128i s_128 = load_u8_8x2_sse2(src, stride);
+ const __m128i ss = _mm_shuffle_epi8(s_128, sfl);
+ return convolve_2tap_ssse3(&ss, coeffs);
+}
+
+static INLINE void x_convolve_2tap_8x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[1],
+ __m128i r[2]) {
+ __m128i ss[2];
+ const __m128i s00 = _mm_loadu_si128((__m128i *)src);
+ const __m128i s10 = _mm_loadu_si128((__m128i *)(src + stride));
+ const __m128i s01 = _mm_srli_si128(s00, 1);
+ const __m128i s11 = _mm_srli_si128(s10, 1);
+ ss[0] = _mm_unpacklo_epi8(s00, s01);
+ ss[1] = _mm_unpacklo_epi8(s10, s11);
+
+ r[0] = convolve_2tap_ssse3(&ss[0], coeffs);
+ r[1] = convolve_2tap_ssse3(&ss[1], coeffs);
+}
+
+static INLINE __m256i x_convolve_2tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[1]) {
+ __m128i s_128[2][2];
+ __m256i s_256[2];
+
+ s_128[0][0] = _mm_loadu_si128((__m128i *)src);
+ s_128[1][0] = _mm_loadu_si128((__m128i *)(src + stride));
+ s_128[0][1] = _mm_srli_si128(s_128[0][0], 1);
+ s_128[1][1] = _mm_srli_si128(s_128[1][0], 1);
+ s_256[0] = _mm256_setr_m128i(s_128[0][0], s_128[1][0]);
+ s_256[1] = _mm256_setr_m128i(s_128[0][1], s_128[1][1]);
+ const __m256i ss = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ return convolve_2tap_avx2(&ss, coeffs);
+}
+
+static INLINE void x_convolve_2tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[1],
+ __m256i r[2]) {
+ const __m256i s0_256 = loadu_8bit_16x2_avx2(src, stride);
+ const __m256i s1_256 = loadu_8bit_16x2_avx2(src + 1, stride);
+ const __m256i s0 = _mm256_unpacklo_epi8(s0_256, s1_256);
+ const __m256i s1 = _mm256_unpackhi_epi8(s0_256, s1_256);
+ r[0] = convolve_2tap_avx2(&s0, coeffs);
+ r[1] = convolve_2tap_avx2(&s1, coeffs);
+}
+
+static INLINE void x_convolve_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1],
+ __m256i r[2]) {
+ const __m256i s0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1 = _mm256_loadu_si256((__m256i *)(src + 1));
+ const __m256i ss0 = _mm256_unpacklo_epi8(s0, s1);
+ const __m256i ss1 = _mm256_unpackhi_epi8(s0, s1);
+
+ r[0] = convolve_2tap_avx2(&ss0, coeffs);
+ r[1] = convolve_2tap_avx2(&ss1, coeffs);
+}
+
+static INLINE __m128i x_convolve_4tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2]) {
+ const __m128i sfl0 =
+ _mm_setr_epi8(0, 1, 1, 2, 8, 9, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i sfl1 =
+ _mm_setr_epi8(2, 3, 3, 4, 10, 11, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i s = load_u8_8x2_sse2(src, stride);
+ __m128i ss[2];
+
+ ss[0] = _mm_shuffle_epi8(s, sfl0);
+ ss[1] = _mm_shuffle_epi8(s, sfl1);
+ return convolve_4tap_ssse3(ss, coeffs);
+}
+
+static INLINE __m128i x_convolve_4tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2]) {
+ const __m128i s = load_u8_8x2_sse2(src, stride);
+ const __m128i sfl0 =
+ _mm_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 8, 9, 9, 10, 10, 11, 11, 12);
+ const __m128i sfl1 =
+ _mm_setr_epi8(2, 3, 3, 4, 4, 5, 5, 6, 10, 11, 11, 12, 12, 13, 13, 14);
+ __m128i ss[2];
+
+ ss[0] = _mm_shuffle_epi8(s, sfl0);
+ ss[1] = _mm_shuffle_epi8(s, sfl1);
+ return convolve_4tap_ssse3(ss, coeffs);
+}
+
+static INLINE __m256i x_convolve_4tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ const __m256i filt[2]) {
+ const __m256i s_256 = loadu_8bit_16x2_avx2(src, stride);
+ return x_convolve_4tap_avx2(s_256, coeffs, filt);
+}
+
+static INLINE void x_convolve_4tap_16x2_avx2(const uint8_t *const src,
+ const int32_t src_stride,
+ const __m256i coeffs[2],
+ const __m256i filt[2],
+ __m256i r[2]) {
+ r[0] = x_convolve_4tap_8x2_avx2(src + 0, src_stride, coeffs, filt);
+ r[1] = x_convolve_4tap_8x2_avx2(src + 8, src_stride, coeffs, filt);
+}
+
+static INLINE void x_convolve_4tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[2],
+ const __m256i filt[2],
+ __m256i r[2]) {
+ const __m256i s0_256 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1_256 = _mm256_loadu_si256((__m256i *)(src + 8));
+
+ r[0] = x_convolve_4tap_avx2(s0_256, coeffs, filt);
+ r[1] = x_convolve_4tap_avx2(s1_256, coeffs, filt);
+}
+
+static INLINE __m128i x_convolve_6tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3]) {
+ const __m128i sfl0 =
+ _mm_setr_epi8(0, 1, 1, 2, 8, 9, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i sfl1 =
+ _mm_setr_epi8(2, 3, 3, 4, 10, 11, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i sfl2 =
+ _mm_setr_epi8(4, 5, 5, 6, 12, 13, 13, 14, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ const __m128i s = load_u8_8x2_sse2(src, stride);
+ __m128i ss[3];
+
+ ss[0] = _mm_shuffle_epi8(s, sfl0);
+ ss[1] = _mm_shuffle_epi8(s, sfl1);
+ ss[2] = _mm_shuffle_epi8(s, sfl2);
+ return convolve_6tap_ssse3(ss, coeffs);
+}
+
+static INLINE __m128i x_convolve_6tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3]) {
+ const __m128i s = load_u8_8x2_sse2(src, stride);
+ const __m128i sfl0 =
+ _mm_setr_epi8(0, 1, 1, 2, 8, 9, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i sfl1 =
+ _mm_setr_epi8(2, 3, 3, 4, 10, 11, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i sfl2 =
+ _mm_setr_epi8(4, 5, 5, 6, 12, 13, 13, 14, 0, 0, 0, 0, 0, 0, 0, 0);
+ __m128i ss[3];
+
+ ss[0] = _mm_shuffle_epi8(s, sfl0);
+ ss[1] = _mm_shuffle_epi8(s, sfl1);
+ ss[2] = _mm_shuffle_epi8(s, sfl2);
+ return convolve_6tap_ssse3(ss, coeffs);
+}
+
+static INLINE __m256i x_convolve_6tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[3],
+ const __m256i filt[3]) {
+ const __m256i s_256 = loadu_8bit_16x2_avx2(src, stride);
+ return x_convolve_6tap_avx2(s_256, coeffs, filt);
+}
+
+static INLINE void x_convolve_6tap_16x2_avx2(const uint8_t *const src,
+ const int32_t src_stride,
+ const __m256i coeffs[3],
+ const __m256i filt[3],
+ __m256i r[2]) {
+ r[0] = x_convolve_6tap_8x2_avx2(src + 0, src_stride, coeffs, filt);
+ r[1] = x_convolve_6tap_8x2_avx2(src + 8, src_stride, coeffs, filt);
+}
+
+static INLINE void x_convolve_6tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[3],
+ const __m256i filt[3],
+ __m256i r[2]) {
+ const __m256i s0_256 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1_256 = _mm256_loadu_si256((__m256i *)(src + 8));
+
+ r[0] = x_convolve_6tap_avx2(s0_256, coeffs, filt);
+ r[1] = x_convolve_6tap_avx2(s1_256, coeffs, filt);
+}
+
+static INLINE __m256i x_convolve_8tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[4],
+ const __m256i filt[4]) {
+ const __m256i s_256 = loadu_8bit_16x2_avx2(src, stride);
+ return x_convolve_8tap_avx2(s_256, coeffs, filt);
+}
+
+static AOM_FORCE_INLINE void x_convolve_8tap_16x2_avx2(const uint8_t *const src,
+ const int32_t src_stride,
+ const __m256i coeffs[4],
+ const __m256i filt[4],
+ __m256i r[2]) {
+ r[0] = x_convolve_8tap_8x2_avx2(src + 0, src_stride, coeffs, filt);
+ r[1] = x_convolve_8tap_8x2_avx2(src + 8, src_stride, coeffs, filt);
+}
+
+static AOM_FORCE_INLINE void x_convolve_8tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[4],
+ const __m256i filt[4],
+ __m256i r[2]) {
+ const __m256i s0_256 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1_256 = _mm256_loadu_si256((__m256i *)(src + 8));
+
+ r[0] = x_convolve_8tap_avx2(s0_256, coeffs, filt);
+ r[1] = x_convolve_8tap_avx2(s1_256, coeffs, filt);
+}
+
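+// The y_convolve_* and xy_y_convolve_* helpers below keep recently loaded
+// rows in small caller-owned register arrays: each call loads the next rows,
+// reuses the previously loaded ones, and rotates the oldest slot so the next
+// call can overwrite it.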
+static INLINE __m128i y_convolve_2tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[1],
+ __m128i s_16[2]) {
+ __m128i s_128[2];
+
+ s_16[1] = _mm_cvtsi32_si128(*(int16_t *)(src + stride));
+ s_128[0] = _mm_unpacklo_epi16(s_16[0], s_16[1]);
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)(src + 2 * stride));
+ s_128[1] = _mm_unpacklo_epi16(s_16[1], s_16[0]);
+ const __m128i ss = _mm_unpacklo_epi8(s_128[0], s_128[1]);
+ return convolve_2tap_ssse3(&ss, coeffs);
+}
+
+static INLINE __m128i y_convolve_2tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[1],
+ __m128i s_32[2]) {
+ __m128i s_128[2];
+
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src + stride));
+ s_128[0] = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * stride));
+ s_128[1] = _mm_unpacklo_epi32(s_32[1], s_32[0]);
+ const __m128i ss = _mm_unpacklo_epi8(s_128[0], s_128[1]);
+ return convolve_2tap_ssse3(&ss, coeffs);
+}
+
+static INLINE __m256i y_convolve_2tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[1],
+ __m128i s_64[2]) {
+ __m256i s_256[2];
+
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src + stride));
+ s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src + 2 * stride));
+ s_256[1] = _mm256_setr_m128i(s_64[1], s_64[0]);
+ const __m256i ss = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ return convolve_2tap_avx2(&ss, coeffs);
+}
+
+static INLINE void y_convolve_2tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[1],
+ __m128i s_128[2], __m256i r[2]) {
+ __m256i s_256[2];
+
+ s_128[1] = _mm_loadu_si128((__m128i *)(src + stride));
+ s_256[0] = _mm256_setr_m128i(s_128[0], s_128[1]);
+ s_128[0] = _mm_loadu_si128((__m128i *)(src + 2 * stride));
+ s_256[1] = _mm256_setr_m128i(s_128[1], s_128[0]);
+ const __m256i ss0 = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ const __m256i ss1 = _mm256_unpackhi_epi8(s_256[0], s_256[1]);
+ r[0] = convolve_2tap_avx2(&ss0, coeffs);
+ r[1] = convolve_2tap_avx2(&ss1, coeffs);
+}
+
+static INLINE void y_convolve_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1],
+ const __m256i s0, __m256i *const s1,
+ __m256i r[2]) {
+ *s1 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i ss0 = _mm256_unpacklo_epi8(s0, *s1);
+ const __m256i ss1 = _mm256_unpackhi_epi8(s0, *s1);
+ r[0] = convolve_2tap_avx2(&ss0, coeffs);
+ r[1] = convolve_2tap_avx2(&ss1, coeffs);
+}
+
+static INLINE __m128i y_convolve_4tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2],
+ __m128i s_16[4],
+ __m128i ss_128[2]) {
+ s_16[3] = _mm_cvtsi32_si128(*(int16_t *)(src + stride));
+ const __m128i src23 = _mm_unpacklo_epi16(s_16[2], s_16[3]);
+ s_16[2] = _mm_cvtsi32_si128(*(int16_t *)(src + 2 * stride));
+ const __m128i src34 = _mm_unpacklo_epi16(s_16[3], s_16[2]);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+ return convolve_4tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE __m128i y_convolve_4tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2],
+ __m128i s_32[4],
+ __m128i ss_128[2]) {
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(src + stride));
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * stride));
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[2]);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+ return convolve_4tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE __m256i y_convolve_4tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ __m128i s_64[4],
+ __m256i ss_256[2]) {
+ s_64[3] = _mm_loadl_epi64((__m128i *)(src + stride));
+ const __m256i src23 = _mm256_setr_m128i(s_64[2], s_64[3]);
+ s_64[2] = _mm_loadl_epi64((__m128i *)(src + 2 * stride));
+ const __m256i src34 = _mm256_setr_m128i(s_64[3], s_64[2]);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+ return convolve_4tap_avx2(ss_256, coeffs);
+}
+
+static INLINE void y_convolve_4tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ __m128i s_128[4],
+ __m256i ss_256[4], __m256i r[2]) {
+ s_128[3] = _mm_loadu_si128((__m128i *)(src + stride));
+ const __m256i src23 = _mm256_setr_m128i(s_128[2], s_128[3]);
+ s_128[2] = _mm_loadu_si128((__m128i *)(src + 2 * stride));
+ const __m256i src34 = _mm256_setr_m128i(s_128[3], s_128[2]);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+ ss_256[3] = _mm256_unpackhi_epi8(src23, src34);
+ r[0] = convolve_4tap_avx2(ss_256, coeffs);
+ r[1] = convolve_4tap_avx2(ss_256 + 2, coeffs);
+}
+
+static INLINE __m128i y_convolve_6tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3],
+ __m128i s_16[6],
+ __m128i ss_128[3]) {
+ s_16[5] = _mm_cvtsi32_si128(*(int16_t *)(src + 3 * stride));
+ const __m128i src45 = _mm_unpacklo_epi16(s_16[4], s_16[5]);
+ s_16[4] = _mm_cvtsi32_si128(*(int16_t *)(src + 4 * stride));
+ const __m128i src56 = _mm_unpacklo_epi16(s_16[5], s_16[4]);
+ ss_128[2] = _mm_unpacklo_epi8(src45, src56);
+ return convolve_6tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE void y_convolve_4tap_32x2_avx2(
+ const uint8_t *const src, const ptrdiff_t stride, const __m256i coeffs[2],
+ __m256i s_256[4], __m256i ss_256[4], __m256i tt_256[4], __m256i r[4]) {
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
+ ss_256[1] = _mm256_unpacklo_epi8(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi8(s_256[2], s_256[3]);
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
+ tt_256[1] = _mm256_unpacklo_epi8(s_256[3], s_256[2]);
+ tt_256[3] = _mm256_unpackhi_epi8(s_256[3], s_256[2]);
+ r[0] = convolve_4tap_avx2(ss_256 + 0, coeffs);
+ r[1] = convolve_4tap_avx2(ss_256 + 2, coeffs);
+ r[2] = convolve_4tap_avx2(tt_256 + 0, coeffs);
+ r[3] = convolve_4tap_avx2(tt_256 + 2, coeffs);
+}
+
+static INLINE __m128i y_convolve_6tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3],
+ __m128i s_32[6],
+ __m128i ss_128[3]) {
+ s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(src + 3 * stride));
+ const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]);
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(src + 4 * stride));
+ const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[4]);
+ ss_128[2] = _mm_unpacklo_epi8(src45, src56);
+ return convolve_6tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE __m256i y_convolve_6tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[3],
+ __m128i s_64[6],
+ __m256i ss_256[3]) {
+ s_64[5] = _mm_loadl_epi64((__m128i *)(src + 3 * stride));
+ const __m256i src45 = _mm256_setr_m128i(s_64[4], s_64[5]);
+ s_64[4] = _mm_loadl_epi64((__m128i *)(src + 4 * stride));
+ const __m256i src56 = _mm256_setr_m128i(s_64[5], s_64[4]);
+ ss_256[2] = _mm256_unpacklo_epi8(src45, src56);
+ return convolve_6tap_avx2(ss_256, coeffs);
+}
+
+static INLINE void y_convolve_6tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[3],
+ __m128i s_128[6],
+ __m256i ss_256[6], __m256i r[2]) {
+ s_128[5] = _mm_loadu_si128((__m128i *)(src + 3 * stride));
+ const __m256i src45 = _mm256_setr_m128i(s_128[4], s_128[5]);
+ s_128[4] = _mm_loadu_si128((__m128i *)(src + 4 * stride));
+ const __m256i src56 = _mm256_setr_m128i(s_128[5], s_128[4]);
+ ss_256[2] = _mm256_unpacklo_epi8(src45, src56);
+ ss_256[5] = _mm256_unpackhi_epi8(src45, src56);
+ r[0] = convolve_6tap_avx2(ss_256, coeffs);
+ r[1] = convolve_6tap_avx2(ss_256 + 3, coeffs);
+}
+
+static INLINE void y_convolve_6tap_32x2_avx2(
+ const uint8_t *const src, const ptrdiff_t stride, const __m256i coeffs[3],
+ __m256i s_256[6], __m256i ss_256[6], __m256i tt_256[6], __m256i r[4]) {
+ s_256[5] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ ss_256[2] = _mm256_unpacklo_epi8(s_256[4], s_256[5]);
+ ss_256[5] = _mm256_unpackhi_epi8(s_256[4], s_256[5]);
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+ tt_256[2] = _mm256_unpacklo_epi8(s_256[5], s_256[4]);
+ tt_256[5] = _mm256_unpackhi_epi8(s_256[5], s_256[4]);
+ r[0] = convolve_6tap_avx2(ss_256 + 0, coeffs);
+ r[1] = convolve_6tap_avx2(ss_256 + 3, coeffs);
+ r[2] = convolve_6tap_avx2(tt_256 + 0, coeffs);
+ r[3] = convolve_6tap_avx2(tt_256 + 3, coeffs);
+}
+
+static INLINE __m128i y_convolve_8tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[4],
+ __m128i s_16[8],
+ __m128i ss_128[4]) {
+ s_16[7] = _mm_cvtsi32_si128(*(int16_t *)(src + 7 * stride));
+ const __m128i src67 = _mm_unpacklo_epi16(s_16[6], s_16[7]);
+ s_16[6] = _mm_cvtsi32_si128(*(int16_t *)(src + 8 * stride));
+ const __m128i src78 = _mm_unpacklo_epi16(s_16[7], s_16[6]);
+ ss_128[3] = _mm_unpacklo_epi8(src67, src78);
+ return convolve_8tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE __m128i y_convolve_8tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i coeffs[4],
+ __m128i s_32[8],
+ __m128i ss_128[4]) {
+ s_32[7] = _mm_cvtsi32_si128(*(int32_t *)(src + 7 * stride));
+ const __m128i src67 = _mm_unpacklo_epi32(s_32[6], s_32[7]);
+ s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(src + 8 * stride));
+ const __m128i src78 = _mm_unpacklo_epi32(s_32[7], s_32[6]);
+ ss_128[3] = _mm_unpacklo_epi8(src67, src78);
+ return convolve_8tap_ssse3(ss_128, coeffs);
+}
+
+static INLINE __m256i y_convolve_8tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[4],
+ __m128i s_64[8],
+ __m256i ss_256[4]) {
+ s_64[7] = _mm_loadl_epi64((__m128i *)(src + 7 * stride));
+ const __m256i src67 = _mm256_setr_m128i(s_64[6], s_64[7]);
+ s_64[6] = _mm_loadl_epi64((__m128i *)(src + 8 * stride));
+ const __m256i src78 = _mm256_setr_m128i(s_64[7], s_64[6]);
+ ss_256[3] = _mm256_unpacklo_epi8(src67, src78);
+ return convolve_8tap_avx2(ss_256, coeffs);
+}
+
+static INLINE void y_convolve_8tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i coeffs[4],
+ __m128i s_128[8],
+ __m256i ss_256[8], __m256i r[2]) {
+ s_128[7] = _mm_loadu_si128((__m128i *)(src + 7 * stride));
+ const __m256i src67 = _mm256_setr_m128i(s_128[6], s_128[7]);
+ s_128[6] = _mm_loadu_si128((__m128i *)(src + 8 * stride));
+ const __m256i src78 = _mm256_setr_m128i(s_128[7], s_128[6]);
+ ss_256[3] = _mm256_unpacklo_epi8(src67, src78);
+ ss_256[7] = _mm256_unpackhi_epi8(src67, src78);
+ r[0] = convolve_8tap_avx2(ss_256, coeffs);
+ r[1] = convolve_8tap_avx2(ss_256 + 4, coeffs);
+}
+
+static INLINE void y_convolve_8tap_32x2_avx2(
+ const uint8_t *const src, const ptrdiff_t stride, const __m256i coeffs[4],
+ __m256i s_256[8], __m256i ss_256[8], __m256i tt_256[8], __m256i r[4]) {
+ s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * stride));
+ ss_256[3] = _mm256_unpacklo_epi8(s_256[6], s_256[7]);
+ ss_256[7] = _mm256_unpackhi_epi8(s_256[6], s_256[7]);
+ s_256[6] = _mm256_loadu_si256((__m256i *)(src + 8 * stride));
+ tt_256[3] = _mm256_unpacklo_epi8(s_256[7], s_256[6]);
+ tt_256[7] = _mm256_unpackhi_epi8(s_256[7], s_256[6]);
+ r[0] = convolve_8tap_avx2(ss_256 + 0, coeffs);
+ r[1] = convolve_8tap_avx2(ss_256 + 4, coeffs);
+ r[2] = convolve_8tap_avx2(tt_256 + 0, coeffs);
+ r[3] = convolve_8tap_avx2(tt_256 + 4, coeffs);
+}
+
+static INLINE void xy_x_convolve_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1],
+ __m256i r[2]) {
+ const __m256i s0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i s1 = _mm256_loadu_si256((__m256i *)(src + 1));
+ const __m256i ss0 = _mm256_unpacklo_epi8(s0, s1);
+ const __m256i ss1 = _mm256_unpackhi_epi8(s0, s1);
+
+ r[0] = convolve_2tap_avx2(&ss0, coeffs);
+ r[1] = convolve_2tap_avx2(&ss1, coeffs);
+}
+
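+// The xy_x_*_32 helpers store the two madd halves without any cross-lane
+// permutation, which produces the 8-pixel interlaced im_block layout
+// (pixels 0-7, 16-23, 8-15, 24-31) noted in
+// av1_convolve_2d_sr_specialized_avx2; the vertical pass reads the same
+// layout, so no shuffle is ever needed.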
+static INLINE void xy_x_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1],
+ int16_t *const dst) {
+ __m256i r[2];
+
+ xy_x_convolve_2tap_32_avx2(src, coeffs, r);
+ const __m256i d0 = xy_x_round_avx2(r[0]);
+ const __m256i d1 = xy_x_round_avx2(r[1]);
+ _mm256_storeu_si256((__m256i *)dst, d0);
+ _mm256_storeu_si256((__m256i *)(dst + 16), d1);
+}
+
+static INLINE void xy_x_4tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[2],
+ const __m256i filt[2],
+ int16_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_4tap_32_avx2(src, coeffs, filt, r);
+ const __m256i d0 = xy_x_round_avx2(r[0]);
+ const __m256i d1 = xy_x_round_avx2(r[1]);
+ _mm256_storeu_si256((__m256i *)dst, d0);
+ _mm256_storeu_si256((__m256i *)(dst + 16), d1);
+}
+
+static INLINE void xy_x_6tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[3],
+ const __m256i filt[3],
+ int16_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_6tap_32_avx2(src, coeffs, filt, r);
+ const __m256i d0 = xy_x_round_avx2(r[0]);
+ const __m256i d1 = xy_x_round_avx2(r[1]);
+ _mm256_storeu_si256((__m256i *)dst, d0);
+ _mm256_storeu_si256((__m256i *)(dst + 16), d1);
+}
+
+static INLINE void xy_x_8tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[4],
+ const __m256i filt[4],
+ int16_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_8tap_32_avx2(src, coeffs, filt, r);
+ const __m256i d0 = xy_x_round_avx2(r[0]);
+ const __m256i d1 = xy_x_round_avx2(r[1]);
+ _mm256_storeu_si256((__m256i *)dst, d0);
+ _mm256_storeu_si256((__m256i *)(dst + 16), d1);
+}
+
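+// The xy_y_* helpers below run the vertical pass of the 2D filter on 16-bit
+// intermediate data; the intermediate stride equals the block width, hence
+// the src + 2, src + 4, ... row addressing.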
+static INLINE __m128i xy_y_convolve_2tap_2x2_sse2(const int16_t *const src,
+ __m128i s_32[2],
+ const __m128i coeffs[1]) {
+ __m128i s_128[2];
+
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src + 2));
+ s_128[0] = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * 2));
+ s_128[1] = _mm_unpacklo_epi32(s_32[1], s_32[0]);
+ const __m128i ss = _mm_unpacklo_epi16(s_128[0], s_128[1]);
+ return convolve16_2tap_sse2(&ss, coeffs);
+}
+
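+// At the half-pel position both taps are equal, so a plain 16-bit add
+// replaces the multiply-add and the scale folds into the rounding shift.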
+static INLINE __m128i xy_y_convolve_2tap_2x2_half_pel_sse2(
+ const int16_t *const src, __m128i s_32[2]) {
+ __m128i s_128[2];
+
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src + 2));
+ s_128[0] = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * 2));
+ s_128[1] = _mm_unpacklo_epi32(s_32[1], s_32[0]);
+ return _mm_add_epi16(s_128[0], s_128[1]);
+}
+
+static INLINE void xy_y_convolve_2tap_4x2_sse2(const int16_t *const src,
+ __m128i s_64[2],
+ const __m128i coeffs[1],
+ __m128i r[2]) {
+ __m128i s_128[2];
+
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src + 4));
+ s_128[0] = _mm_unpacklo_epi64(s_64[0], s_64[1]);
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src + 2 * 4));
+ s_128[1] = _mm_unpacklo_epi64(s_64[1], s_64[0]);
+ const __m128i ss0 = _mm_unpacklo_epi16(s_128[0], s_128[1]);
+ const __m128i ss1 = _mm_unpackhi_epi16(s_128[0], s_128[1]);
+ r[0] = convolve16_2tap_sse2(&ss0, coeffs);
+ r[1] = convolve16_2tap_sse2(&ss1, coeffs);
+}
+
+static INLINE __m128i xy_y_convolve_2tap_4x2_half_pel_sse2(
+ const int16_t *const src, __m128i s_64[2]) {
+ __m128i s_128[2];
+
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src + 4));
+ s_128[0] = _mm_unpacklo_epi64(s_64[0], s_64[1]);
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src + 2 * 4));
+ s_128[1] = _mm_unpacklo_epi64(s_64[1], s_64[0]);
+ return _mm_add_epi16(s_128[0], s_128[1]);
+}
+
+static INLINE void xy_y_convolve_2tap_16_avx2(const __m256i s0,
+ const __m256i s1,
+ const __m256i coeffs[1],
+ __m256i r[2]) {
+ const __m256i ss0 = _mm256_unpacklo_epi16(s0, s1);
+ const __m256i ss1 = _mm256_unpackhi_epi16(s0, s1);
+ r[0] = convolve16_2tap_avx2(&ss0, coeffs);
+ r[1] = convolve16_2tap_avx2(&ss1, coeffs);
+}
+
+static INLINE void xy_y_convolve_2tap_8x2_avx2(const int16_t *const src,
+ __m128i s_128[2],
+ const __m256i coeffs[1],
+ __m256i r[2]) {
+ __m256i s_256[2];
+ s_128[1] = _mm_loadu_si128((__m128i *)(src + 8));
+ s_256[0] = _mm256_setr_m128i(s_128[0], s_128[1]);
+ s_128[0] = _mm_loadu_si128((__m128i *)(src + 2 * 8));
+ s_256[1] = _mm256_setr_m128i(s_128[1], s_128[0]);
+ xy_y_convolve_2tap_16_avx2(s_256[0], s_256[1], coeffs, r);
+}
+
+static INLINE __m256i xy_y_convolve_2tap_8x2_half_pel_avx2(
+ const int16_t *const src, __m128i s_128[2]) {
+ __m256i s_256[2];
+ s_128[1] = _mm_loadu_si128((__m128i *)(src + 8));
+ s_256[0] = _mm256_setr_m128i(s_128[0], s_128[1]);
+ s_128[0] = _mm_loadu_si128((__m128i *)(src + 2 * 8));
+ s_256[1] = _mm256_setr_m128i(s_128[1], s_128[0]);
+ return _mm256_add_epi16(s_256[0], s_256[1]);
+}
+
+static INLINE void xy_y_convolve_2tap_16x2_half_pel_avx2(
+ const int16_t *const src, __m256i s_256[2], __m256i r[2]) {
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 16));
+ r[0] = _mm256_add_epi16(s_256[0], s_256[1]);
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 2 * 16));
+ r[1] = _mm256_add_epi16(s_256[1], s_256[0]);
+}
+
+static INLINE void xy_y_store_16x2_avx2(const __m256i r[2], uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i t = _mm256_packus_epi16(r[0], r[1]);
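+  // packus_epi16 packs within each 128-bit lane, leaving the two rows
+  // interleaved; permute4x64(0xD8) restores consecutive row order.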
+ const __m256i d = _mm256_permute4x64_epi64(t, 0xD8);
+ storeu_u8_16x2_avx2(d, dst, stride);
+}
+
+static INLINE void xy_y_convolve_2tap_16x2_avx2(const int16_t *const src,
+ __m256i s[2],
+ const __m256i coeffs[1],
+ __m256i r[4]) {
+ s[1] = _mm256_loadu_si256((__m256i *)(src + 16));
+ xy_y_convolve_2tap_16_avx2(s[0], s[1], coeffs, r + 0);
+ s[0] = _mm256_loadu_si256((__m256i *)(src + 2 * 16));
+ xy_y_convolve_2tap_16_avx2(s[1], s[0], coeffs, r + 2);
+}
+
+static INLINE void xy_y_convolve_2tap_32_avx2(const int16_t *const src,
+ const __m256i s0[2],
+ __m256i s1[2],
+ const __m256i coeffs[1],
+ __m256i r[4]) {
+ s1[0] = _mm256_loadu_si256((__m256i *)src);
+ s1[1] = _mm256_loadu_si256((__m256i *)(src + 16));
+ xy_y_convolve_2tap_16_avx2(s0[0], s1[0], coeffs, r + 0);
+ xy_y_convolve_2tap_16_avx2(s0[1], s1[1], coeffs, r + 2);
+}
+
+static INLINE void xy_y_convolve_2tap_32_all_avx2(const int16_t *const src,
+ const __m256i s0[2],
+ __m256i s1[2],
+ const __m256i coeffs[1],
+ uint8_t *const dst) {
+ __m256i r[4];
+
+ xy_y_convolve_2tap_32_avx2(src, s0, s1, coeffs, r);
+ xy_y_round_store_32_avx2(r + 0, r + 2, dst);
+}
+
+static INLINE void xy_y_convolve_2tap_half_pel_32_avx2(const int16_t *const src,
+ const __m256i s0[2],
+ __m256i s1[2],
+ __m256i r[2]) {
+ s1[0] = _mm256_loadu_si256((__m256i *)src);
+ s1[1] = _mm256_loadu_si256((__m256i *)(src + 16));
+ r[0] = _mm256_add_epi16(s0[0], s1[0]);
+ r[1] = _mm256_add_epi16(s0[1], s1[1]);
+}
+
+static INLINE void xy_y_convolve_2tap_half_pel_32_all_avx2(
+ const int16_t *const src, const __m256i s0[2], __m256i s1[2],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ xy_y_convolve_2tap_half_pel_32_avx2(src, s0, s1, r);
+ r[0] = xy_y_round_half_pel_avx2(r[0]);
+ r[1] = xy_y_round_half_pel_avx2(r[1]);
+ xy_y_pack_store_32_avx2(r[0], r[1], dst);
+}
+
+static INLINE __m128i xy_y_convolve_4tap_2x2_sse2(const int16_t *const src,
+ __m128i s_32[4],
+ __m128i ss_128[2],
+ const __m128i coeffs[2]) {
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(src + 3 * 2));
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(src + 4 * 2));
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[2]);
+ ss_128[1] = _mm_unpacklo_epi16(src23, src34);
+ const __m128i r = convolve16_4tap_sse2(ss_128, coeffs);
+ ss_128[0] = ss_128[1];
+ return r;
+}
+
+static INLINE __m256i xy_y_convolve_4tap_4x2_avx2(const int16_t *const src,
+ __m128i s_64[4],
+ __m256i ss_256[2],
+ const __m256i coeffs[2]) {
+ __m256i s_256[2];
+ s_64[3] = _mm_loadl_epi64((__m128i *)(src + 3 * 4));
+ s_256[0] = _mm256_setr_m128i(s_64[2], s_64[3]);
+ s_64[2] = _mm_loadl_epi64((__m128i *)(src + 4 * 4));
+ s_256[1] = _mm256_setr_m128i(s_64[3], s_64[2]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ const __m256i r = convolve16_4tap_avx2(ss_256, coeffs);
+ ss_256[0] = ss_256[1];
+ return r;
+}
+
+static INLINE void xy_y_convolve_4tap_16_avx2(const __m256i *const ss,
+ const __m256i coeffs[2],
+ __m256i r[2]) {
+ r[0] = convolve16_4tap_avx2(ss, coeffs);
+ r[1] = convolve16_4tap_avx2(ss + 2, coeffs);
+}
+
+static INLINE void xy_y_convolve_4tap_8x2_avx2(const int16_t *const src,
+ __m256i ss_256[4],
+ const __m256i coeffs[2],
+ __m256i r[2]) {
+ __m256i s_256[2];
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 2 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 3 * 8));
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r);
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+}
+
+static INLINE void xy_y_convolve_4tap_8x2_half_pel_avx2(
+ const int16_t *const src, const __m256i coeffs[1], __m256i s_256[4],
+ __m256i r[2]) {
+ __m256i a_256[2];
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 8));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 8));
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[3]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[2]);
+ xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r);
+ s_256[0] = s_256[2];
+ s_256[1] = s_256[3];
+}
+
+static INLINE void xy_y_convolve_4tap_16x2_avx2(
+ const int16_t *const src, __m256i s_256[4], __m256i ss_256[4],
+ __m256i tt_256[4], const __m256i coeffs[2], __m256i r[4]) {
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 16));
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 4 * 16));
+ tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[2]);
+ tt_256[3] = _mm256_unpackhi_epi16(s_256[3], s_256[2]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
+ xy_y_convolve_4tap_16_avx2(tt_256, coeffs, r + 2);
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+ tt_256[0] = tt_256[1];
+ tt_256[2] = tt_256[3];
+}
+
+static INLINE void xy_y_convolve_4tap_32x2_avx2(
+ const int16_t *const src, const ptrdiff_t stride, __m256i s_256[4],
+ __m256i ss_256[4], __m256i tt_256[4], const __m256i coeffs[2],
+ __m256i r[4]) {
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
+ tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[2]);
+ tt_256[3] = _mm256_unpackhi_epi16(s_256[3], s_256[2]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
+ xy_y_convolve_4tap_16_avx2(tt_256, coeffs, r + 2);
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+ tt_256[0] = tt_256[1];
+ tt_256[2] = tt_256[3];
+}
+
+static INLINE void xy_y_convolve_4tap_16x2_half_pel_avx2(
+ const int16_t *const src, __m256i s_256[5], const __m256i coeffs[1],
+ __m256i r[4]) {
+ __m256i a_256[2];
+
+ s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 16));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * 16));
+
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[3]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[2]);
+ xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r + 0);
+
+ a_256[0] = _mm256_add_epi16(s_256[1], s_256[4]);
+ a_256[1] = _mm256_add_epi16(s_256[2], s_256[3]);
+ xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r + 2);
+
+ s_256[0] = s_256[2];
+ s_256[1] = s_256[3];
+ s_256[2] = s_256[4];
+}
+
+static INLINE __m128i xy_y_convolve_6tap_2x2_sse2(const int16_t *const src,
+ __m128i s_32[6],
+ __m128i ss_128[3],
+ const __m128i coeffs[3]) {
+ s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(src + 5 * 2));
+ const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]);
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(src + 6 * 2));
+ const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[4]);
+ ss_128[2] = _mm_unpacklo_epi16(src45, src56);
+ const __m128i r = convolve16_6tap_sse2(ss_128, coeffs);
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ return r;
+}
+
+static INLINE __m256i xy_y_convolve_6tap_4x2_avx2(const int16_t *const src,
+ __m128i s_64[6],
+ __m256i ss_256[3],
+ const __m256i coeffs[3]) {
+ __m256i s_256[2];
+ s_64[5] = _mm_loadl_epi64((__m128i *)(src + 5 * 4));
+ s_256[0] = _mm256_setr_m128i(s_64[4], s_64[5]);
+ s_64[4] = _mm_loadl_epi64((__m128i *)(src + 6 * 4));
+ s_256[1] = _mm256_setr_m128i(s_64[5], s_64[4]);
+ ss_256[2] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ const __m256i r = convolve16_6tap_avx2(ss_256, coeffs);
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ return r;
+}
+
+static INLINE void xy_y_convolve_6tap_16_avx2(const __m256i ss[6],
+ const __m256i coeffs[3],
+ __m256i r[2]) {
+ r[0] = convolve16_6tap_avx2(ss, coeffs);
+ r[1] = convolve16_6tap_avx2(ss + 3, coeffs);
+}
+
+static INLINE void xy_y_convolve_6tap_8x2_avx2(const int16_t *const src,
+ __m256i ss_256[6],
+ const __m256i coeffs[3],
+ __m256i r[2]) {
+ __m256i s_256[2];
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 4 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 5 * 8));
+ ss_256[2] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[5] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ xy_y_convolve_6tap_16_avx2(ss_256, coeffs, r);
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[3] = ss_256[4];
+ ss_256[4] = ss_256[5];
+}
+
+static INLINE void xy_y_convolve_6tap_8x2_half_pel_avx2(
+ const int16_t *const src, const __m256i coeffs[2], __m256i s_256[6],
+ __m256i r[2]) {
+ __m256i a_256[2], ss_256[4];
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * 8));
+ s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * 8));
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[5]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[4]);
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r);
+ s_256[0] = s_256[2];
+ s_256[1] = s_256[3];
+ s_256[2] = s_256[4];
+ s_256[3] = s_256[5];
+}
+
+static INLINE void xy_y_convolve_6tap_16x2_avx2(
+ const int16_t *const src, const ptrdiff_t stride, __m256i s_256[6],
+ __m256i ss_256[6], __m256i tt_256[6], const __m256i coeffs[3],
+ __m256i r[4]) {
+ s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
+ ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]);
+ ss_256[5] = _mm256_unpackhi_epi16(s_256[4], s_256[5]);
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
+ tt_256[2] = _mm256_unpacklo_epi16(s_256[5], s_256[4]);
+ tt_256[5] = _mm256_unpackhi_epi16(s_256[5], s_256[4]);
+
+ xy_y_convolve_6tap_16_avx2(ss_256, coeffs, r + 0);
+ xy_y_convolve_6tap_16_avx2(tt_256, coeffs, r + 2);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[3] = ss_256[4];
+ ss_256[4] = ss_256[5];
+
+ tt_256[0] = tt_256[1];
+ tt_256[1] = tt_256[2];
+ tt_256[3] = tt_256[4];
+ tt_256[4] = tt_256[5];
+}
+
+static INLINE void xy_y_convolve_6tap_16x2_half_pel_avx2(
+ const int16_t *const src, const ptrdiff_t stride, __m256i s_256[6],
+ __m256i ss_256[4], const __m256i coeffs[2], __m256i r[4]) {
+ __m256i a_256[2];
+
+ s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[5]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[4]);
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
+
+ a_256[1] = _mm256_add_epi16(s_256[2], s_256[5]);
+ s_256[0] = s_256[2];
+ s_256[2] = s_256[4];
+ s_256[4] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
+ a_256[0] = _mm256_add_epi16(s_256[1], s_256[4]);
+ s_256[1] = s_256[3];
+ s_256[3] = s_256[5];
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 2);
+}
+
+static INLINE __m128i xy_y_convolve_8tap_2x2_sse2(const int16_t *const src,
+ __m128i s_32[8],
+ __m128i ss_128[4],
+ const __m128i coeffs[4]) {
+ s_32[7] = _mm_cvtsi32_si128(*(int32_t *)(src + 7 * 2));
+ const __m128i src67 = _mm_unpacklo_epi32(s_32[6], s_32[7]);
+ s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(src + 8 * 2));
+ const __m128i src78 = _mm_unpacklo_epi32(s_32[7], s_32[6]);
+ ss_128[3] = _mm_unpacklo_epi16(src67, src78);
+ const __m128i r = convolve16_8tap_sse2(ss_128, coeffs);
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ ss_128[2] = ss_128[3];
+ return r;
+}
+
+static INLINE __m256i xy_y_convolve_8tap_4x2_avx2(const int16_t *const src,
+ __m128i s_64[8],
+ __m256i ss_256[4],
+ const __m256i coeffs[4]) {
+ __m256i s_256[2];
+ s_64[7] = _mm_loadl_epi64((__m128i *)(src + 7 * 4));
+ s_256[0] = _mm256_setr_m128i(s_64[6], s_64[7]);
+ s_64[6] = _mm_loadl_epi64((__m128i *)(src + 8 * 4));
+ s_256[1] = _mm256_setr_m128i(s_64[7], s_64[6]);
+ ss_256[3] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ const __m256i r = convolve16_8tap_avx2(ss_256, coeffs);
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+ return r;
+}
+
+static INLINE void xy_y_convolve_8tap_16_avx2(const __m256i *const ss,
+ const __m256i coeffs[4],
+ __m256i r[2]) {
+ r[0] = convolve16_8tap_avx2(ss, coeffs);
+ r[1] = convolve16_8tap_avx2(ss + 4, coeffs);
+}
+
+static INLINE void xy_y_convolve_8tap_8x2_avx2(const int16_t *const src,
+ __m256i ss_256[8],
+ const __m256i coeffs[4],
+ __m256i r[2]) {
+ __m256i s_256[2];
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src + 6 * 8));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src + 7 * 8));
+ ss_256[3] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
+ ss_256[7] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
+ xy_y_convolve_8tap_16_avx2(ss_256, coeffs, r);
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+ ss_256[4] = ss_256[5];
+ ss_256[5] = ss_256[6];
+ ss_256[6] = ss_256[7];
+}
+
+static INLINE void xy_y_convolve_8tap_8x2_half_pel_avx2(
+ const int16_t *const src, const __m256i coeffs[2], __m256i s_256[8],
+ __m256i r[2]) {
+ __m256i a_256[4], ss_256[4];
+
+ s_256[6] = _mm256_loadu_si256((__m256i *)(src + 6 * 8));
+ s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * 8));
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[7]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[6]);
+ a_256[2] = _mm256_add_epi16(s_256[2], s_256[5]);
+ a_256[3] = _mm256_add_epi16(s_256[3], s_256[4]);
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r);
+ s_256[0] = s_256[2];
+ s_256[1] = s_256[3];
+ s_256[2] = s_256[4];
+ s_256[3] = s_256[5];
+ s_256[4] = s_256[6];
+ s_256[5] = s_256[7];
+}
+
+static AOM_FORCE_INLINE void xy_y_convolve_8tap_16x2_avx2(
+ const int16_t *const src, const ptrdiff_t stride, const __m256i coeffs[4],
+ __m256i s_256[8], __m256i ss_256[8], __m256i tt_256[8], __m256i r[4]) {
+ s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * stride));
+ ss_256[3] = _mm256_unpacklo_epi16(s_256[6], s_256[7]);
+ ss_256[7] = _mm256_unpackhi_epi16(s_256[6], s_256[7]);
+ s_256[6] = _mm256_loadu_si256((__m256i *)(src + 8 * stride));
+ tt_256[3] = _mm256_unpacklo_epi16(s_256[7], s_256[6]);
+ tt_256[7] = _mm256_unpackhi_epi16(s_256[7], s_256[6]);
+
+ xy_y_convolve_8tap_16_avx2(ss_256, coeffs, r + 0);
+ xy_y_convolve_8tap_16_avx2(tt_256, coeffs, r + 2);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+ ss_256[4] = ss_256[5];
+ ss_256[5] = ss_256[6];
+ ss_256[6] = ss_256[7];
+
+ tt_256[0] = tt_256[1];
+ tt_256[1] = tt_256[2];
+ tt_256[2] = tt_256[3];
+ tt_256[4] = tt_256[5];
+ tt_256[5] = tt_256[6];
+ tt_256[6] = tt_256[7];
+}
+
+static INLINE void xy_y_convolve_8tap_16x2_half_pel_avx2(
+ const int16_t *const src, const ptrdiff_t stride, const __m256i coeffs[4],
+ __m256i s_256[8], __m256i r[4]) {
+ __m256i a_256[4], ss_256[4];
+ s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * stride));
+
+ a_256[0] = _mm256_add_epi16(s_256[0], s_256[7]);
+ a_256[1] = _mm256_add_epi16(s_256[1], s_256[6]);
+ a_256[2] = _mm256_add_epi16(s_256[2], s_256[5]);
+ a_256[3] = _mm256_add_epi16(s_256[3], s_256[4]);
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
+
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
+
+ a_256[1] = _mm256_add_epi16(s_256[2], s_256[7]);
+ a_256[2] = _mm256_add_epi16(s_256[3], s_256[6]);
+ a_256[3] = _mm256_add_epi16(s_256[4], s_256[5]);
+ s_256[0] = s_256[2];
+ s_256[2] = s_256[4];
+ s_256[4] = s_256[6];
+ s_256[6] = _mm256_loadu_si256((__m256i *)(src + 8 * stride));
+
+ a_256[0] = _mm256_add_epi16(s_256[1], s_256[6]);
+ s_256[1] = s_256[3];
+ s_256[3] = s_256[5];
+ s_256[5] = s_256[7];
+ ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
+ ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
+ ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
+
+ xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 2);
+}
+
+static INLINE void xy_y_round_store_8x2_avx2(const __m256i res[2],
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i r = xy_y_round_16_avx2(res);
+ pack_store_8x2_avx2(r, dst, stride);
+}
+
+static INLINE void xy_y_round_store_16x2_avx2(const __m256i res[4],
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i r0 = xy_y_round_16_avx2(res + 0);
+ const __m256i r1 = xy_y_round_16_avx2(res + 2);
+ xy_y_pack_store_16x2_avx2(r0, r1, dst, stride);
+}
+
+static INLINE void sr_y_round_store_32_avx2(const __m256i res[2],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ r[0] = sr_y_round_avx2(res[0]);
+ r[1] = sr_y_round_avx2(res[1]);
+ convolve_store_32_avx2(r[0], r[1], dst);
+}
+
+static INLINE void sr_y_round_store_32x2_avx2(const __m256i res[4],
+ uint8_t *const dst,
+ const int32_t dst_stride) {
+ sr_y_round_store_32_avx2(res, dst);
+ sr_y_round_store_32_avx2(res + 2, dst + dst_stride);
+}
+
+static INLINE void sr_y_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1], const __m256i s0,
+ __m256i *const s1, uint8_t *const dst) {
+ __m256i r[2];
+ y_convolve_2tap_32_avx2(src, coeffs, s0, s1, r);
+ sr_y_round_store_32_avx2(r, dst);
+}
+
+static AOM_FORCE_INLINE void av1_convolve_y_sr_specialized_avx2(
+ const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+ int32_t w, int32_t h, const InterpFilterParams *filter_params_y,
+ const int32_t subpel_y_q4) {
+ int32_t x, y;
+ __m128i coeffs_128[4];
+ __m256i coeffs_256[4];
+
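+  // Select the code path from the effective tap count of the chosen kernel.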
+ int vert_tap = get_filter_tap(filter_params_y, subpel_y_q4);
+
+ if (vert_tap == 2) {
+ // vert_filt as 2 tap
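+    // A 2-tap vertical filter reads no rows above the block.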
+ const uint8_t *src_ptr = src;
+
+ y = h;
+
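+    // subpel_y_q4 == 8 is exactly half-pel; that case reduces to byte
+    // averaging and is handled in the else branch below.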
+ if (subpel_y_q4 != 8) {
+ if (w <= 8) {
+ prepare_half_coeffs_2tap_ssse3(filter_params_y, subpel_y_q4,
+ coeffs_128);
+
+ if (w == 2) {
+ __m128i s_16[2];
+
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)src_ptr);
+
+ do {
+ const __m128i res = y_convolve_2tap_2x2_ssse3(src_ptr, src_stride,
+ coeffs_128, s_16);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ __m128i s_32[2];
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)src_ptr);
+
+ do {
+ const __m128i res = y_convolve_2tap_4x2_ssse3(src_ptr, src_stride,
+ coeffs_128, s_32);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_64[2], s_128[2];
+
+ assert(w == 8);
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)src_ptr);
+
+ do {
+ // Note: Faster than binding to AVX2 registers.
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src_ptr + src_stride));
+ s_128[0] = _mm_unpacklo_epi64(s_64[0], s_64[1]);
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src_ptr + 2 * src_stride));
+ s_128[1] = _mm_unpacklo_epi64(s_64[1], s_64[0]);
+ const __m128i ss0 = _mm_unpacklo_epi8(s_128[0], s_128[1]);
+ const __m128i ss1 = _mm_unpackhi_epi8(s_128[0], s_128[1]);
+ const __m128i res0 = convolve_2tap_ssse3(&ss0, coeffs_128);
+ const __m128i res1 = convolve_2tap_ssse3(&ss1, coeffs_128);
+ const __m128i r0 = sr_y_round_sse2(res0);
+ const __m128i r1 = sr_y_round_sse2(res1);
+ const __m128i d = _mm_packus_epi16(r0, r1);
+ _mm_storel_epi64((__m128i *)dst, d);
+ _mm_storeh_epi64((__m128i *)(dst + dst_stride), d);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ prepare_half_coeffs_2tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 16) {
+ __m128i s_128[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)src_ptr);
+
+ do {
+ __m256i r[2];
+
+ y_convolve_2tap_16x2_avx2(src_ptr, src_stride, coeffs_256, s_128,
+ r);
+ sr_y_round_store_16x2_avx2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ __m256i s_256[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)src_ptr);
+
+ do {
+ sr_y_2tap_32_avx2(src_ptr + src_stride, coeffs_256, s_256[0],
+ &s_256[1], dst);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride, coeffs_256, s_256[1],
+ &s_256[0], dst + dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 64) {
+ __m256i s_256[2][2];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(src_ptr + 0 * 32));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(src_ptr + 1 * 32));
+
+ do {
+ sr_y_2tap_32_avx2(src_ptr + src_stride, coeffs_256, s_256[0][0],
+ &s_256[1][0], dst);
+ sr_y_2tap_32_avx2(src_ptr + src_stride + 32, coeffs_256,
+ s_256[0][1], &s_256[1][1], dst + 32);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride, coeffs_256, s_256[1][0],
+ &s_256[0][0], dst + dst_stride);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride + 32, coeffs_256,
+ s_256[1][1], &s_256[0][1], dst + dst_stride + 32);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[2][4];
+
+ assert(w == 128);
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(src_ptr + 0 * 32));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(src_ptr + 1 * 32));
+ s_256[0][2] = _mm256_loadu_si256((__m256i *)(src_ptr + 2 * 32));
+ s_256[0][3] = _mm256_loadu_si256((__m256i *)(src_ptr + 3 * 32));
+
+ do {
+ sr_y_2tap_32_avx2(src_ptr + src_stride, coeffs_256, s_256[0][0],
+ &s_256[1][0], dst);
+ sr_y_2tap_32_avx2(src_ptr + src_stride + 1 * 32, coeffs_256,
+ s_256[0][1], &s_256[1][1], dst + 1 * 32);
+ sr_y_2tap_32_avx2(src_ptr + src_stride + 2 * 32, coeffs_256,
+ s_256[0][2], &s_256[1][2], dst + 2 * 32);
+ sr_y_2tap_32_avx2(src_ptr + src_stride + 3 * 32, coeffs_256,
+ s_256[0][3], &s_256[1][3], dst + 3 * 32);
+
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride, coeffs_256, s_256[1][0],
+ &s_256[0][0], dst + dst_stride);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride + 1 * 32, coeffs_256,
+ s_256[1][1], &s_256[0][1],
+ dst + dst_stride + 1 * 32);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride + 2 * 32, coeffs_256,
+ s_256[1][2], &s_256[0][2],
+ dst + dst_stride + 2 * 32);
+ sr_y_2tap_32_avx2(src_ptr + 2 * src_stride + 3 * 32, coeffs_256,
+ s_256[1][3], &s_256[0][3],
+ dst + dst_stride + 3 * 32);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ }
+ } else {
+    // Half-pel: both taps are equal, so filtering reduces to byte averaging.
+ if (w <= 8) {
+ if (w == 2) {
+ __m128i s_16[2];
+
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)src_ptr);
+
+ do {
+ s_16[1] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + src_stride));
+ const __m128i d0 = _mm_avg_epu8(s_16[0], s_16[1]);
+ *(int16_t *)dst = (int16_t)_mm_cvtsi128_si32(d0);
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 2 * src_stride));
+ const __m128i d1 = _mm_avg_epu8(s_16[1], s_16[0]);
+ *(int16_t *)(dst + dst_stride) = (int16_t)_mm_cvtsi128_si32(d1);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ __m128i s_32[2];
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)src_ptr);
+
+ do {
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + src_stride));
+ const __m128i d0 = _mm_avg_epu8(s_32[0], s_32[1]);
+ xx_storel_32(dst, d0);
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 2 * src_stride));
+ const __m128i d1 = _mm_avg_epu8(s_32[1], s_32[0]);
+ xx_storel_32(dst + dst_stride, d1);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_64[2];
+
+ assert(w == 8);
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)src_ptr);
+
+ do {
+ // Note: Faster than binding to AVX2 registers.
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src_ptr + src_stride));
+ const __m128i d0 = _mm_avg_epu8(s_64[0], s_64[1]);
+ _mm_storel_epi64((__m128i *)dst, d0);
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src_ptr + 2 * src_stride));
+ const __m128i d1 = _mm_avg_epu8(s_64[1], s_64[0]);
+ _mm_storel_epi64((__m128i *)(dst + dst_stride), d1);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else if (w == 16) {
+ __m128i s_128[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)src_ptr);
+
+ do {
+ s_128[1] = _mm_loadu_si128((__m128i *)(src_ptr + src_stride));
+ const __m128i d0 = _mm_avg_epu8(s_128[0], s_128[1]);
+ _mm_storeu_si128((__m128i *)dst, d0);
+ s_128[0] = _mm_loadu_si128((__m128i *)(src_ptr + 2 * src_stride));
+ const __m128i d1 = _mm_avg_epu8(s_128[1], s_128[0]);
+ _mm_storeu_si128((__m128i *)(dst + dst_stride), d1);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ __m256i s_256[2];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)src_ptr);
+
+ do {
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride, s_256[0], &s_256[1], dst);
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride, s_256[1], &s_256[0],
+ dst + dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 64) {
+ __m256i s_256[2][2];
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(src_ptr + 0 * 32));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(src_ptr + 1 * 32));
+
+ do {
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride, s_256[0][0], &s_256[1][0],
+ dst);
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride + 32, s_256[0][1],
+ &s_256[1][1], dst + 32);
+
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride, s_256[1][0],
+ &s_256[0][0], dst + dst_stride);
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride + 32, s_256[1][1],
+ &s_256[0][1], dst + dst_stride + 32);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[2][4];
+
+ assert(w == 128);
+
+ s_256[0][0] = _mm256_loadu_si256((__m256i *)(src_ptr + 0 * 32));
+ s_256[0][1] = _mm256_loadu_si256((__m256i *)(src_ptr + 1 * 32));
+ s_256[0][2] = _mm256_loadu_si256((__m256i *)(src_ptr + 2 * 32));
+ s_256[0][3] = _mm256_loadu_si256((__m256i *)(src_ptr + 3 * 32));
+
+ do {
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride, s_256[0][0], &s_256[1][0],
+ dst);
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride + 1 * 32, s_256[0][1],
+ &s_256[1][1], dst + 1 * 32);
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride + 2 * 32, s_256[0][2],
+ &s_256[1][2], dst + 2 * 32);
+ sr_y_2tap_32_avg_avx2(src_ptr + src_stride + 3 * 32, s_256[0][3],
+ &s_256[1][3], dst + 3 * 32);
+
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride, s_256[1][0],
+ &s_256[0][0], dst + dst_stride);
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride + 1 * 32, s_256[1][1],
+ &s_256[0][1], dst + dst_stride + 1 * 32);
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride + 2 * 32, s_256[1][2],
+ &s_256[0][2], dst + dst_stride + 2 * 32);
+ sr_y_2tap_32_avg_avx2(src_ptr + 2 * src_stride + 3 * 32, s_256[1][3],
+ &s_256[0][3], dst + dst_stride + 3 * 32);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ }
+ } else if (vert_tap == 4) {
+ // vert_filt as 4 tap
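+    // A 4-tap vertical filter reads (4 / 2 - 1) = 1 row above the block.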
+ const uint8_t *src_ptr = src - src_stride;
+
+ y = h;
+
+ if (w <= 4) {
+ prepare_half_coeffs_4tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128);
+
+ if (w == 2) {
+ __m128i s_16[4], ss_128[2];
+
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 0 * src_stride));
+ s_16[1] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 1 * src_stride));
+ s_16[2] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 2 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi16(s_16[0], s_16[1]);
+ const __m128i src12 = _mm_unpacklo_epi16(s_16[1], s_16[2]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+
+ do {
+ src_ptr += 2 * src_stride;
+ const __m128i res = y_convolve_4tap_2x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_16, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+
+ ss_128[0] = ss_128[1];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_32[4], ss_128[2];
+
+ assert(w == 4);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 0 * src_stride));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 1 * src_stride));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 2 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+
+ do {
+ src_ptr += 2 * src_stride;
+ const __m128i res = y_convolve_4tap_4x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_32, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+
+ ss_128[0] = ss_128[1];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ prepare_half_coeffs_4tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 8) {
+ __m128i s_64[4];
+ __m256i ss_256[2];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src_ptr + 0 * src_stride));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src_ptr + 1 * src_stride));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(src_ptr + 2 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_64[0], s_64[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_64[1], s_64[2]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+
+ do {
+ src_ptr += 2 * src_stride;
+ const __m256i res = y_convolve_4tap_8x2_avx2(
+ src_ptr, src_stride, coeffs_256, s_64, ss_256);
+ sr_y_round_store_8x2_avx2(res, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ __m128i s_128[4];
+ __m256i ss_256[4], r[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)(src_ptr + 0 * src_stride));
+ s_128[1] = _mm_loadu_si128((__m128i *)(src_ptr + 1 * src_stride));
+ s_128[2] = _mm_loadu_si128((__m128i *)(src_ptr + 2 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_128[0], s_128[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_128[1], s_128[2]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+ ss_256[2] = _mm256_unpackhi_epi8(src01, src12);
+
+ do {
+ src_ptr += 2 * src_stride;
+ y_convolve_4tap_16x2_avx2(src_ptr, src_stride, coeffs_256, s_128,
+ ss_256, r);
+ sr_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+        // The AV1 standard has no 32x4 block. This path only serves an
+        // encoder optimization that subsamples 32x8 to 32x4 and then
+        // triggers the 4-tap filter.
+
+ __m256i s_256[4], ss_256[4], tt_256[4], r[4];
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(src_ptr + 0 * src_stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(src_ptr + 1 * src_stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(src_ptr + 2 * src_stride));
+
+ ss_256[0] = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ ss_256[2] = _mm256_unpackhi_epi8(s_256[0], s_256[1]);
+
+ tt_256[0] = _mm256_unpacklo_epi8(s_256[1], s_256[2]);
+ tt_256[2] = _mm256_unpackhi_epi8(s_256[1], s_256[2]);
+
+ do {
+ src_ptr += 2 * src_stride;
+ y_convolve_4tap_32x2_avx2(src_ptr, src_stride, coeffs_256, s_256,
+ ss_256, tt_256, r);
+ sr_y_round_store_32x2_avx2(r, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+
+ tt_256[0] = tt_256[1];
+ tt_256[2] = tt_256[3];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ assert(!(w % 32));
+
+ __m256i s_256[4], ss_256[4], tt_256[4], r[4];
+ x = 0;
+ do {
+ const uint8_t *s = src_ptr + x;
+ uint8_t *d = dst + x;
+ s_256[0] = _mm256_loadu_si256((__m256i *)(s + 0 * src_stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(s + 1 * src_stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(s + 2 * src_stride));
+
+ ss_256[0] = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ ss_256[2] = _mm256_unpackhi_epi8(s_256[0], s_256[1]);
+
+ tt_256[0] = _mm256_unpacklo_epi8(s_256[1], s_256[2]);
+ tt_256[2] = _mm256_unpackhi_epi8(s_256[1], s_256[2]);
+
+ y = h;
+ do {
+ s += 2 * src_stride;
+ y_convolve_4tap_32x2_avx2(s, src_stride, coeffs_256, s_256, ss_256,
+ tt_256, r);
+ sr_y_round_store_32x2_avx2(r, d, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[2] = ss_256[3];
+
+ tt_256[0] = tt_256[1];
+ tt_256[2] = tt_256[3];
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ x += 32;
+ } while (x < w);
+ }
+ }
+ } else if (vert_tap == 6) {
+ // vert_filt as 6 tap
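+    // A 6-tap vertical filter reads (6 / 2 - 1) = 2 rows above the block.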
+ const uint8_t *src_ptr = src - 2 * src_stride;
+
+ if (w <= 4) {
+ prepare_half_coeffs_6tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128);
+
+ y = h;
+
+ if (w == 2) {
+ __m128i s_16[6], ss_128[3];
+
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 0 * src_stride));
+ s_16[1] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 1 * src_stride));
+ s_16[2] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 2 * src_stride));
+ s_16[3] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 3 * src_stride));
+ s_16[4] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 4 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi16(s_16[0], s_16[1]);
+ const __m128i src12 = _mm_unpacklo_epi16(s_16[1], s_16[2]);
+ const __m128i src23 = _mm_unpacklo_epi16(s_16[2], s_16[3]);
+ const __m128i src34 = _mm_unpacklo_epi16(s_16[3], s_16[4]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+
+ do {
+ src_ptr += 2 * src_stride;
+ const __m128i res = y_convolve_6tap_2x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_16, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_32[6], ss_128[3];
+
+ assert(w == 4);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 0 * src_stride));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 1 * src_stride));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 2 * src_stride));
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 3 * src_stride));
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 4 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+
+ do {
+ src_ptr += 2 * src_stride;
+ const __m128i res = y_convolve_6tap_4x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_32, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ prepare_half_coeffs_6tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 8) {
+ __m128i s_64[6];
+ __m256i ss_256[3];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src_ptr + 0 * src_stride));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src_ptr + 1 * src_stride));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(src_ptr + 2 * src_stride));
+ s_64[3] = _mm_loadl_epi64((__m128i *)(src_ptr + 3 * src_stride));
+ s_64[4] = _mm_loadl_epi64((__m128i *)(src_ptr + 4 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_64[0], s_64[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_64[1], s_64[2]);
+ const __m256i src23 = _mm256_setr_m128i(s_64[2], s_64[3]);
+ const __m256i src34 = _mm256_setr_m128i(s_64[3], s_64[4]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+
+ y = h;
+ do {
+ src_ptr += 2 * src_stride;
+ const __m256i res = y_convolve_6tap_8x2_avx2(
+ src_ptr, src_stride, coeffs_256, s_64, ss_256);
+ sr_y_round_store_8x2_avx2(res, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ __m128i s_128[6];
+ __m256i ss_256[6], r[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)(src_ptr + 0 * src_stride));
+ s_128[1] = _mm_loadu_si128((__m128i *)(src_ptr + 1 * src_stride));
+ s_128[2] = _mm_loadu_si128((__m128i *)(src_ptr + 2 * src_stride));
+ s_128[3] = _mm_loadu_si128((__m128i *)(src_ptr + 3 * src_stride));
+ s_128[4] = _mm_loadu_si128((__m128i *)(src_ptr + 4 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_128[0], s_128[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_128[1], s_128[2]);
+ const __m256i src23 = _mm256_setr_m128i(s_128[2], s_128[3]);
+ const __m256i src34 = _mm256_setr_m128i(s_128[3], s_128[4]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+
+ ss_256[3] = _mm256_unpackhi_epi8(src01, src12);
+ ss_256[4] = _mm256_unpackhi_epi8(src23, src34);
+
+ y = h;
+ do {
+ src_ptr += 2 * src_stride;
+ y_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, s_128,
+ ss_256, r);
+ sr_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+
+ ss_256[3] = ss_256[4];
+ ss_256[4] = ss_256[5];
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[6], ss_256[6], tt_256[6], r[4];
+
+ assert(!(w % 32));
+
+ x = 0;
+ do {
+ const uint8_t *s = src_ptr + x;
+ uint8_t *d = dst + x;
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(s + 0 * src_stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(s + 1 * src_stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(s + 2 * src_stride));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(s + 3 * src_stride));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(s + 4 * src_stride));
+
+ ss_256[0] = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi8(s_256[2], s_256[3]);
+ ss_256[3] = _mm256_unpackhi_epi8(s_256[0], s_256[1]);
+ ss_256[4] = _mm256_unpackhi_epi8(s_256[2], s_256[3]);
+
+ tt_256[0] = _mm256_unpacklo_epi8(s_256[1], s_256[2]);
+ tt_256[1] = _mm256_unpacklo_epi8(s_256[3], s_256[4]);
+ tt_256[3] = _mm256_unpackhi_epi8(s_256[1], s_256[2]);
+ tt_256[4] = _mm256_unpackhi_epi8(s_256[3], s_256[4]);
+
+ y = h;
+ do {
+ s += 2 * src_stride;
+ y_convolve_6tap_32x2_avx2(s, src_stride, coeffs_256, s_256, ss_256,
+ tt_256, r);
+ sr_y_round_store_32x2_avx2(r, d, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[3] = ss_256[4];
+ ss_256[4] = ss_256[5];
+
+ tt_256[0] = tt_256[1];
+ tt_256[1] = tt_256[2];
+ tt_256[3] = tt_256[4];
+ tt_256[4] = tt_256[5];
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+ } else if (vert_tap == 8) {
+ // vert_filt as 8 tap
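+    // An 8-tap vertical filter reads (8 / 2 - 1) = 3 rows above the block.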
+ const uint8_t *src_ptr = src - 3 * src_stride;
+
+ if (w <= 4) {
+ prepare_half_coeffs_8tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128);
+
+ y = h;
+
+ if (w == 2) {
+ __m128i s_16[8], ss_128[4];
+
+ s_16[0] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 0 * src_stride));
+ s_16[1] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 1 * src_stride));
+ s_16[2] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 2 * src_stride));
+ s_16[3] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 3 * src_stride));
+ s_16[4] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 4 * src_stride));
+ s_16[5] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 5 * src_stride));
+ s_16[6] = _mm_cvtsi32_si128(*(int16_t *)(src_ptr + 6 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi16(s_16[0], s_16[1]);
+ const __m128i src12 = _mm_unpacklo_epi16(s_16[1], s_16[2]);
+ const __m128i src23 = _mm_unpacklo_epi16(s_16[2], s_16[3]);
+ const __m128i src34 = _mm_unpacklo_epi16(s_16[3], s_16[4]);
+ const __m128i src45 = _mm_unpacklo_epi16(s_16[4], s_16[5]);
+ const __m128i src56 = _mm_unpacklo_epi16(s_16[5], s_16[6]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+ ss_128[2] = _mm_unpacklo_epi8(src45, src56);
+
+ do {
+ const __m128i res = y_convolve_8tap_2x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_16, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ ss_128[2] = ss_128[3];
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m128i s_32[8], ss_128[4];
+
+ assert(w == 4);
+
+ s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 0 * src_stride));
+ s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 1 * src_stride));
+ s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 2 * src_stride));
+ s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 3 * src_stride));
+ s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 4 * src_stride));
+ s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 5 * src_stride));
+ s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(src_ptr + 6 * src_stride));
+
+ const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
+ const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
+ const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
+ const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);
+ const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]);
+ const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[6]);
+
+ ss_128[0] = _mm_unpacklo_epi8(src01, src12);
+ ss_128[1] = _mm_unpacklo_epi8(src23, src34);
+ ss_128[2] = _mm_unpacklo_epi8(src45, src56);
+
+ do {
+ const __m128i res = y_convolve_8tap_4x2_ssse3(
+ src_ptr, src_stride, coeffs_128, s_32, ss_128);
+ const __m128i r = sr_y_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+ ss_128[0] = ss_128[1];
+ ss_128[1] = ss_128[2];
+ ss_128[2] = ss_128[3];
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ prepare_half_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);
+
+ if (w == 8) {
+ __m128i s_64[8];
+ __m256i ss_256[4];
+
+ s_64[0] = _mm_loadl_epi64((__m128i *)(src_ptr + 0 * src_stride));
+ s_64[1] = _mm_loadl_epi64((__m128i *)(src_ptr + 1 * src_stride));
+ s_64[2] = _mm_loadl_epi64((__m128i *)(src_ptr + 2 * src_stride));
+ s_64[3] = _mm_loadl_epi64((__m128i *)(src_ptr + 3 * src_stride));
+ s_64[4] = _mm_loadl_epi64((__m128i *)(src_ptr + 4 * src_stride));
+ s_64[5] = _mm_loadl_epi64((__m128i *)(src_ptr + 5 * src_stride));
+ s_64[6] = _mm_loadl_epi64((__m128i *)(src_ptr + 6 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_64[0], s_64[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_64[1], s_64[2]);
+ const __m256i src23 = _mm256_setr_m128i(s_64[2], s_64[3]);
+ const __m256i src34 = _mm256_setr_m128i(s_64[3], s_64[4]);
+ const __m256i src45 = _mm256_setr_m128i(s_64[4], s_64[5]);
+ const __m256i src56 = _mm256_setr_m128i(s_64[5], s_64[6]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+ ss_256[2] = _mm256_unpacklo_epi8(src45, src56);
+
+ y = h;
+ do {
+ const __m256i res = y_convolve_8tap_8x2_avx2(
+ src_ptr, src_stride, coeffs_256, s_64, ss_256);
+ sr_y_round_store_8x2_avx2(res, dst, dst_stride);
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ __m128i s_128[8];
+ __m256i ss_256[8], r[2];
+
+ s_128[0] = _mm_loadu_si128((__m128i *)(src_ptr + 0 * src_stride));
+ s_128[1] = _mm_loadu_si128((__m128i *)(src_ptr + 1 * src_stride));
+ s_128[2] = _mm_loadu_si128((__m128i *)(src_ptr + 2 * src_stride));
+ s_128[3] = _mm_loadu_si128((__m128i *)(src_ptr + 3 * src_stride));
+ s_128[4] = _mm_loadu_si128((__m128i *)(src_ptr + 4 * src_stride));
+ s_128[5] = _mm_loadu_si128((__m128i *)(src_ptr + 5 * src_stride));
+ s_128[6] = _mm_loadu_si128((__m128i *)(src_ptr + 6 * src_stride));
+
+ // Load lines a and b. Line a to lower 128, line b to upper 128
+ const __m256i src01 = _mm256_setr_m128i(s_128[0], s_128[1]);
+ const __m256i src12 = _mm256_setr_m128i(s_128[1], s_128[2]);
+ const __m256i src23 = _mm256_setr_m128i(s_128[2], s_128[3]);
+ const __m256i src34 = _mm256_setr_m128i(s_128[3], s_128[4]);
+ const __m256i src45 = _mm256_setr_m128i(s_128[4], s_128[5]);
+ const __m256i src56 = _mm256_setr_m128i(s_128[5], s_128[6]);
+
+ ss_256[0] = _mm256_unpacklo_epi8(src01, src12);
+ ss_256[1] = _mm256_unpacklo_epi8(src23, src34);
+ ss_256[2] = _mm256_unpacklo_epi8(src45, src56);
+
+ ss_256[4] = _mm256_unpackhi_epi8(src01, src12);
+ ss_256[5] = _mm256_unpackhi_epi8(src23, src34);
+ ss_256[6] = _mm256_unpackhi_epi8(src45, src56);
+
+ y = h;
+ do {
+ y_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, s_128,
+ ss_256, r);
+ sr_y_round_store_16x2_avx2(r, dst, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+
+ ss_256[4] = ss_256[5];
+ ss_256[5] = ss_256[6];
+ ss_256[6] = ss_256[7];
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ __m256i s_256[8], ss_256[8], tt_256[8], r[4];
+
+ assert(!(w % 32));
+
+ x = 0;
+ do {
+ const uint8_t *s = src_ptr + x;
+ uint8_t *d = dst + x;
+
+ s_256[0] = _mm256_loadu_si256((__m256i *)(s + 0 * src_stride));
+ s_256[1] = _mm256_loadu_si256((__m256i *)(s + 1 * src_stride));
+ s_256[2] = _mm256_loadu_si256((__m256i *)(s + 2 * src_stride));
+ s_256[3] = _mm256_loadu_si256((__m256i *)(s + 3 * src_stride));
+ s_256[4] = _mm256_loadu_si256((__m256i *)(s + 4 * src_stride));
+ s_256[5] = _mm256_loadu_si256((__m256i *)(s + 5 * src_stride));
+ s_256[6] = _mm256_loadu_si256((__m256i *)(s + 6 * src_stride));
+
+ ss_256[0] = _mm256_unpacklo_epi8(s_256[0], s_256[1]);
+ ss_256[1] = _mm256_unpacklo_epi8(s_256[2], s_256[3]);
+ ss_256[2] = _mm256_unpacklo_epi8(s_256[4], s_256[5]);
+ ss_256[4] = _mm256_unpackhi_epi8(s_256[0], s_256[1]);
+ ss_256[5] = _mm256_unpackhi_epi8(s_256[2], s_256[3]);
+ ss_256[6] = _mm256_unpackhi_epi8(s_256[4], s_256[5]);
+
+ tt_256[0] = _mm256_unpacklo_epi8(s_256[1], s_256[2]);
+ tt_256[1] = _mm256_unpacklo_epi8(s_256[3], s_256[4]);
+ tt_256[2] = _mm256_unpacklo_epi8(s_256[5], s_256[6]);
+ tt_256[4] = _mm256_unpackhi_epi8(s_256[1], s_256[2]);
+ tt_256[5] = _mm256_unpackhi_epi8(s_256[3], s_256[4]);
+ tt_256[6] = _mm256_unpackhi_epi8(s_256[5], s_256[6]);
+
+ y = h;
+ do {
+ y_convolve_8tap_32x2_avx2(s, src_stride, coeffs_256, s_256, ss_256,
+ tt_256, r);
+ sr_y_round_store_32x2_avx2(r, d, dst_stride);
+
+ ss_256[0] = ss_256[1];
+ ss_256[1] = ss_256[2];
+ ss_256[2] = ss_256[3];
+ ss_256[4] = ss_256[5];
+ ss_256[5] = ss_256[6];
+ ss_256[6] = ss_256[7];
+
+ tt_256[0] = tt_256[1];
+ tt_256[1] = tt_256[2];
+ tt_256[2] = tt_256[3];
+ tt_256[4] = tt_256[5];
+ tt_256[5] = tt_256[6];
+ tt_256[6] = tt_256[7];
+ s += 2 * src_stride;
+ d += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+ }
+}
+
+static INLINE void sr_x_2tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[1],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_2tap_32_avx2(src, coeffs, r);
+ sr_x_round_store_32_avx2(r, dst);
+}
+
+static INLINE void sr_x_6tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[3],
+ const __m256i filt[3],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_6tap_32_avx2(src, coeffs, filt, r);
+ sr_x_round_store_32_avx2(r, dst);
+}
+
+static AOM_FORCE_INLINE void sr_x_8tap_32_avx2(const uint8_t *const src,
+ const __m256i coeffs[4],
+ const __m256i filt[4],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ x_convolve_8tap_32_avx2(src, coeffs, filt, r);
+ sr_x_round_store_32_avx2(r, dst);
+}
+
+static AOM_FORCE_INLINE void av1_convolve_x_sr_specialized_avx2(
+ const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+ int32_t w, int32_t h, const InterpFilterParams *filter_params_x,
+ const int32_t subpel_x_q4, ConvolveParams *conv_params) {
+ int32_t y = h;
+ __m128i coeffs_128[4];
+ __m256i coeffs_256[4];
+
+ assert(conv_params->round_0 == 3);
+ assert((FILTER_BITS - conv_params->round_1) >= 0 ||
+ ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));
+ (void)conv_params;
+
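+  // As in the vertical path, select the code path from the effective tap
+  // count of the chosen kernel.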
+ const int horz_tap = get_filter_tap(filter_params_x, subpel_x_q4);
+
+ if (horz_tap == 2) {
+ // horz_filt as 2 tap
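+    // A 2-tap horizontal filter reads no pixels left of the block.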
+ const uint8_t *src_ptr = src;
+
+ if (subpel_x_q4 != 8) {
+ if (w <= 8) {
+ prepare_half_coeffs_2tap_ssse3(filter_params_x, subpel_x_q4,
+ coeffs_128);
+
+ if (w == 2) {
+ do {
+ const __m128i res =
+ x_convolve_2tap_2x2_sse4_1(src_ptr, src_stride, coeffs_128);
+ const __m128i r = sr_x_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ const __m128i res =
+ x_convolve_2tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i r = sr_x_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else {
+ assert(w == 8);
+
+ do {
+ __m128i res[2];
+
+ x_convolve_2tap_8x2_ssse3(src_ptr, src_stride, coeffs_128, res);
+ res[0] = sr_x_round_sse2(res[0]);
+ res[1] = sr_x_round_sse2(res[1]);
+ const __m128i d = _mm_packus_epi16(res[0], res[1]);
+ _mm_storel_epi64((__m128i *)dst, d);
+ _mm_storeh_epi64((__m128i *)(dst + dst_stride), d);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ }
+ } else {
+ prepare_half_coeffs_2tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+
+ if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_2tap_16x2_avx2(src_ptr, src_stride, coeffs_256, r);
+ sr_x_round_store_16x2_avx2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ sr_x_2tap_32_avx2(src_ptr, coeffs_256, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ sr_x_2tap_32_avx2(src_ptr + 0 * 32, coeffs_256, dst + 0 * 32);
+ sr_x_2tap_32_avx2(src_ptr + 1 * 32, coeffs_256, dst + 1 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ sr_x_2tap_32_avx2(src_ptr + 0 * 32, coeffs_256, dst + 0 * 32);
+ sr_x_2tap_32_avx2(src_ptr + 1 * 32, coeffs_256, dst + 1 * 32);
+ sr_x_2tap_32_avx2(src_ptr + 2 * 32, coeffs_256, dst + 2 * 32);
+ sr_x_2tap_32_avx2(src_ptr + 3 * 32, coeffs_256, dst + 3 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ }
+ }
+ } else {
+      // Half-pel: both taps are equal, so filtering reduces to averaging
+      // each pixel with its right-hand neighbor.
+ if (w == 2) {
+ do {
+ __m128i s_128;
+
+ s_128 = load_u8_4x2_sse4_1(src_ptr, src_stride);
+ const __m128i s1 = _mm_srli_si128(s_128, 1);
+ const __m128i d = _mm_avg_epu8(s_128, s1);
+ *(uint16_t *)dst = (uint16_t)_mm_cvtsi128_si32(d);
+          *(uint16_t *)(dst + dst_stride) = (uint16_t)_mm_extract_epi16(d, 2);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ __m128i s_128;
+
+ s_128 = load_u8_8x2_sse2(src_ptr, src_stride);
+ const __m128i s1 = _mm_srli_si128(s_128, 1);
+ const __m128i d = _mm_avg_epu8(s_128, s1);
+ xx_storel_32(dst, d);
+ *(int32_t *)(dst + dst_stride) = _mm_extract_epi32(d, 2);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ do {
+ const __m128i s00 = _mm_loadu_si128((__m128i *)src_ptr);
+ const __m128i s10 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride));
+ const __m128i s01 = _mm_srli_si128(s00, 1);
+ const __m128i s11 = _mm_srli_si128(s10, 1);
+ const __m128i d0 = _mm_avg_epu8(s00, s01);
+ const __m128i d1 = _mm_avg_epu8(s10, s11);
+ _mm_storel_epi64((__m128i *)dst, d0);
+ _mm_storel_epi64((__m128i *)(dst + dst_stride), d1);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ const __m128i s00 = _mm_loadu_si128((__m128i *)src_ptr);
+ const __m128i s01 = _mm_loadu_si128((__m128i *)(src_ptr + 1));
+ const __m128i s10 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride));
+ const __m128i s11 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride + 1));
+ const __m128i d0 = _mm_avg_epu8(s00, s01);
+ const __m128i d1 = _mm_avg_epu8(s10, s11);
+ _mm_storeu_si128((__m128i *)dst, d0);
+ _mm_storeu_si128((__m128i *)(dst + dst_stride), d1);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ sr_x_2tap_32_avg_avx2(src_ptr, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ sr_x_2tap_32_avg_avx2(src_ptr + 0 * 32, dst + 0 * 32);
+ sr_x_2tap_32_avg_avx2(src_ptr + 1 * 32, dst + 1 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ sr_x_2tap_32_avg_avx2(src_ptr + 0 * 32, dst + 0 * 32);
+ sr_x_2tap_32_avg_avx2(src_ptr + 1 * 32, dst + 1 * 32);
+ sr_x_2tap_32_avg_avx2(src_ptr + 2 * 32, dst + 2 * 32);
+ sr_x_2tap_32_avg_avx2(src_ptr + 3 * 32, dst + 3 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ }
+ }
+ } else if (horz_tap == 4) {
+ // 4-tap horizontal filter
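+ // The 4-tap kernel spans offsets -1..2, so back the source pointer up by 1.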
+ const uint8_t *src_ptr = src - 1;
+
+ prepare_half_coeffs_4tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128);
+
+ if (w == 2) {
+ do {
+ const __m128i res =
+ x_convolve_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i r = sr_x_round_sse2(res);
+ pack_store_2x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 4) {
+ do {
+ const __m128i res =
+ x_convolve_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i r = sr_x_round_sse2(res);
+ pack_store_4x2_sse2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 8) {
+ // TODO(chiyotsai@google.com): This path reuses the old SIMD code; rewrite
+ // it for better performance later.
+ __m256i filt_256[2];
+ prepare_coeffs_lowbd(filter_params_x, subpel_x_q4, coeffs_256);
+
+ filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
+ for (int i = 0; i < h; i += 2) {
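+ // Place row i in the low 128-bit lane and row i + 1 in the high lane so
+ // both rows are filtered in a single 256-bit pass.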
+ const __m256i data = _mm256_permute2x128_si256(
+ _mm256_castsi128_si256(
+ _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
+ _mm256_castsi128_si256(_mm_loadu_si128(
+ (__m128i *)(&src_ptr[i * src_stride + src_stride]))),
+ 0x20);
+
+ __m256i res_16b = convolve_lowbd_x_4tap(data, coeffs_256 + 1, filt_256);
+ res_16b = sr_x_round_avx2(res_16b);
+
+ __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
+
+ const __m128i res_0 = _mm256_castsi256_si128(res_8b);
+ const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
+
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
+ }
+ } else {
+ assert(!(w % 16));
+ // TODO(chiyotsai@google.com): This path reuses the old SIMD code; rewrite
+ // it for better performance later.
+ __m256i filt_256[2];
+ prepare_coeffs_lowbd(filter_params_x, subpel_x_q4, coeffs_256);
+ filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
+
+ for (int i = 0; i < h; ++i) {
+ for (int j = 0; j < w; j += 16) {
+ // Source byte indices in the 256-bit register: low lane holds bytes 0-15
+ // (outputs 0-7), high lane holds bytes 8-23 (outputs 8-15).
+ const __m256i data = _mm256_inserti128_si256(
+ _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
+ _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
+ 1);
+
+ __m256i res_16b =
+ convolve_lowbd_x_4tap(data, coeffs_256 + 1, filt_256);
+ res_16b = sr_x_round_avx2(res_16b);
+
+ // 8-bit conversion with unsigned saturation.
+ __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
+
+ // Store values into the destination buffer
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
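+ // 216 == 0xD8 selects 64-bit lanes [0, 2, 1, 3], gathering both result
+ // qwords into the low 128 bits.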
+ res_8b = _mm256_permute4x64_epi64(res_8b, 216);
+ __m128i res = _mm256_castsi256_si128(res_8b);
+ _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
+ }
+ }
+ }
+ } else {
+ __m256i filt_256[4];
+
+ filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
+ filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
+ filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2);
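+ // filt_256[0..2] hold the byte-shuffle masks shared by the 6- and 8-tap
+ // paths; the 8-tap path loads the fourth mask below.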
+
+ if (horz_tap == 6) {
+ // 6-tap horizontal filter
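+ // The 6-tap kernel spans offsets -2..3, so back the source pointer up by 2.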
+ const uint8_t *src_ptr = src - 2;
+
+ prepare_half_coeffs_6tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+
+ if (w == 8) {
+ do {
+ const __m256i res = x_convolve_6tap_8x2_avx2(src_ptr, src_stride,
+ coeffs_256, filt_256);
+ sr_x_round_store_8x2_avx2(res, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256,
+ r);
+ sr_x_round_store_16x2_avx2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ sr_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ sr_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ sr_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, dst + 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ sr_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ sr_x_6tap_32_avx2(src_ptr + 1 * 32, coeffs_256, filt_256,
+ dst + 1 * 32);
+ sr_x_6tap_32_avx2(src_ptr + 2 * 32, coeffs_256, filt_256,
+ dst + 2 * 32);
+ sr_x_6tap_32_avx2(src_ptr + 3 * 32, coeffs_256, filt_256,
+ dst + 3 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ }
+ } else if (horz_tap == 8) {
+ // 8-tap horizontal filter
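+ // The 8-tap kernel spans offsets -3..4, so back the source pointer up by 3.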
+ const uint8_t *src_ptr = src - 3;
+
+ filt_256[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2);
+
+ prepare_half_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
+
+ if (w == 8) {
+ do {
+ const __m256i res = x_convolve_8tap_8x2_avx2(src_ptr, src_stride,
+ coeffs_256, filt_256);
+ sr_x_round_store_8x2_avx2(res, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 16) {
+ do {
+ __m256i r[2];
+
+ x_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256,
+ r);
+ sr_x_round_store_16x2_avx2(r, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ y -= 2;
+ } while (y);
+ } else if (w == 32) {
+ do {
+ sr_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else if (w == 64) {
+ do {
+ sr_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ sr_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, dst + 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ } else {
+ assert(w == 128);
+
+ do {
+ sr_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, dst);
+ sr_x_8tap_32_avx2(src_ptr + 1 * 32, coeffs_256, filt_256,
+ dst + 1 * 32);
+ sr_x_8tap_32_avx2(src_ptr + 2 * 32, coeffs_256, filt_256,
+ dst + 2 * 32);
+ sr_x_8tap_32_avx2(src_ptr + 3 * 32, coeffs_256, filt_256,
+ dst + 3 * 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while (--y);
+ }
+ }
+ }
+}
+
+#endif // THIRD_PARTY_SVT_AV1_CONVOLVE_AVX2_H_
diff --git a/third_party/SVT-AV1/synonyms.h b/third_party/SVT-AV1/synonyms.h
new file mode 100644
index 0000000..0ded6e5
--- /dev/null
+++ b/third_party/SVT-AV1/synonyms.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_THIRD_PARTY_SVT_AV1_SYNONYMS_H_
+#define AOM_THIRD_PARTY_SVT_AV1_SYNONYMS_H_
+
+#include "aom_dsp/x86/mem_sse2.h"
+#include "aom_dsp/x86/synonyms.h"
+
+static INLINE __m128i load_u8_8x2_sse2(const uint8_t *const src,
+ const ptrdiff_t stride) {
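+ // Loads 8 bytes from each of two rows into one register: row 0 in bytes
+ // 0-7, row 1 in bytes 8-15.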
+ return load_8bit_8x2_to_1_reg_sse2(src, (int)(sizeof(*src) * stride));
+}
+
+static AOM_FORCE_INLINE void store_u8_4x2_sse2(const __m128i src,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ xx_storel_32(dst, src);
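+ // Avoid SSE4.1's _mm_extract_epi32: rebuild row 1's 32-bit value from two
+ // SSE2 _mm_extract_epi16 calls so this helper stays SSE2-only.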
+ *(uint32_t *)(dst + stride) =
+ ((uint32_t)_mm_extract_epi16(src, 3) << 16) | _mm_extract_epi16(src, 2);
+}
+
+#endif // AOM_THIRD_PARTY_SVT_AV1_SYNONYMS_H_