AVX2 dr prediction, full version - lbd & hbd, Z1, Z2, Z3

Measured speedup ranges from 0.98x to 12.56x,
depending on input parameters (bit depth, zone, angle, transform size):
lbd, 8-bit
z1
enable_upsample: 0 angle: 3
[ TX_4X4] 1.15
[ TX_8X8] 1.63
[TX_16X16] 3.21
[TX_32X32] 5.58
[TX_64X64] 2.55
enable_upsample: 0 angle: 45
[ TX_4X4] 1.23
[ TX_8X8] 2.37
[TX_16X16] 3.96
[TX_32X32] 3.89
enable_upsample: 0 angle: 87
[ TX_4X4] 1.32
[ TX_8X8] 2.40
[TX_16X16] 3.95
[TX_32X32] 3.95
[TX_64X64] 1.88
z2
enable_upsample: 0 angle: 93
[ TX_4X4] 1.52
[ TX_8X8] 2.48
[TX_16X16] 2.50
[TX_32X32] 1.63
[TX_64X64] 1.26
enable_upsample: 0 angle: 135
[ TX_4X4] 0.98
[ TX_8X8] 1.13
[TX_16X16] 1.15
[TX_32X32] 1.31
[TX_64X64] 1.37
enable_upsample: 0 angle: 177
[ TX_4X4] 1.16
[ TX_8X8] 1.48
[TX_16X16] 1.53
[TX_32X32] 1.54
[TX_64X64] 1.60
z3
enable_upsample: 0 angle: 183
[ TX_4X4] 1.29
[ TX_8X8] 1.96
[TX_16X16] 3.29
[TX_32X32] 3.32
[TX_64X64] 1.51
enable_upsample: 0 angle: 225
[ TX_4X4] 1.22
[ TX_8X8] 2.02
[TX_16X16] 3.48
[TX_32X32] 3.43
[TX_64X64] 1.51
enable_upsample: 0 angle: 267
[ TX_4X4] 1.44
[ TX_8X8] 1.91
[TX_16X16] 3.45
[TX_32X32] 4.98
[TX_64X64] 2.51
hbd, 10-bit
z1
enable_upsample: 0 angle: 3
[ TX_4X4] 7.23
[ TX_8X8] 11.47
[TX_16X16] 12.56
[TX_32X32] 8.21
[TX_64X64] 6.32
enable_upsample: 0 angle: 45
[ TX_4X4] 1.53
[ TX_8X8] 2.15
[TX_16X16] 2.46
[TX_32X32] 1.95
[TX_64X64] 2.08
enable_upsample: 0 angle: 87
[ TX_4X4] 1.50
[ TX_8X8] 2.17
[TX_16X16] 2.12
[TX_32X32] 1.88
[TX_64X64] 2.10
z2
enable_upsample: 0 angle: 93
[ TX_4X4] 1.56
[ TX_8X8] 3.15
[TX_16X16] 2.50
[TX_32X32] 1.62
[TX_64X64] 1.23
enable_upsample: 0 angle: 135
[ TX_4X4] 1.04
[ TX_8X8] 1.36
[TX_16X16] 1.22
[TX_32X32] 1.38
[TX_64X64] 1.39
enable_upsample: 0 angle: 177
[ TX_4X4] 1.17
[ TX_8X8] 1.69
[TX_16X16] 1.46
[TX_32X32] 1.50
[TX_64X64] 1.56
z3
enable_upsample: 0 angle: 183
[ TX_4X4] 1.38
[ TX_8X8] 1.71
[TX_16X16] 2.00
[TX_32X32] 1.99
[TX_64X64] 1.95
enable_upsample: 0 angle: 225
[ TX_4X4] 1.46
[ TX_8X8] 1.67
[TX_16X16] 2.01
[TX_32X32] 2.01
[TX_64X64] 1.95
enable_upsample: 0 angle: 267
[ TX_4X4] 1.40
[ TX_8X8] 1.52
[TX_16X16] 3.57
[TX_32X32] 3.47
[TX_64X64] 3.13
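
For reference, a minimal scalar sketch of the two-tap interpolation that
each SIMD kernel in this patch vectorizes (shown for one zone-1 row with
no edge upsampling; function and parameter names are illustrative, not
part of the patch):

  /* Predict zone-1 row r (0-based): every pixel in the row shares one
   * 5-bit fractional position; pixels past max_base_x repeat the last
   * reference sample. */
  static void dr_z1_row_scalar(uint8_t *dst, const uint8_t *above, int bw,
                               int r, int dx, int max_base_x) {
    const int x = (r + 1) * dx;         /* fixed-point position, frac_bits=6 */
    const int base = x >> 6;
    const int shift = (x & 0x3f) >> 1;  /* 0..31 */
    for (int c = 0; c < bw; ++c) {
      if (base + c >= max_base_x) {
        dst[c] = above[max_base_x];
      } else {
        dst[c] = (uint8_t)((above[base + c] * 32 + 16 +
                            (above[base + c + 1] - above[base + c]) * shift) >>
                           5);
      }
    }
  }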
Change-Id: Iddd285cd43020a28074790eb164518c67422cc30
diff --git a/aom_dsp/x86/intrapred_avx2.c b/aom_dsp/x86/intrapred_avx2.c
index f684715..ae18bc3 100644
--- a/aom_dsp/x86/intrapred_avx2.c
+++ b/aom_dsp/x86/intrapred_avx2.c
@@ -12,9 +12,7 @@
#include <immintrin.h>
#include "config/aom_dsp_rtcd.h"
-#include "config/av1_rtcd.h"
#include "aom_dsp/x86/lpf_common_sse2.h"
-#include "aom_ports/mem.h"
static INLINE __m256i dc_sum_64(const uint8_t *ref) {
const __m256i x0 = _mm256_loadu_si256((const __m256i *)ref);
@@ -1514,8 +1512,8 @@
return;
}
-static void transpose_TX_8X8(const uint16_t *src, ptrdiff_t pitchSrc,
- uint16_t *dst, ptrdiff_t pitchDst) {
+static void highbd_transpose_TX_8X8(const uint16_t *src, ptrdiff_t pitchSrc,
+ uint16_t *dst, ptrdiff_t pitchDst) {
__m128i r0, r1, r2, r3, r4, r5, r6, r7, r0_Lo, r1_Lo, r2_Lo, r3_Lo, r4_Lo,
r5_Lo, r6_Lo;
r0 = _mm_load_si128(
@@ -1582,12 +1580,607 @@
_mm_storeu_si128((__m128i *)(dst + 7 * pitchDst), r3);
}
-static void transpose(const uint16_t *src, ptrdiff_t pitchSrc, uint16_t *dst,
- ptrdiff_t pitchDst, int width, int height) {
+static uint8_t HighbdLoadMaskx[8][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
+ { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
+};
+
+static uint8_t HighbdEvenOddMaskx4[8][16] = {
+ { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14,
+ 15 }, // 0=0,1, 1=2,3, 2=4,5, 3=6,7, 4=8,9, 5=10,11, 6=12,13, 7=14,15,
+ // >7=0,1
+ { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
+ { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
+ { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 0, 1, 0, 1 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 0, 1 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 0, 1 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15 }
+};
+
+static uint16_t HighbdEvenOddMaskx8_2[8][16] = {
+ { 0, 2, 4, 6, 8, 10, 12, 14 }, { 2, 2, 4, 6, 8, 10, 12, 14 },
+ { 4, 4, 4, 6, 8, 10, 12, 14 }, { 6, 6, 6, 6, 8, 10, 12, 14 },
+ { 8, 8, 8, 8, 8, 10, 12, 14 }, { 10, 10, 10, 10, 10, 10, 12, 14 },
+ { 12, 12, 12, 12, 12, 12, 12, 14 }, { 14, 14, 14, 14, 14, 14, 14, 14 },
+};
+
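+// HighbdBaseMask[i] has 0xffff in the first i words and 0 elsewhere; used
+// with blendv to combine the left-edge and above-edge results per pixel.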
+static uint16_t HighbdBaseMask[17][16] = {
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
+ 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
+ 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
+};
+
+static void highbd_dr_prediction_z2_Nx4_avx2(
+ int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+ const uint16_t *left, int upsample_above, int upsample_left, int dx,
+ int dy) {
+ const int min_base_x = -(1 << upsample_above);
+ const int min_base_y = -(1 << upsample_left);
+ const int frac_bits_x = 6 - upsample_above;
+ const int frac_bits_y = 6 - upsample_left;
+
+  // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a16;
+ __m256i diff;
+ __m128i c3f, min_base_y128;
+
+ a16 = _mm256_set1_epi32(16);
+ c3f = _mm_set1_epi32(0x3f);
+ min_base_y128 = _mm_set1_epi32(min_base_y);
+
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i resx, resy, resxy;
+ __m128i a0_x128, a1_x128;
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
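+    // base_shift: how many of the loaded above[] samples fall left of the
+    // valid range; base_min_diff: how many output pixels come from left[]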
+ int base_shift = 0;
+ if (base_x < (min_base_x - 1)) {
+ base_shift = (min_base_x - base_x - 1) >> upsample_above;
+ }
+ int base_min_diff =
+ (min_base_x - base_x + upsample_above) >> upsample_above;
+ if (base_min_diff > 4) {
+ base_min_diff = 4;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 3) {
+ resx = _mm_setzero_si128();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+
+ if (upsample_above) {
+ a0_x128 = _mm_shuffle_epi8(a0_x128,
+ *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
+ a1_x128 = _mm_shuffle_epi8(a1_x128,
+ *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(
+ _mm_slli_epi32(
+ _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx),
+ upsample_above),
+ c3f),
+ 1));
+ } else {
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+ a1_x128 =
+ _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx),
+ c3f),
+ 1));
+ }
+ a0_x = _mm256_cvtepu16_epi32(a0_x128);
+ a1_x = _mm256_cvtepu16_epi32(a1_x128);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resx = _mm256_castsi256_si128(res);
+ resx = _mm_packus_epi32(resx, resx);
+ }
+ // y calc
+ if (base_x < min_base_x) {
+ DECLARE_ALIGNED(32, int, base_y_c[4]);
+ __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+ r6 = _mm_set1_epi32(r << 6);
+ dy128 = _mm_set1_epi32(dy);
+ c1234 = _mm_setr_epi32(1, 2, 3, 4);
+ y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
+ base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
+ mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
+ base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+ _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+ a0_y = _mm256_castsi128_si256(
+ _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
+ left[base_y_c[2]], left[base_y_c[3]]));
+ a1_y = _mm256_castsi128_si256(
+ _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+ left[base_y_c[2] + 1], left[base_y_c[3] + 1]));
+
+ if (upsample_left) {
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1));
+ } else {
+ shift = _mm256_castsi128_si256(
+ _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1));
+ }
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resy = _mm256_castsi256_si128(res);
+ resy = _mm_packus_epi32(resy, resy);
+ } else {
+ resy = resx;
+ }
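+    // the first base_min_diff pixels come from the left-edge result (resy),
+    // the remainder from the above-edge result (resx)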
+ resxy =
+ _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+ _mm_storel_epi64((__m128i *)(dst), resxy);
+ dst += stride;
+ }
+}
+
+static void highbd_dr_prediction_z2_Nx8_avx2(
+ int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+ const uint16_t *left, int upsample_above, int upsample_left, int dx,
+ int dy) {
+ const int min_base_x = -(1 << upsample_above);
+ const int min_base_y = -(1 << upsample_left);
+ const int frac_bits_x = 6 - upsample_above;
+ const int frac_bits_y = 6 - upsample_left;
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
+ __m256i diff;
+ __m128i a0_x128, a1_x128;
+
+ a16 = _mm256_set1_epi32(16);
+ c3f = _mm256_set1_epi32(0x3f);
+ min_base_y256 = _mm256_set1_epi32(min_base_y);
+
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i resx, resy, resxy;
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
+ int base_shift = 0;
+ if (base_x < (min_base_x - 1)) {
+ base_shift = (min_base_x - base_x - 1) >> upsample_above;
+ }
+ int base_min_diff =
+ (min_base_x - base_x + upsample_above) >> upsample_above;
+ if (base_min_diff > 8) {
+ base_min_diff = 8;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 7) {
+ resx = _mm_setzero_si128();
+ } else {
+ if (upsample_above) {
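+        // upsampled edge: the samples are even/odd interleaved, so gather
+        // them element by element through the HighbdEvenOddMaskx8_2 index
+        // table instead of a straight load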
+ a0_x128 = _mm_setr_epi16(
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][0]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][1]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][2]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][3]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][4]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][5]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][6]],
+ above[base_x + HighbdEvenOddMaskx8_2[base_shift][7]]);
+ a1_x128 = _mm_setr_epi16(
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][0]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][1]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][2]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][3]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][4]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][5]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][6]],
+ above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][7]]);
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_slli_epi32(
+ _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx,
+ (4 << 6) - y * dx, (5 << 6) - y * dx,
+ (6 << 6) - y * dx, (7 << 6) - y * dx),
+ upsample_above),
+ c3f),
+ 1);
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+ a1_x128 =
+ _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+ (3 << 6) - y * dx, (4 << 6) - y * dx,
+ (5 << 6) - y * dx, (6 << 6) - y * dx,
+ (7 << 6) - y * dx),
+ c3f),
+ 1);
+ }
+
+ a0_x = _mm256_cvtepu16_epi32(a0_x128);
+ a1_x = _mm256_cvtepu16_epi32(a1_x128);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resx = _mm256_castsi256_si128(_mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
+ }
+ // y calc
+ if (base_x < min_base_x) {
+ DECLARE_ALIGNED(32, int, base_y_c[8]);
+ __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
+ r6 = _mm256_set1_epi32(r << 6);
+ dy256 = _mm256_set1_epi32(dy);
+ c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
+ y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+
+ a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+ left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+ left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+ left[base_y_c[6]], left[base_y_c[7]]));
+ a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+ left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
+ left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+ left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
+
+ if (upsample_left) {
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
+ 1);
+ } else {
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+ }
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resy = _mm256_castsi256_si128(_mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
+ } else {
+ resy = resx;
+ }
+ resxy =
+ _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+ _mm_storeu_si128((__m128i *)(dst), resxy);
+ dst += stride;
+ }
+}
+
+static void highbd_dr_prediction_z2_HxW_avx2(
+ int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+ const uint16_t *left, int upsample_above, int upsample_left, int dx,
+ int dy) {
+ // here upsample_above and upsample_left are 0 by design of
+ // av1_use_intra_edge_upsample
+ const int min_base_x = -1;
+ const int min_base_y = -1;
+ (void)upsample_above;
+ (void)upsample_left;
+ const int frac_bits_x = 6;
+ const int frac_bits_y = 6;
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16;
+ __m256i diff, min_base_y256, c3f;
+ __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
+
+ a16 = _mm256_set1_epi32(16);
+  min_base_y256 = _mm256_set1_epi32(min_base_y);
+ c3f = _mm256_set1_epi32(0x3f);
+ for (int r = 0; r < H; r++) {
+ __m256i b, res, shift;
+ __m256i resx[2], resy[2];
+ __m256i resxy;
+ for (int j = 0; j < W; j += 16) {
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
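+      // each 16-wide chunk of the row is built as two 8-pixel halves from
+      // the above edge, then merged with the left-edge result below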
+ int base_shift = 0;
+ if ((base_x + j) < (min_base_x - 1)) {
+ base_shift = (min_base_x - (base_x + j) - 1);
+ }
+ int base_min_diff = (min_base_x - base_x - j);
+ if (base_min_diff > 16) {
+ base_min_diff = 16;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 7) {
+ resx[0] = _mm256_setzero_si256();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
+ a1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+ a1_x128 =
+ _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+ a0_x = _mm256_cvtepu16_epi32(a0_x128);
+ a1_x = _mm256_cvtepu16_epi32(a1_x128);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(
+ ((0 + j) << 6) - y * dx, ((1 + j) << 6) - y * dx,
+ ((2 + j) << 6) - y * dx, ((3 + j) << 6) - y * dx,
+ ((4 + j) << 6) - y * dx, ((5 + j) << 6) - y * dx,
+ ((6 + j) << 6) - y * dx, ((7 + j) << 6) - y * dx),
+              c3f),
+ 1);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resx[0] = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ }
+ base_shift = 0;
+ if ((base_x + j + 8) < (min_base_x - 1)) {
+ base_shift = (min_base_x - (base_x + j + 8) - 1);
+ }
+ if (base_shift > 7) {
+ resx[1] = _mm256_setzero_si256();
+ } else {
+ a0_1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 8 + j));
+ a1_1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 9 + j));
+ a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
+ *(__m128i *)HighbdLoadMaskx[base_shift]);
+ a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
+ *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+ a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
+ a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(
+ ((8 + j) << 6) - y * dx, ((9 + j) << 6) - y * dx,
+ ((10 + j) << 6) - y * dx, ((11 + j) << 6) - y * dx,
+ ((12 + j) << 6) - y * dx, ((13 + j) << 6) - y * dx,
+ ((14 + j) << 6) - y * dx, ((15 + j) << 6) - y * dx),
+              c3f),
+ 1);
+
+ diff = _mm256_sub_epi32(a1_1_x, a0_1_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_1_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ resx[1] = _mm256_add_epi32(a32, b);
+ resx[1] = _mm256_srli_epi32(resx[1], 5);
+ resx[1] = _mm256_packus_epi32(
+ resx[1],
+ _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
+ }
+ resx[0] =
+ _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
+ 1); // 16 16bit values
+
+ // y calc
+ if ((base_x < min_base_x)) {
+ DECLARE_ALIGNED(32, int, base_y_c[16]);
+ __m256i r6, c256, dy256, y_c256, y_c_1_256, base_y_c256, mask256;
+ r6 = _mm256_set1_epi32(r << 6);
+ dy256 = _mm256_set1_epi32(dy);
+ c256 = _mm256_setr_epi32(1 + j, 2 + j, 3 + j, 4 + j, 5 + j, 6 + j,
+ 7 + j, 8 + j);
+ y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+ c256 = _mm256_setr_epi32(9 + j, 10 + j, 11 + j, 12 + j, 13 + j, 14 + j,
+ 15 + j, 16 + j);
+ y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
+
+ a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+ left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+ left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+ left[base_y_c[6]], left[base_y_c[7]]));
+ a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+ left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
+ left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+ left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
+
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resy[0] = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+
+ a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+ left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
+ left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
+ left[base_y_c[14]], left[base_y_c[15]]));
+ a1_y = _mm256_cvtepu16_epi32(
+ _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
+ left[base_y_c[10] + 1], left[base_y_c[11] + 1],
+ left[base_y_c[12] + 1], left[base_y_c[13] + 1],
+ left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
+
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resy[1] = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+
+ resy[0] =
+ _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
+ 1); // 16 16bit values
+ } else {
+ resy[0] = resx[0];
+ }
+ resxy = _mm256_blendv_epi8(resx[0], resy[0],
+ *(__m256i *)HighbdBaseMask[base_min_diff]);
+ _mm256_storeu_si256((__m256i *)(dst + j), resxy);
+ } // for j
+ dst += stride;
+ }
+}
+
+// Directional prediction, zone 2: 90 < angle < 180
+void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint16_t *above,
+ const uint16_t *left, int upsample_above,
+ int upsample_left, int dx, int dy,
+ int bd) {
+ (void)bd;
+ assert(dx > 0);
+ assert(dy > 0);
+ switch (bw) {
+ case 4:
+ highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
+ upsample_above, upsample_left, dx, dy);
+ break;
+ case 8:
+ highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
+ upsample_above, upsample_left, dx, dy);
+ break;
+ default:
+ highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
+ upsample_above, upsample_left, dx, dy);
+ break;
+ }
+ return;
+}
+
+static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
+ uint16_t *dst, ptrdiff_t pitchDst, int width,
+ int height) {
for (int j = 0; j < height; j += 8)
for (int i = 0; i < width; i += 8)
- transpose_TX_8X8(src + i * pitchSrc + j, pitchSrc, dst + j * pitchDst + i,
- pitchDst);
+ highbd_transpose_TX_8X8(src + i * pitchSrc + j, pitchSrc,
+ dst + j * pitchDst + i, pitchDst);
}
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
@@ -1823,7 +2416,7 @@
int upsample_left, int dy) {
DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
- transpose(dstT, 64, dst, stride, 64, 64);
+ highbd_transpose(dstT, 64, dst, stride, 64, 64);
}
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
@@ -1873,9 +2466,9 @@
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
int upsample_left, int dy) {
- DECLARE_ALIGNED(16, uint16_t, dstT[64 * 32]);
+ uint16_t dstT[64 * 32];
highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
- transpose(dstT, 64, dst, stride, 32, 64);
+ highbd_transpose(dstT, 64, dst, stride, 32, 64);
}
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
@@ -1883,7 +2476,7 @@
int upsample_left, int dy) {
DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
- transpose(dstT, 32, dst, stride, 64, 32);
+ highbd_transpose(dstT, 32, dst, stride, 64, 32);
return;
}
@@ -1892,7 +2485,7 @@
int upsample_left, int dy) {
DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
- transpose(dstT, 64, dst, stride, 16, 64);
+ highbd_transpose(dstT, 64, dst, stride, 16, 64);
}
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
@@ -2017,3 +2610,1766 @@
}
return;
}
+
+// Low bit depth functions
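+
+// BaseMask[i] has 0xff in the first i bytes and 0 elsewhere; used with
+// blendv to keep the first i lanes of one source and take the rest from
+// the other.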
+static uint8_t BaseMask[33][32] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+};
+
+static AOM_FORCE_INLINE void dr_prediction_z1_4xN_internal_avx2(
+ int N, __m128i *dst, const uint8_t *above, int upsample_above, int dx) {
+ const int frac_bits = 6 - upsample_above;
+ const int max_base_x = ((N + 4) - 1) << upsample_above;
+ int x;
+  // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16;
+ __m256i diff;
+ __m128i a_mbase_x;
+
+ a16 = _mm256_set1_epi32(16);
+ a_mbase_x = _mm_set1_epi8(above[max_base_x]);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i res1, a0_128, a1_128;
+
+ int base = x >> frac_bits;
+ int base_max_diff = (max_base_x - base) >> upsample_above;
+ if (base_max_diff <= 0) {
+ for (int i = r; i < N; ++i) {
+ dst[i] = a_mbase_x; // save 4 values
+ }
+ return;
+ }
+ if (base_max_diff > 4) base_max_diff = 4;
+ a0_128 = _mm_loadu_si128((__m128i *)(above + base));
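+    // above[base + 1 ..] reuses the same load, shifted down one byte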
+ a1_128 = _mm_srli_si128(a0_128, 1);
+ a0 = _mm256_cvtepu8_epi32(a0_128);
+ a1 = _mm256_cvtepu8_epi32(a1_128);
+
+ if (upsample_above) {
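+      // deinterleave the upsampled edge: even samples to the low 128 bits,
+      // odd samples (the x+1 neighbors) to the high 128 bits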
+ a0 = _mm256_permutevar8x32_epi32(
+ a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
+ a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
+ _mm256_set1_epi32(0x3f)),
+ 1);
+ } else {
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+ }
+
+ diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res1 = _mm256_castsi256_si128(res);
+ res1 = _mm_packus_epi32(res1, res1);
+ res1 = _mm_packus_epi16(res1, res1);
+
+ dst[r] =
+ _mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
+ x += dx;
+ }
+}
+
+static void dr_prediction_z1_4xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, int upsample_above,
+ int dx) {
+ __m128i dstvec[16];
+
+ dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ for (int i = 0; i < N; i++) {
+ *(uint32_t *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
+ }
+}
+
+static AOM_FORCE_INLINE void dr_prediction_z1_8xN_internal_avx2(
+ int N, __m128i *dst, const uint8_t *above, int upsample_above, int dx) {
+ const int frac_bits = 6 - upsample_above;
+ const int max_base_x = ((8 + N) - 1) << upsample_above;
+
+ int x;
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a0_1, a1_1, a32, a16, diff;
+ __m128i a_mbase_x;
+
+ a16 = _mm256_set1_epi32(16);
+ a_mbase_x = _mm_set1_epi8(above[max_base_x]);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, res1, shift;
+ __m128i res128;
+
+ int base = x >> frac_bits;
+ int base_max_diff = (max_base_x - base) >> upsample_above;
+ if (base_max_diff <= 0) {
+ for (int i = r; i < N; ++i) {
+        dst[i] = a_mbase_x;  // save 16 values, 8 to be used further
+ }
+ return;
+ }
+ if (base_max_diff > 8) base_max_diff = 8;
+
+ a0 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base)));
+ a1 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
+
+ if (upsample_above) {
+ a0 = _mm256_permutevar8x32_epi32(
+ a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
+ a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
+
+ a0_1 =
+ _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
+ a0_1 = _mm256_permutevar8x32_epi32(
+ a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
+ a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
+
+ a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
+ a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
+ _mm256_set1_epi32(0x3f)),
+ 1);
+ } else {
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+ }
+
+ diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res1 = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(
+ _mm256_extracti128_si256(res, 1))); // goto 16 bit
+
+ res128 = _mm_packus_epi16(_mm256_castsi256_si128(res1),
+ _mm256_castsi256_si128(res1)); // goto 8 bit
+
+ res128 =
+ _mm_blendv_epi8(a_mbase_x, res128, *(__m128i *)BaseMask[base_max_diff]);
+ dst[r] = res128;
+ x += dx;
+ }
+}
+
+static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, int upsample_above,
+ int dx) {
+ __m128i dstvec[32];
+
+ dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ for (int i = 0; i < N; i++) {
+ _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
+ }
+}
+
+static AOM_FORCE_INLINE void dr_prediction_z1_16xN_internal_avx2(
+ int N, __m128i *dstvec, const uint8_t *above, int upsample_above, int dx) {
+ int x;
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((16 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a0_1, a1, a1_1, diff, a32, a16;
+ __m128i a_mbase_x;
+
+ a16 = _mm256_set1_epi32(16);
+ a_mbase_x = _mm_set1_epi8((uint8_t)above[max_base_x]);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res[2];
+ __m128i res128[2];
+ int base = x >> frac_bits;
+ int base_max_diff = (max_base_x - base);
+ if (base_max_diff <= 0) {
+ for (int i = r; i < N; ++i) {
+ dstvec[i] = a_mbase_x; // save 16 values
+ }
+ return;
+ }
+ __m256i shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+
+ a0 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base)));
+ a1 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
+
+ diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[0] = _mm256_add_epi32(a32, b);
+ res[0] = _mm256_srli_epi32(res[0], 5);
+ res[0] = _mm256_packus_epi32(
+ res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
+ res128[0] = _mm_packus_epi16(_mm256_castsi256_si128(res[0]),
+ _mm256_castsi256_si128(res[0])); // goto 8 bit
+
+ if (base_max_diff > 8) {
+ if (base_max_diff > 16) base_max_diff = 16;
+ a0_1 =
+ _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
+ a1_1 =
+ _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
+
+ diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[1] = _mm256_add_epi32(a32, b);
+ res[1] = _mm256_srli_epi32(res[1], 5);
+ res[1] = _mm256_packus_epi32(
+ res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
+ res128[1] =
+ _mm_packus_epi16(_mm256_castsi256_si128(res[1]),
+ _mm256_castsi256_si128(res[1])); // goto 8 bit
+
+ } else {
+ res128[1] = a_mbase_x;
+ }
+ res128[0] = _mm_unpacklo_epi64(res128[0], res128[1]); // 16 8bit values
+
+ dstvec[r] = _mm_blendv_epi8(a_mbase_x, res128[0],
+ *(__m128i *)BaseMask[base_max_diff]);
+ x += dx;
+ }
+}
+static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, int upsample_above,
+ int dx) {
+ __m128i dstvec[64];
+
+ dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ for (int i = 0; i < N; i++) {
+ _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
+ }
+}
+
+static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_avx2(
+ int N, __m256i *dstvec, const uint8_t *above, int upsample_above, int dx) {
+ int x;
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((32 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a0_1, a1, a1_1, a32, a16;
+ __m256i a_mbase_x, diff;
+
+ a16 = _mm256_set1_epi32(16);
+ a_mbase_x = _mm256_set1_epi8(above[max_base_x]);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res[2], res16[2];
+
+ int base = x >> frac_bits;
+ int base_max_diff = (max_base_x - base);
+ if (base_max_diff <= 0) {
+ for (int i = r; i < N; ++i) {
+ dstvec[i] = a_mbase_x; // save 32 values
+ }
+ return;
+ }
+ if (base_max_diff > 32) base_max_diff = 32;
+ __m256i shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+
+ for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
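+      // build the 32-pixel row as two 16-pixel halves; a half entirely past
+      // max_base_x is just the replicated last sample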
+ int mdiff = base_max_diff - j;
+ if (mdiff <= 0) {
+ res16[jj] = a_mbase_x;
+ } else {
+ a0 = _mm256_cvtepu8_epi32(
+ _mm_loadu_si128((__m128i *)(above + base + j)));
+ a1 = _mm256_cvtepu8_epi32(
+ _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
+
+ diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[0] = _mm256_add_epi32(a32, b);
+ res[0] = _mm256_srli_epi32(res[0], 5);
+ res[0] = _mm256_packus_epi32(
+ res[0],
+ _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
+
+ // goto 8 bit
+ res[0] = _mm256_packus_epi16(res[0], res[0]);
+
+ if (mdiff > 8) {
+ a0_1 = _mm256_cvtepu8_epi32(
+ _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
+ a1_1 = _mm256_cvtepu8_epi32(
+ _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
+
+ diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[1] = _mm256_add_epi32(a32, b);
+ res[1] = _mm256_srli_epi32(res[1], 5);
+ res[1] = _mm256_packus_epi32(
+ res[1],
+ _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
+ res[1] = _mm256_packus_epi16(res[1], res[1]);
+ // goto 8 bit
+ } else {
+ res[1] = a_mbase_x;
+ }
+ res16[jj] = _mm256_unpacklo_epi64(res[0], res[1]); // 16 8bit values
+ }
+ }
+ res16[1] =
+ _mm256_inserti128_si256(res16[0], _mm256_castsi256_si128(res16[1]),
+ 1); // 32 8bit values
+
+ dstvec[r] = _mm256_blendv_epi8(
+ a_mbase_x, res16[1],
+ *(__m256i *)BaseMask[base_max_diff]); // 32 8bit values
+ x += dx;
+ }
+}
+
+static void dr_prediction_z1_32xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, int upsample_above,
+ int dx) {
+ __m256i dstvec[64];
+ dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ for (int i = 0; i < N; i++) {
+ _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
+ }
+}
+
+static void dr_prediction_z1_64xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, int upsample_above,
+ int dx) {
+ int x;
+
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((64 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a0_1, a1, a1_1, a32, a16;
+ __m256i a_mbase_x, diff;
+ __m128i max_base_x128, base_inc128, mask128;
+
+ a16 = _mm256_set1_epi32(16);
+ a_mbase_x = _mm256_set1_epi8(above[max_base_x]);
+ max_base_x128 = _mm_set1_epi8(max_base_x);
+
+ x = dx;
+ for (int r = 0; r < N; r++, dst += stride) {
+ __m256i b, res[2];
+ __m128i res1;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ _mm256_storeu_si256((__m256i *)dst, a_mbase_x); // save 32 values
+ _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
+ dst += stride;
+ }
+ return;
+ }
+
+ __m256i shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+
+ __m128i a0_128, a0_1_128, a1_128, a1_1_128;
+ for (int j = 0; j < 64; j += 16) {
+ int mdif = max_base_x - (base + j);
+ if (mdif <= 0) {
+ _mm_storeu_si128((__m128i *)(dst + j),
+ _mm256_castsi256_si128(a_mbase_x));
+ } else {
+ a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
+ a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
+ a0 = _mm256_cvtepu8_epi32(a0_128);
+ a1 = _mm256_cvtepu8_epi32(a1_128);
+
+ diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[0] = _mm256_add_epi32(a32, b);
+ res[0] = _mm256_srli_epi32(res[0], 5);
+ res[0] = _mm256_packus_epi32(
+ res[0],
+ _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
+ // goto 8 bit
+ res[0] = _mm256_packus_epi16(res[0], res[0]);
+
+ if (mdif > 8) {
+ a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
+ a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
+ a0_1 = _mm256_cvtepu8_epi32(a0_1_128);
+ a1_1 = _mm256_cvtepu8_epi32(a1_1_128);
+
+ diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res[1] = _mm256_add_epi32(a32, b);
+ res[1] = _mm256_srli_epi32(res[1], 5);
+ res[1] = _mm256_packus_epi32(
+ res[1],
+ _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
+ res[1] = _mm256_packus_epi16(res[1], res[1]);
+
+ } else {
+ res[1] = a_mbase_x;
+ }
+ res1 = _mm_unpacklo_epi64(
+ _mm256_castsi256_si128(res[0]),
+ _mm256_castsi256_si128(res[1])); // 16 8bit values
+
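+        // per-pixel validity mask: the saturating subtract is 0 once
+        // base + k >= max_base_x, and cmpgt turns nonzero into 0xff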
+ base_inc128 = _mm_setr_epi8(
+ base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
+ base + j + 5, base + j + 6, base + j + 7, base + j + 8,
+ base + j + 9, base + j + 10, base + j + 11, base + j + 12,
+ base + j + 13, base + j + 14, base + j + 15);
+
+ mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
+ _mm_setzero_si128());
+ res1 =
+ _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x), res1, mask128);
+ _mm_storeu_si128((__m128i *)(dst + j), res1);
+ }
+ }
+ x += dx;
+ }
+}
+
+// Directional prediction, zone 1: 0 < angle < 90
+void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
+ const uint8_t *above, const uint8_t *left,
+ int upsample_above, int dx, int dy) {
+ (void)left;
+ (void)dy;
+ switch (bw) {
+ case 4:
+ dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above, dx);
+ break;
+ case 8:
+ dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above, dx);
+ break;
+ case 16:
+ dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above, dx);
+ break;
+ case 32:
+ dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above, dx);
+ break;
+ case 64:
+ dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above, dx);
+ break;
+ default: break;
+ }
+ return;
+}
+
+static uint8_t LoadMaskx[8][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 },
+ { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
+ { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 },
+ { 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+ { 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8 },
+};
+
+static uint8_t EvenOddMaskx4[8][16] = {
+ { 0, 2, 4, 6, 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 1, 3, 5, 7, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 2, 4, 6, 8, 3, 5, 7, 9, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 3, 5, 7, 9, 4, 6, 8, 10, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 4, 6, 8, 10, 5, 7, 9, 11, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 5, 7, 9, 11, 6, 8, 10, 12, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 6, 8, 10, 12, 7, 9, 11, 13, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 7, 9, 11, 13, 8, 10, 12, 14, 0 }
+};
+
+static uint8_t EvenOddMaskx[8][16] = {
+ { 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 0, 0, 0, 0 },
+ { 0, 1, 3, 5, 7, 9, 11, 13, 15, 2, 4, 6, 8, 0, 0, 0 },
+ { 0, 0, 2, 4, 6, 8, 10, 12, 14, 3, 5, 7, 9, 0, 0, 0 },
+  { 0, 0, 0, 3, 5, 7, 9, 11, 13, 15, 4, 6, 8, 10, 0, 0 },
+ { 0, 0, 0, 0, 4, 6, 8, 10, 12, 14, 5, 7, 9, 11, 0, 0 },
+ { 0, 0, 0, 0, 0, 5, 7, 9, 11, 13, 15, 6, 8, 10, 12, 0 },
+ { 0, 0, 0, 0, 0, 0, 6, 8, 10, 12, 14, 7, 9, 11, 13, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 7, 9, 11, 13, 15, 8, 10, 12, 14 }
+};
+
+static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left,
+ int upsample_above, int upsample_left,
+ int dx, int dy) {
+ const int min_base_x = -(1 << upsample_above);
+ const int min_base_y = -(1 << upsample_left);
+ const int frac_bits_x = 6 - upsample_above;
+ const int frac_bits_y = 6 - upsample_left;
+
+  // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, diff;
+ __m128i c3f, min_base_y128;
+
+ a16 = _mm256_set1_epi32(16);
+ c3f = _mm_set1_epi32(0x3f);
+ min_base_y128 = _mm_set1_epi32(min_base_y);
+
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i resx, resy, resxy;
+ __m128i a0_x128, a1_x128;
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
+ int base_shift = 0;
+ if (base_x < (min_base_x - 1)) {
+ base_shift = (min_base_x - base_x - 1) >> upsample_above;
+ }
+ int base_min_diff =
+ (min_base_x - base_x + upsample_above) >> upsample_above;
+ if (base_min_diff > 4) {
+ base_min_diff = 4;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 3) {
+ resx = _mm_setzero_si128();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+ if (upsample_above) {
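+        // EvenOddMaskx4 packs the even samples into the low bytes and the
+        // odd samples right after them, so above[2x+1] is a 4-byte shift away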
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx4[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 4);
+
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(
+ _mm_slli_epi32(
+ _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx),
+ upsample_above),
+ c3f),
+ 1));
+ } else {
+ a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 1);
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx),
+ c3f),
+ 1));
+ }
+ a0_x = _mm256_cvtepu8_epi32(a0_x128);
+ a1_x = _mm256_cvtepu8_epi32(a1_x128);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resx = _mm256_castsi256_si128(res);
+ resx = _mm_packus_epi32(resx, resx);
+ resx = _mm_packus_epi16(resx, resx);
+ }
+ // y calc
+ if (base_x < min_base_x) {
+ DECLARE_ALIGNED(32, int, base_y_c[4]);
+ __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+ r6 = _mm_set1_epi32(r << 6);
+ dy128 = _mm_set1_epi32(dy);
+ c1234 = _mm_setr_epi32(1, 2, 3, 4);
+ y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
+ base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
+ mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
+ base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+ _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+ a0_y = _mm256_castsi128_si256(
+ _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
+ left[base_y_c[2]], left[base_y_c[3]]));
+ a1_y = _mm256_castsi128_si256(
+ _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+ left[base_y_c[2] + 1], left[base_y_c[3] + 1]));
+
+ if (upsample_left) {
+ shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1));
+ } else {
+ shift = _mm256_castsi128_si256(
+ _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1));
+ }
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ resy = _mm256_castsi256_si128(res);
+ resy = _mm_packus_epi32(resy, resy);
+ resy = _mm_packus_epi16(resy, resy);
+ } else {
+ resy = resx;
+ }
+ resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
+ *(uint32_t *)(dst) = _mm_cvtsi128_si32(resxy);
+ dst += stride;
+ }
+}
+
+static void dr_prediction_z2_Nx8_avx2(int N, uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left,
+ int upsample_above, int upsample_left,
+ int dx, int dy) {
+ const int min_base_x = -(1 << upsample_above);
+ const int min_base_y = -(1 << upsample_left);
+ const int frac_bits_x = 6 - upsample_above;
+ const int frac_bits_y = 6 - upsample_left;
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+  // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f;
+ __m256i diff, min_base_y256;
+ __m128i a0_x128, a1_x128;
+
+ a16 = _mm256_set1_epi32(16);
+ c3f = _mm256_set1_epi32(0x3f);
+ min_base_y256 = _mm256_set1_epi32(min_base_y);
+
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i resx, resy, resxy;
+
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
+ int base_shift = 0;
+ if (base_x < (min_base_x - 1)) {
+ base_shift = (min_base_x - base_x - 1) >> upsample_above;
+ }
+ int base_min_diff =
+ (min_base_x - base_x + upsample_above) >> upsample_above;
+ if (base_min_diff > 8) {
+ base_min_diff = 8;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 7) {
+ resx = _mm_setzero_si128();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+ if (upsample_above) {
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
+ a1_x128 =
+ _mm_shuffle_epi8(a1_x128, *(__m128i *)EvenOddMaskx[base_shift]);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_slli_epi32(
+ _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx, (3 << 6) - y * dx,
+ (4 << 6) - y * dx, (5 << 6) - y * dx,
+ (6 << 6) - y * dx, (7 << 6) - y * dx),
+ upsample_above),
+ c3f),
+ 1);
+ } else {
+ a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
+ a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+ (3 << 6) - y * dx, (4 << 6) - y * dx,
+ (5 << 6) - y * dx, (6 << 6) - y * dx,
+ (7 << 6) - y * dx),
+ c3f),
+ 1);
+ }
+ a0_x = _mm256_cvtepu8_epi32(a0_x128);
+ a1_x = _mm256_cvtepu8_epi32(a1_x128);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resx = _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res));
+ }
+ // y calc
+ if (base_x < min_base_x) {
+ DECLARE_ALIGNED(32, int, base_y_c[8]);
+ __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
+ r6 = _mm256_set1_epi32(r << 6);
+ dy256 = _mm256_set1_epi32(dy);
+ c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
+ y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+
+ a0_y = _mm256_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
+ left[base_y_c[2]], left[base_y_c[3]],
+ left[base_y_c[4]], left[base_y_c[5]],
+ left[base_y_c[6]], left[base_y_c[7]]);
+ a1_y = _mm256_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+ left[base_y_c[2] + 1], left[base_y_c[3] + 1],
+ left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+ left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
+
+ if (upsample_left) {
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(_mm256_slli_epi32(y_c256, upsample_left), c3f), 1);
+ } else {
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+ }
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resy = _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res));
+ } else {
+ resy = resx;
+ }
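+ // the first base_min_diff pixels of the row come from the left column, the
+ // rest from above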
+ resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
+ _mm_storel_epi64((__m128i *)(dst), resxy);
+ dst += stride;
+ }
+}
+
+static void dr_prediction_z2_HxW_avx2(int H, int W, uint8_t *dst,
+ ptrdiff_t stride, const uint8_t *above,
+ const uint8_t *left, int upsample_above,
+ int upsample_left, int dx, int dy) {
+ // upsample_above and upsample_left are always 0 here, by design of
+ // av1_use_intra_edge_upsample
+ const int min_base_x = -1;
+ const int min_base_y = -1;
+ (void)upsample_above;
+ (void)upsample_left;
+ const int frac_bits_x = 6;
+ const int frac_bits_y = 6;
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16;
+ __m256i diff, min_base_y256, c3f;
+ __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
+
+ a16 = _mm256_set1_epi32(16);
+ min_base_y256 = _mm256_set1_epi32(min_base_y);
+ c3f = _mm256_set1_epi32(0x3f);
+
+ for (int r = 0; r < H; r++) {
+ __m256i b, res, shift;
+ __m128i resx[2], resy[2];
+ __m128i resxy;
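+ // process the row in 16-pixel chunks; each chunk is computed as two
+ // 8-pixel halves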
+ for (int j = 0; j < W; j += 16) {
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
+
+ int base_shift = 0;
+ if ((base_x + j) < (min_base_x - 1)) {
+ base_shift = (min_base_x - (base_x + j) - 1);
+ }
+ int base_min_diff = (min_base_x - base_x - j);
+ if (base_min_diff > 16) {
+ base_min_diff = 16;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+ if (base_shift > 7) {
+ resx[0] = _mm_setzero_si128();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
+ a1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
+ a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
+ a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
+
+ a0_x = _mm256_cvtepu8_epi32(a0_x128);
+ a1_x = _mm256_cvtepu8_epi32(a1_x128);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(
+ ((0 + j) << 6) - y * dx, ((1 + j) << 6) - y * dx,
+ ((2 + j) << 6) - y * dx, ((3 + j) << 6) - y * dx,
+ ((4 + j) << 6) - y * dx, ((5 + j) << 6) - y * dx,
+ ((6 + j) << 6) - y * dx, ((7 + j) << 6) - y * dx),
+ c3f),
+ 1);
+
+ diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resx[0] = _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res));
+ }
+ base_shift = 0;
+ if ((base_x + j + 8) < (min_base_x - 1)) {
+ base_shift = (min_base_x - (base_x + j + 8) - 1);
+ }
+ if (base_shift > 7) {
+ resx[1] = _mm_setzero_si128();
+ } else {
+ a0_1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 8 + j));
+ a1_1_x128 =
+ _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 9 + j));
+ a0_1_x128 =
+ _mm_shuffle_epi8(a0_1_x128, *(__m128i *)LoadMaskx[base_shift]);
+ a1_1_x128 =
+ _mm_shuffle_epi8(a1_1_x128, *(__m128i *)LoadMaskx[base_shift]);
+
+ a0_1_x = _mm256_cvtepu8_epi32(a0_1_x128);
+ a1_1_x = _mm256_cvtepu8_epi32(a1_1_x128);
+
+ shift = _mm256_srli_epi32(
+ _mm256_and_si256(
+ _mm256_setr_epi32(
+ ((8 + j) << 6) - y * dx, ((9 + j) << 6) - y * dx,
+ ((10 + j) << 6) - y * dx, ((11 + j) << 6) - y * dx,
+ ((12 + j) << 6) - y * dx, ((13 + j) << 6) - y * dx,
+ ((14 + j) << 6) - y * dx, ((15 + j) << 6) - y * dx),
+ c3f),
+ 1);
+
+ diff = _mm256_sub_epi32(a1_1_x, a0_1_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_1_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi32(diff, shift);
+
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resx[1] = _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res));
+ }
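+ // combine the two 8-pixel halves into one 16-pixel row segment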
+ resx[0] = _mm_unpacklo_epi64(resx[0], resx[1]);
+
+ // y calc: pixels whose base_x falls before the above edge come from left[]
+ if (base_x < min_base_x) {
+ DECLARE_ALIGNED(32, int, base_y_c[16]);
+ __m256i r6, c256, dy256, y_c256, y_c_1_256, base_y_c256, mask256;
+ r6 = _mm256_set1_epi32(r << 6);
+ dy256 = _mm256_set1_epi32(dy);
+ c256 = _mm256_setr_epi32(1 + j, 2 + j, 3 + j, 4 + j, 5 + j, 6 + j,
+ 7 + j, 8 + j);
+ y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+ c256 = _mm256_setr_epi32(9 + j, 10 + j, 11 + j, 12 + j, 13 + j, 14 + j,
+ 15 + j, 16 + j);
+ y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+ base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
+ mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+ base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+ _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
+
+ a0_y = _mm256_cvtepu8_epi32(_mm_setr_epi8(
+ left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+ left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+ left[base_y_c[6]], left[base_y_c[7]], 0, 0, 0, 0, 0, 0, 0, 0));
+ a1_y = _mm256_cvtepu8_epi32(_mm_setr_epi8(
+ left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
+ left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+ left[base_y_c[6] + 1], left[base_y_c[7] + 1], 0, 0, 0, 0, 0, 0, 0,
+ 0));
+
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resy[0] = _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res));
+
+ a0_y = _mm256_cvtepu8_epi32(_mm_setr_epi8(
+ left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
+ left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
+ left[base_y_c[14]], left[base_y_c[15]], 0, 0, 0, 0, 0, 0, 0, 0));
+ a1_y = _mm256_cvtepu8_epi32(
+ _mm_setr_epi8(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
+ left[base_y_c[10] + 1], left[base_y_c[11] + 1],
+ left[base_y_c[12] + 1], left[base_y_c[13] + 1],
+ left[base_y_c[14] + 1], left[base_y_c[15] + 1], 0, 0,
+ 0, 0, 0, 0, 0, 0));
+ shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
+
+ diff = _mm256_sub_epi32(a1_y, a0_y); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi32(a0_y, 5); // a[x] * 32
+ a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi32(diff, shift);
+ res = _mm256_add_epi32(a32, b);
+ res = _mm256_srli_epi32(res, 5);
+
+ res = _mm256_packus_epi32(
+ res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+ resy[1] =
+ _mm_packus_epi16(_mm256_castsi256_si128(res),
+ _mm256_castsi256_si128(res)); // 8 16bit values
+ resy[0] = _mm_unpacklo_epi64(resy[0], resy[1]);
+ } else {
+ resy[0] = resx[0];
+ }
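+ // blend left-column and above-row results at the base_min_diff boundary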
+ resxy = _mm_blendv_epi8(resx[0], resy[0],
+ *(__m128i *)BaseMask[base_min_diff]);
+ _mm_storeu_si128((__m128i *)(dst + j), resxy);
+ } // for j
+ dst += stride;
+ }
+}
+
+// Directional prediction, zone 2: 90 < angle < 180
+void av1_dr_prediction_z2_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
+ const uint8_t *above, const uint8_t *left,
+ int upsample_above, int upsample_left, int dx,
+ int dy) {
+ assert(dx > 0);
+ assert(dy > 0);
+ switch (bw) {
+ case 4:
+ dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left, upsample_above,
+ upsample_left, dx, dy);
+ break;
+ case 8:
+ dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left, upsample_above,
+ upsample_left, dx, dy);
+
+ break;
+ default:
+ dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
+ upsample_above, upsample_left, dx, dy);
+ break;
+ }
+}
+
+// z3 functions
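+// transposes the 4x16 byte matrix in x[0..3] (one 16-byte row each); each
+// d[i] carries column i in its low 32 bits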
+static INLINE void transpose4x16_sse2(__m128i *x, __m128i *d) {
+ __m128i w0, w1, w2, w3, ww0, ww1, ww2, ww3;
+ w0 = _mm_unpacklo_epi8(x[0], x[1]);
+ w1 = _mm_unpacklo_epi8(x[2], x[3]);
+ w2 = _mm_unpackhi_epi8(x[0], x[1]);
+ w3 = _mm_unpackhi_epi8(x[2], x[3]);
+
+ ww0 = _mm_unpacklo_epi16(w0, w1);
+ ww1 = _mm_unpacklo_epi16(w2, w3);
+ ww2 = _mm_unpackhi_epi16(w0, w1);
+ ww3 = _mm_unpackhi_epi16(w2, w3);
+
+ w0 = _mm_unpacklo_epi32(ww0, ww1);
+ w2 = _mm_unpacklo_epi32(ww2, ww3);
+ w1 = _mm_unpackhi_epi32(ww0, ww1);
+ w3 = _mm_unpackhi_epi32(ww2, ww3);
+
+ d[0] = _mm_unpacklo_epi64(w0, w2);
+ d[1] = _mm_unpackhi_epi64(w0, w2);
+ d[2] = _mm_unpacklo_epi64(w1, w3);
+ d[3] = _mm_unpackhi_epi64(w1, w3);
+
+ d[4] = _mm_srli_si128(d[0], 8);
+ d[5] = _mm_srli_si128(d[1], 8);
+ d[6] = _mm_srli_si128(d[2], 8);
+ d[7] = _mm_srli_si128(d[3], 8);
+
+ d[8] = _mm_srli_si128(d[0], 4);
+ d[9] = _mm_srli_si128(d[1], 4);
+ d[10] = _mm_srli_si128(d[2], 4);
+ d[11] = _mm_srli_si128(d[3], 4);
+
+ d[12] = _mm_srli_si128(d[0], 12);
+ d[13] = _mm_srli_si128(d[1], 12);
+ d[14] = _mm_srli_si128(d[2], 12);
+ d[15] = _mm_srli_si128(d[3], 12);
+}
+
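+// transposes a 16x32 byte matrix (one 32-byte row per x[i]) into 32x16;
+// output row j sits in the low 128-bit lane of d[j], row j + 16 in the
+// high lane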
+static INLINE void transpose16x32_avx2(__m256i *x, __m256i *d) {
+ __m256i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
+ __m256i w10, w11, w12, w13, w14, w15;
+
+ w0 = _mm256_unpacklo_epi8(x[0], x[1]);
+ w1 = _mm256_unpacklo_epi8(x[2], x[3]);
+ w2 = _mm256_unpacklo_epi8(x[4], x[5]);
+ w3 = _mm256_unpacklo_epi8(x[6], x[7]);
+
+ w8 = _mm256_unpacklo_epi8(x[8], x[9]);
+ w9 = _mm256_unpacklo_epi8(x[10], x[11]);
+ w10 = _mm256_unpacklo_epi8(x[12], x[13]);
+ w11 = _mm256_unpacklo_epi8(x[14], x[15]);
+
+ w4 = _mm256_unpacklo_epi16(w0, w1);
+ w5 = _mm256_unpacklo_epi16(w2, w3);
+ w12 = _mm256_unpacklo_epi16(w8, w9);
+ w13 = _mm256_unpacklo_epi16(w10, w11);
+
+ w6 = _mm256_unpacklo_epi32(w4, w5);
+ w7 = _mm256_unpackhi_epi32(w4, w5);
+ w14 = _mm256_unpacklo_epi32(w12, w13);
+ w15 = _mm256_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ d[0] = _mm256_unpacklo_epi64(w6, w14);
+ d[1] = _mm256_unpackhi_epi64(w6, w14);
+ d[2] = _mm256_unpacklo_epi64(w7, w15);
+ d[3] = _mm256_unpackhi_epi64(w7, w15);
+
+ w4 = _mm256_unpackhi_epi16(w0, w1);
+ w5 = _mm256_unpackhi_epi16(w2, w3);
+ w12 = _mm256_unpackhi_epi16(w8, w9);
+ w13 = _mm256_unpackhi_epi16(w10, w11);
+
+ w6 = _mm256_unpacklo_epi32(w4, w5);
+ w7 = _mm256_unpackhi_epi32(w4, w5);
+ w14 = _mm256_unpacklo_epi32(w12, w13);
+ w15 = _mm256_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ d[4] = _mm256_unpacklo_epi64(w6, w14);
+ d[5] = _mm256_unpackhi_epi64(w6, w14);
+ d[6] = _mm256_unpacklo_epi64(w7, w15);
+ d[7] = _mm256_unpackhi_epi64(w7, w15);
+
+ // upper half
+ w0 = _mm256_unpackhi_epi8(x[0], x[1]);
+ w1 = _mm256_unpackhi_epi8(x[2], x[3]);
+ w2 = _mm256_unpackhi_epi8(x[4], x[5]);
+ w3 = _mm256_unpackhi_epi8(x[6], x[7]);
+
+ w8 = _mm256_unpackhi_epi8(x[8], x[9]);
+ w9 = _mm256_unpackhi_epi8(x[10], x[11]);
+ w10 = _mm256_unpackhi_epi8(x[12], x[13]);
+ w11 = _mm256_unpackhi_epi8(x[14], x[15]);
+
+ w4 = _mm256_unpacklo_epi16(w0, w1);
+ w5 = _mm256_unpacklo_epi16(w2, w3);
+ w12 = _mm256_unpacklo_epi16(w8, w9);
+ w13 = _mm256_unpacklo_epi16(w10, w11);
+
+ w6 = _mm256_unpacklo_epi32(w4, w5);
+ w7 = _mm256_unpackhi_epi32(w4, w5);
+ w14 = _mm256_unpacklo_epi32(w12, w13);
+ w15 = _mm256_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ d[8] = _mm256_unpacklo_epi64(w6, w14);
+ d[9] = _mm256_unpackhi_epi64(w6, w14);
+ d[10] = _mm256_unpacklo_epi64(w7, w15);
+ d[11] = _mm256_unpackhi_epi64(w7, w15);
+
+ w4 = _mm256_unpackhi_epi16(w0, w1);
+ w5 = _mm256_unpackhi_epi16(w2, w3);
+ w12 = _mm256_unpackhi_epi16(w8, w9);
+ w13 = _mm256_unpackhi_epi16(w10, w11);
+
+ w6 = _mm256_unpacklo_epi32(w4, w5);
+ w7 = _mm256_unpackhi_epi32(w4, w5);
+ w14 = _mm256_unpacklo_epi32(w12, w13);
+ w15 = _mm256_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ d[12] = _mm256_unpacklo_epi64(w6, w14);
+ d[13] = _mm256_unpackhi_epi64(w6, w14);
+ d[14] = _mm256_unpacklo_epi64(w7, w15);
+ d[15] = _mm256_unpackhi_epi64(w7, w15);
+}
+
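+// transposes the 16x16 byte matrix in x[0..15] into d[0..15]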
+static INLINE void transpose16x16_sse2(__m128i *x, __m128i *d) {
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
+ __m128i w10, w11, w12, w13, w14, w15;
+
+ w0 = _mm_unpacklo_epi8(x[0], x[1]);
+ w1 = _mm_unpacklo_epi8(x[2], x[3]);
+ w2 = _mm_unpacklo_epi8(x[4], x[5]);
+ w3 = _mm_unpacklo_epi8(x[6], x[7]);
+
+ w8 = _mm_unpacklo_epi8(x[8], x[9]);
+ w9 = _mm_unpacklo_epi8(x[10], x[11]);
+ w10 = _mm_unpacklo_epi8(x[12], x[13]);
+ w11 = _mm_unpacklo_epi8(x[14], x[15]);
+
+ w4 = _mm_unpacklo_epi16(w0, w1);
+ w5 = _mm_unpacklo_epi16(w2, w3);
+ w12 = _mm_unpacklo_epi16(w8, w9);
+ w13 = _mm_unpacklo_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ d[0] = _mm_unpacklo_epi64(w6, w14);
+ d[1] = _mm_unpackhi_epi64(w6, w14);
+ d[2] = _mm_unpacklo_epi64(w7, w15);
+ d[3] = _mm_unpackhi_epi64(w7, w15);
+
+ w4 = _mm_unpackhi_epi16(w0, w1);
+ w5 = _mm_unpackhi_epi16(w2, w3);
+ w12 = _mm_unpackhi_epi16(w8, w9);
+ w13 = _mm_unpackhi_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ d[4] = _mm_unpacklo_epi64(w6, w14);
+ d[5] = _mm_unpackhi_epi64(w6, w14);
+ d[6] = _mm_unpacklo_epi64(w7, w15);
+ d[7] = _mm_unpackhi_epi64(w7, w15);
+
+ // upper half
+ w0 = _mm_unpackhi_epi8(x[0], x[1]);
+ w1 = _mm_unpackhi_epi8(x[2], x[3]);
+ w2 = _mm_unpackhi_epi8(x[4], x[5]);
+ w3 = _mm_unpackhi_epi8(x[6], x[7]);
+
+ w8 = _mm_unpackhi_epi8(x[8], x[9]);
+ w9 = _mm_unpackhi_epi8(x[10], x[11]);
+ w10 = _mm_unpackhi_epi8(x[12], x[13]);
+ w11 = _mm_unpackhi_epi8(x[14], x[15]);
+
+ w4 = _mm_unpacklo_epi16(w0, w1);
+ w5 = _mm_unpacklo_epi16(w2, w3);
+ w12 = _mm_unpacklo_epi16(w8, w9);
+ w13 = _mm_unpacklo_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ d[8] = _mm_unpacklo_epi64(w6, w14);
+ d[9] = _mm_unpackhi_epi64(w6, w14);
+ d[10] = _mm_unpacklo_epi64(w7, w15);
+ d[11] = _mm_unpackhi_epi64(w7, w15);
+
+ w4 = _mm_unpackhi_epi16(w0, w1);
+ w5 = _mm_unpackhi_epi16(w2, w3);
+ w12 = _mm_unpackhi_epi16(w8, w9);
+ w13 = _mm_unpackhi_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ d[12] = _mm_unpacklo_epi64(w6, w14);
+ d[13] = _mm_unpackhi_epi64(w6, w14);
+ d[14] = _mm_unpacklo_epi64(w7, w15);
+ d[15] = _mm_unpackhi_epi64(w7, w15);
+}
+
+static void transpose_TX_8X8(const uint8_t *src, ptrdiff_t pitchSrc,
+ uint8_t *dst, ptrdiff_t pitchDst) {
+ __m128i r0, r1, r2, r3, r4, r5, r6, r7;
+ __m128i d0d1, d2d3, d4d5, d6d7;
+ r0 = _mm_loadl_epi64((__m128i *)(src + 0 * pitchSrc));
+ r1 = _mm_loadl_epi64((__m128i *)(src + 1 * pitchSrc));
+ r2 = _mm_loadl_epi64((__m128i *)(src + 2 * pitchSrc));
+ r3 = _mm_loadl_epi64((__m128i *)(src + 3 * pitchSrc));
+ r4 = _mm_loadl_epi64((__m128i *)(src + 4 * pitchSrc));
+ r5 = _mm_loadl_epi64((__m128i *)(src + 5 * pitchSrc));
+ r6 = _mm_loadl_epi64((__m128i *)(src + 6 * pitchSrc));
+ r7 = _mm_loadl_epi64((__m128i *)(src + 7 * pitchSrc));
+
+ transpose8x8_sse2(&r0, &r1, &r2, &r3, &r4, &r5, &r6, &r7, &d0d1, &d2d3, &d4d5,
+ &d6d7);
+
+ _mm_storel_epi64((__m128i *)(dst + 0 * pitchDst), d0d1);
+ _mm_storel_epi64((__m128i *)(dst + 1 * pitchDst), _mm_srli_si128(d0d1, 8));
+ _mm_storel_epi64((__m128i *)(dst + 2 * pitchDst), d2d3);
+ _mm_storel_epi64((__m128i *)(dst + 3 * pitchDst), _mm_srli_si128(d2d3, 8));
+ _mm_storel_epi64((__m128i *)(dst + 4 * pitchDst), d4d5);
+ _mm_storel_epi64((__m128i *)(dst + 5 * pitchDst), _mm_srli_si128(d4d5, 8));
+ _mm_storel_epi64((__m128i *)(dst + 6 * pitchDst), d6d7);
+ _mm_storel_epi64((__m128i *)(dst + 7 * pitchDst), _mm_srli_si128(d6d7, 8));
+}
+
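+// transposes a byte matrix in 8x8 tiles; width and height must be multiples
+// of 8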
+static void transpose(const uint8_t *src, ptrdiff_t pitchSrc, uint8_t *dst,
+ ptrdiff_t pitchDst, int width, int height) {
+ for (int j = 0; j < height; j += 8)
+ for (int i = 0; i < width; i += 8)
+ transpose_TX_8X8(src + i * pitchSrc + j, pitchSrc, dst + j * pitchDst + i,
+ pitchDst);
+}
+
+static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[4], d[4];
+
+ dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
+ &d[0], &d[1], &d[2], &d[3]);
+
+ *(uint32_t *)(dst + stride * 0) = _mm_cvtsi128_si32(d[0]);
+ *(uint32_t *)(dst + stride * 1) = _mm_cvtsi128_si32(d[1]);
+ *(uint32_t *)(dst + stride * 2) = _mm_cvtsi128_si32(d[2]);
+ *(uint32_t *)(dst + stride * 3) = _mm_cvtsi128_si32(d[3]);
+}
+
+static void dr_prediction_z3_8x8_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[8], d[8];
+
+ dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
+ &dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
+ &d[3]);
+
+ _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
+ _mm_storel_epi64((__m128i *)(dst + 1 * stride), _mm_srli_si128(d[0], 8));
+ _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[1]);
+ _mm_storel_epi64((__m128i *)(dst + 3 * stride), _mm_srli_si128(d[1], 8));
+ _mm_storel_epi64((__m128i *)(dst + 4 * stride), d[2]);
+ _mm_storel_epi64((__m128i *)(dst + 5 * stride), _mm_srli_si128(d[2], 8));
+ _mm_storel_epi64((__m128i *)(dst + 6 * stride), d[3]);
+ _mm_storel_epi64((__m128i *)(dst + 7 * stride), _mm_srli_si128(d[3], 8));
+}
+
+static void dr_prediction_z3_4x8_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[4], d[8];
+
+ dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
+ &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
+ for (int i = 0; i < 8; i++) {
+ *(uint32_t *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
+ }
+}
+
+static void dr_prediction_z3_8x4_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[8], d[4];
+
+ dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
+ &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
+ &d[1], &d[2], &d[3]);
+ _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
+ _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
+ _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
+ _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
+}
+
+static void dr_prediction_z3_8x16_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[8], d[8];
+
+ dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
+ dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
+ d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
+ for (int i = 0; i < 8; i++) {
+ _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
+ _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
+ _mm_srli_si128(d[i], 8));
+ }
+}
+
+static void dr_prediction_z3_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[16], d[16];
+
+ dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ transpose16x8_8x16_sse2(
+ &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
+ &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
+ &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
+ &d[3], &d[4], &d[5], &d[6], &d[7]);
+
+ for (int i = 0; i < 8; i++) {
+ _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
+ }
+}
+
+static void dr_prediction_z3_4x16_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[4], d[16];
+
+ dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ transpose4x16_sse2(dstvec, d);
+ for (int i = 0; i < 16; i++) {
+ *(uint32_t *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
+ }
+}
+
+static void dr_prediction_z3_16x4_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[16], d[8];
+
+ dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left, dy);
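+ // only the low 4 bytes of each dstvec entry are valid; after the 16x8
+ // transpose, rows 0..3 of d hold the 16x4 result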
+ for (int i = 4; i < 8; i++) {
+ d[i] = _mm_setzero_si128();
+ }
+ transpose16x8_8x16_sse2(
+ &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
+ &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
+ &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
+ &d[3], &d[4], &d[5], &d[6], &d[7]);
+
+ for (int i = 0; i < 4; i++) {
+ _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
+ }
+}
+
+static void dr_prediction_z3_8x32_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m256i dstvec[16], d[16];
+
+ dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left, dy);
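+ // only dstvec[0..7] are written by the 8-row prediction; zero the rest so
+ // the 16x32 transpose reads initialized data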
+ for (int i = 8; i < 16; i++) {
+ dstvec[i] = _mm256_setzero_si256();
+ }
+ transpose16x32_avx2(dstvec, d);
+
+ for (int i = 0; i < 16; i++) {
+ _mm_storel_epi64((__m128i *)(dst + i * stride),
+ _mm256_castsi256_si128(d[i]));
+ }
+ for (int i = 0; i < 16; i++) {
+ _mm_storel_epi64((__m128i *)(dst + (i + 16) * stride),
+ _mm256_extracti128_si256(d[i], 1));
+ }
+}
+
+static void dr_prediction_z3_32x8_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[32], d[16];
+
+ dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left, dy);
+
+ transpose16x8_8x16_sse2(
+ &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
+ &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
+ &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
+ &d[3], &d[4], &d[5], &d[6], &d[7]);
+ transpose16x8_8x16_sse2(
+ &dstvec[0 + 16], &dstvec[1 + 16], &dstvec[2 + 16], &dstvec[3 + 16],
+ &dstvec[4 + 16], &dstvec[5 + 16], &dstvec[6 + 16], &dstvec[7 + 16],
+ &dstvec[8 + 16], &dstvec[9 + 16], &dstvec[10 + 16], &dstvec[11 + 16],
+ &dstvec[12 + 16], &dstvec[13 + 16], &dstvec[14 + 16], &dstvec[15 + 16],
+ &d[0 + 8], &d[1 + 8], &d[2 + 8], &d[3 + 8], &d[4 + 8], &d[5 + 8],
+ &d[6 + 8], &d[7 + 8]);
+
+ for (int i = 0; i < 8; i++) {
+ _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
+ _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 8]);
+ }
+}
+
+static void dr_prediction_z3_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[16], d[16];
+
+ dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ transpose16x16_sse2(dstvec, d);
+
+ for (int i = 0; i < 16; i++) {
+ _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
+ }
+}
+
+static void dr_prediction_z3_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m256i dstvec[32], d[32];
+
+ dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left, dy);
+ transpose16x32_avx2(dstvec, d);
+ transpose16x32_avx2(dstvec + 16, d + 16);
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + j * stride),
+ _mm256_castsi256_si128(d[j]));
+ _mm_storeu_si128((__m128i *)(dst + j * stride + 16),
+ _mm256_castsi256_si128(d[j + 16]));
+ }
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
+ _mm256_extracti128_si256(d[j], 1));
+ _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride + 16),
+ _mm256_extracti128_si256(d[j + 16], 1));
+ }
+}
+
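+// 64x64 (and the 32x64/64x32/16x64 variants below): predict with z1 into a
+// temporary buffer, then transpose into the destination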
+static void dr_prediction_z3_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ DECLARE_ALIGNED(16, uint8_t, dstT[64 * 64]);
+ dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
+ transpose(dstT, 64, dst, stride, 64, 64);
+}
+
+static void dr_prediction_z3_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m256i dstvec[16], d[16];
+
+ dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ transpose16x32_avx2(dstvec, d);
+ // store
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + j * stride),
+ _mm256_castsi256_si128(d[j]));
+ _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
+ _mm256_extracti128_si256(d[j], 1));
+ }
+}
+
+static void dr_prediction_z3_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[32], d[16];
+
+ dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left, dy);
+ for (int i = 0; i < 32; i += 16) {
+ transpose16x16_sse2((dstvec + i), d);
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
+ }
+ }
+}
+
+static void dr_prediction_z3_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ uint8_t dstT[64 * 32];
+ dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
+ transpose(dstT, 64, dst, stride, 32, 64);
+}
+
+static void dr_prediction_z3_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ uint8_t dstT[32 * 64];
+ dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
+ transpose(dstT, 32, dst, stride, 64, 32);
+}
+
+static void dr_prediction_z3_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ uint8_t dstT[64 * 16];
+ dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
+ transpose(dstT, 64, dst, stride, 16, 64);
+}
+
+static void dr_prediction_z3_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, int upsample_left,
+ int dy) {
+ __m128i dstvec[64], d[16];
+
+ dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left, dy);
+ for (int i = 0; i < 64; i += 16) {
+ transpose16x16_sse2((dstvec + i), d);
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
+ }
+ }
+}
+
+void av1_dr_prediction_z3_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
+ const uint8_t *above, const uint8_t *left,
+ int upsample_left, int dx, int dy) {
+ (void)above;
+ (void)dx;
+ assert(dx == 1);
+ assert(dy > 0);
+
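+ // dispatch on block shape: square, 2:1, and 4:1 aspect ratios each have a
+ // dedicated kernel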
+ if (bw == bh) {
+ switch (bw) {
+ case 4:
+ dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 8:
+ dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 16:
+ dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 32:
+ dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 64:
+ dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ }
+ } else {
+ if (bw < bh) {
+ if (bw + bw == bh) {
+ switch (bw) {
+ case 4:
+ dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 8:
+ dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 16:
+ dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 32:
+ dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ }
+ } else {
+ switch (bw) {
+ case 4:
+ dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 8:
+ dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 16:
+ dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ }
+ }
+ } else {
+ if (bh + bh == bw) {
+ switch (bh) {
+ case 4:
+ dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 8:
+ dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 16:
+ dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 32:
+ dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ }
+ } else {
+ switch (bh) {
+ case 4:
+ dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 8:
+ dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ case 16:
+ dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left, dy);
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/aom_dsp/x86/loopfilter_sse2.c b/aom_dsp/x86/loopfilter_sse2.c
index 26f249e..e1981e5 100644
--- a/aom_dsp/x86/loopfilter_sse2.c
+++ b/aom_dsp/x86/loopfilter_sse2.c
@@ -16,347 +16,12 @@
#include "aom_dsp/x86/synonyms.h"
#include "aom_ports/mem.h"
#include "aom_ports/emmintrin_compat.h"
+#include "aom_dsp/x86/lpf_common_sse2.h"
static INLINE __m128i abs_diff(__m128i a, __m128i b) {
return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
}
-static INLINE void transpose4x8_8x4_low_sse2(__m128i *x0, __m128i *x1,
- __m128i *x2, __m128i *x3,
- __m128i *d0, __m128i *d1,
- __m128i *d2, __m128i *d3) {
- // input
- // x0 00 01 02 03 04 05 06 07 xx xx xx xx xx xx xx xx
- // x1 10 11 12 13 14 15 16 17 xx xx xx xx xx xx xx xx
- // x2 20 21 22 23 24 25 26 27 xx xx xx xx xx xx xx xx
- // x3 30 31 32 33 34 35 36 37 xx xx xx xx xx xx xx xx
- // output
- // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
- // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
- // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
- // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
-
- __m128i w0, w1;
-
- w0 = _mm_unpacklo_epi8(
- *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
- w1 = _mm_unpacklo_epi8(
- *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
-
- *d0 = _mm_unpacklo_epi16(
- w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
-
- *d1 = _mm_srli_si128(*d0,
- 4); // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
- *d2 = _mm_srli_si128(*d0,
- 8); // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
- *d3 = _mm_srli_si128(*d0,
- 12); // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
-}
-
-static INLINE void transpose4x8_8x4_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
- __m128i *x3, __m128i *d0, __m128i *d1,
- __m128i *d2, __m128i *d3, __m128i *d4,
- __m128i *d5, __m128i *d6,
- __m128i *d7) {
- // input
- // x0 00 01 02 03 04 05 06 07 xx xx xx xx xx xx xx xx
- // x1 10 11 12 13 14 15 16 17 xx xx xx xx xx xx xx xx
- // x2 20 21 22 23 24 25 26 27 xx xx xx xx xx xx xx xx
- // x3 30 31 32 33 34 35 36 37 xx xx xx xx xx xx xx xx
- // output
- // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
- // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
- // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
- // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
- // 04 14 24 34 xx xx xx xx xx xx xx xx xx xx xx xx
- // 05 15 25 35 xx xx xx xx xx xx xx xx xx xx xx xx
- // 06 16 26 36 xx xx xx xx xx xx xx xx xx xx xx xx
- // 07 17 27 37 xx xx xx xx xx xx xx xx xx xx xx xx
-
- __m128i w0, w1, ww0, ww1;
-
- w0 = _mm_unpacklo_epi8(
- *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
- w1 = _mm_unpacklo_epi8(
- *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
-
- ww0 = _mm_unpacklo_epi16(
- w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
- ww1 = _mm_unpackhi_epi16(
- w0, w1); // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
-
- *d0 = ww0; // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
- *d1 = _mm_srli_si128(ww0,
- 4); // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
- *d2 = _mm_srli_si128(ww0,
- 8); // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
- *d3 = _mm_srli_si128(ww0,
- 12); // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
-
- *d4 = ww1; // 04 14 24 34 xx xx xx xx xx xx xx xx xx xx xx xx
- *d5 = _mm_srli_si128(ww1,
- 4); // 05 15 25 35 xx xx xx xx xx xx xx xx xx xx xx xx
- *d6 = _mm_srli_si128(ww1,
- 8); // 06 16 26 36 xx xx xx xx xx xx xx xx xx xx xx xx
- *d7 = _mm_srli_si128(ww1,
- 12); // 07 17 27 37 xx xx xx xx xx xx xx xx xx xx xx xx
-}
-
-static INLINE void transpose8x8_low_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
- __m128i *x3, __m128i *x4, __m128i *x5,
- __m128i *x6, __m128i *x7, __m128i *d0,
- __m128i *d1, __m128i *d2,
- __m128i *d3) {
- // input
- // x0 00 01 02 03 04 05 06 07
- // x1 10 11 12 13 14 15 16 17
- // x2 20 21 22 23 24 25 26 27
- // x3 30 31 32 33 34 35 36 37
- // x4 40 41 42 43 44 45 46 47
- // x5 50 51 52 53 54 55 56 57
- // x6 60 61 62 63 64 65 66 67
- // x7 70 71 72 73 74 75 76 77
- // output
- // d0 00 10 20 30 40 50 60 70 xx xx xx xx xx xx xx
- // d1 01 11 21 31 41 51 61 71 xx xx xx xx xx xx xx xx
- // d2 02 12 22 32 42 52 62 72 xx xx xx xx xx xx xx xx
- // d3 03 13 23 33 43 53 63 73 xx xx xx xx xx xx xx xx
-
- __m128i w0, w1, w2, w3, w4, w5;
-
- w0 = _mm_unpacklo_epi8(
- *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
-
- w1 = _mm_unpacklo_epi8(
- *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
-
- w2 = _mm_unpacklo_epi8(
- *x4, *x5); // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
-
- w3 = _mm_unpacklo_epi8(
- *x6, *x7); // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
-
- w4 = _mm_unpacklo_epi16(
- w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
- w5 = _mm_unpacklo_epi16(
- w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
-
- *d0 = _mm_unpacklo_epi32(
- w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
- *d1 = _mm_srli_si128(*d0, 8);
- *d2 = _mm_unpackhi_epi32(
- w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
- *d3 = _mm_srli_si128(*d2, 8);
-}
-
-static INLINE void transpose8x8_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
- __m128i *x3, __m128i *x4, __m128i *x5,
- __m128i *x6, __m128i *x7, __m128i *d0d1,
- __m128i *d2d3, __m128i *d4d5,
- __m128i *d6d7) {
- __m128i w0, w1, w2, w3, w4, w5, w6, w7;
- // x0 00 01 02 03 04 05 06 07
- // x1 10 11 12 13 14 15 16 17
- w0 = _mm_unpacklo_epi8(
- *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
-
- // x2 20 21 22 23 24 25 26 27
- // x3 30 31 32 33 34 35 36 37
- w1 = _mm_unpacklo_epi8(
- *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
-
- // x4 40 41 42 43 44 45 46 47
- // x5 50 51 52 53 54 55 56 57
- w2 = _mm_unpacklo_epi8(
- *x4, *x5); // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
-
- // x6 60 61 62 63 64 65 66 67
- // x7 70 71 72 73 74 75 76 77
- w3 = _mm_unpacklo_epi8(
- *x6, *x7); // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
-
- w4 = _mm_unpacklo_epi16(
- w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
- w5 = _mm_unpacklo_epi16(
- w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
-
- *d0d1 = _mm_unpacklo_epi32(
- w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
- *d2d3 = _mm_unpackhi_epi32(
- w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
-
- w6 = _mm_unpackhi_epi16(
- w0, w1); // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
- w7 = _mm_unpackhi_epi16(
- w2, w3); // 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77
-
- *d4d5 = _mm_unpacklo_epi32(
- w6, w7); // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
- *d6d7 = _mm_unpackhi_epi32(
- w6, w7); // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
-}
-
-static INLINE void transpose16x8_8x16_sse2(
- __m128i *x0, __m128i *x1, __m128i *x2, __m128i *x3, __m128i *x4,
- __m128i *x5, __m128i *x6, __m128i *x7, __m128i *x8, __m128i *x9,
- __m128i *x10, __m128i *x11, __m128i *x12, __m128i *x13, __m128i *x14,
- __m128i *x15, __m128i *d0, __m128i *d1, __m128i *d2, __m128i *d3,
- __m128i *d4, __m128i *d5, __m128i *d6, __m128i *d7) {
- __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
- __m128i w10, w11, w12, w13, w14, w15;
-
- w0 = _mm_unpacklo_epi8(*x0, *x1);
- w1 = _mm_unpacklo_epi8(*x2, *x3);
- w2 = _mm_unpacklo_epi8(*x4, *x5);
- w3 = _mm_unpacklo_epi8(*x6, *x7);
-
- w8 = _mm_unpacklo_epi8(*x8, *x9);
- w9 = _mm_unpacklo_epi8(*x10, *x11);
- w10 = _mm_unpacklo_epi8(*x12, *x13);
- w11 = _mm_unpacklo_epi8(*x14, *x15);
-
- w4 = _mm_unpacklo_epi16(w0, w1);
- w5 = _mm_unpacklo_epi16(w2, w3);
- w12 = _mm_unpacklo_epi16(w8, w9);
- w13 = _mm_unpacklo_epi16(w10, w11);
-
- w6 = _mm_unpacklo_epi32(w4, w5);
- w7 = _mm_unpackhi_epi32(w4, w5);
- w14 = _mm_unpacklo_epi32(w12, w13);
- w15 = _mm_unpackhi_epi32(w12, w13);
-
- // Store first 4-line result
- *d0 = _mm_unpacklo_epi64(w6, w14);
- *d1 = _mm_unpackhi_epi64(w6, w14);
- *d2 = _mm_unpacklo_epi64(w7, w15);
- *d3 = _mm_unpackhi_epi64(w7, w15);
-
- w4 = _mm_unpackhi_epi16(w0, w1);
- w5 = _mm_unpackhi_epi16(w2, w3);
- w12 = _mm_unpackhi_epi16(w8, w9);
- w13 = _mm_unpackhi_epi16(w10, w11);
-
- w6 = _mm_unpacklo_epi32(w4, w5);
- w7 = _mm_unpackhi_epi32(w4, w5);
- w14 = _mm_unpacklo_epi32(w12, w13);
- w15 = _mm_unpackhi_epi32(w12, w13);
-
- // Store second 4-line result
- *d4 = _mm_unpacklo_epi64(w6, w14);
- *d5 = _mm_unpackhi_epi64(w6, w14);
- *d6 = _mm_unpacklo_epi64(w7, w15);
- *d7 = _mm_unpackhi_epi64(w7, w15);
-}
-
-// this function treats its input as 2 parallel 8x4 matrices, transposes each of
-// them independently while flipping the second matrix horizontaly Used for 14
-// taps filter pq pairs inverse
-static INLINE void transpose_pq_14_inv_sse2(__m128i *x0, __m128i *x1,
- __m128i *x2, __m128i *x3,
- __m128i *x4, __m128i *x5,
- __m128i *x6, __m128i *x7,
- __m128i *pq0, __m128i *pq1,
- __m128i *pq2, __m128i *pq3) {
- __m128i w10, w11, w12, w13;
- __m128i w0, w1, w2, w3, w4, w5;
- __m128i d0, d1, d2, d3;
-
- w0 = _mm_unpacklo_epi8(
- *x0, *x1); // p 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
- w1 = _mm_unpacklo_epi8(
- *x2, *x3); // p 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
- w2 = _mm_unpacklo_epi8(
- *x4, *x5); // p 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
- w3 = _mm_unpacklo_epi8(
- *x6, *x7); // p 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
-
- w4 = _mm_unpacklo_epi16(
- w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
- w5 = _mm_unpacklo_epi16(
- w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
-
- d0 = _mm_unpacklo_epi32(
- w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
- d2 = _mm_unpackhi_epi32(
- w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
-
- w10 = _mm_unpacklo_epi8(
- *x7, *x6); // q xx xx xx xx xx xx xx xx 00 10 01 11 02 12 03 13
- w11 = _mm_unpacklo_epi8(
- *x5, *x4); // q xx xx xx xx xx xx xx xx 20 30 21 31 22 32 23 33
- w12 = _mm_unpacklo_epi8(
- *x3, *x2); // q xx xx xx xx xx xx xx xx 40 50 41 51 42 52 43 53
- w13 = _mm_unpacklo_epi8(
- *x1, *x0); // q xx xx xx xx xx xx xx xx 60 70 61 71 62 72 63 73
-
- w4 = _mm_unpackhi_epi16(
- w10, w11); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
- w5 = _mm_unpackhi_epi16(
- w12, w13); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
-
- d1 = _mm_unpacklo_epi32(
- w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
- d3 = _mm_unpackhi_epi32(
- w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
-
- *pq0 = _mm_unpacklo_epi64(d0, d1); // pq
- *pq1 = _mm_unpackhi_epi64(d0, d1); // pq
- *pq2 = _mm_unpacklo_epi64(d2, d3); // pq
- *pq3 = _mm_unpackhi_epi64(d2, d3); // pq
-}
-
-static INLINE void transpose8x16_16x8_sse2(
- __m128i *x0, __m128i *x1, __m128i *x2, __m128i *x3, __m128i *x4,
- __m128i *x5, __m128i *x6, __m128i *x7, __m128i *d0d1, __m128i *d2d3,
- __m128i *d4d5, __m128i *d6d7, __m128i *d8d9, __m128i *d10d11,
- __m128i *d12d13, __m128i *d14d15) {
- __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
- __m128i w10, w11, w12, w13, w14, w15;
-
- w0 = _mm_unpacklo_epi8(*x0, *x1);
- w1 = _mm_unpacklo_epi8(*x2, *x3);
- w2 = _mm_unpacklo_epi8(*x4, *x5);
- w3 = _mm_unpacklo_epi8(*x6, *x7);
-
- w8 = _mm_unpackhi_epi8(*x0, *x1);
- w9 = _mm_unpackhi_epi8(*x2, *x3);
- w10 = _mm_unpackhi_epi8(*x4, *x5);
- w11 = _mm_unpackhi_epi8(*x6, *x7);
-
- w4 = _mm_unpacklo_epi16(w0, w1);
- w5 = _mm_unpacklo_epi16(w2, w3);
- w12 = _mm_unpacklo_epi16(w8, w9);
- w13 = _mm_unpacklo_epi16(w10, w11);
-
- w6 = _mm_unpacklo_epi32(w4, w5);
- w7 = _mm_unpackhi_epi32(w4, w5);
- w14 = _mm_unpacklo_epi32(w12, w13);
- w15 = _mm_unpackhi_epi32(w12, w13);
-
- // Store first 4-line result
- *d0d1 = _mm_unpacklo_epi64(w6, w14);
- *d2d3 = _mm_unpackhi_epi64(w6, w14);
- *d4d5 = _mm_unpacklo_epi64(w7, w15);
- *d6d7 = _mm_unpackhi_epi64(w7, w15);
-
- w4 = _mm_unpackhi_epi16(w0, w1);
- w5 = _mm_unpackhi_epi16(w2, w3);
- w12 = _mm_unpackhi_epi16(w8, w9);
- w13 = _mm_unpackhi_epi16(w10, w11);
-
- w6 = _mm_unpacklo_epi32(w4, w5);
- w7 = _mm_unpackhi_epi32(w4, w5);
- w14 = _mm_unpacklo_epi32(w12, w13);
- w15 = _mm_unpackhi_epi32(w12, w13);
-
- // Store second 4-line result
- *d8d9 = _mm_unpacklo_epi64(w6, w14);
- *d10d11 = _mm_unpackhi_epi64(w6, w14);
- *d12d13 = _mm_unpacklo_epi64(w7, w15);
- *d14d15 = _mm_unpackhi_epi64(w7, w15);
-}
-
// this function treats its input as 2 parallel 8x4 matrices, transposes each of
// them to 4x8 independently while flipping the second matrix horizontaly. Used
// for 14 taps pq pairs creation
@@ -416,6 +81,63 @@
ww2); // 07 17 27 37 08 18 28 38 xx xx xx xx xx xx xx xx
}
+// this function treats its input as 2 parallel 8x4 matrices, transposes each
+// of them independently while flipping the second matrix horizontally. Used
+// for the 14-tap filter pq pairs inverse
+static INLINE void transpose_pq_14_inv_sse2(__m128i *x0, __m128i *x1,
+ __m128i *x2, __m128i *x3,
+ __m128i *x4, __m128i *x5,
+ __m128i *x6, __m128i *x7,
+ __m128i *pq0, __m128i *pq1,
+ __m128i *pq2, __m128i *pq3) {
+ __m128i w10, w11, w12, w13;
+ __m128i w0, w1, w2, w3, w4, w5;
+ __m128i d0, d1, d2, d3;
+
+ w0 = _mm_unpacklo_epi8(
+ *x0, *x1); // p 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ w1 = _mm_unpacklo_epi8(
+ *x2, *x3); // p 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+ w2 = _mm_unpacklo_epi8(
+ *x4, *x5); // p 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
+ w3 = _mm_unpacklo_epi8(
+ *x6, *x7); // p 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
+
+ w4 = _mm_unpacklo_epi16(
+ w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ w5 = _mm_unpacklo_epi16(
+ w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+
+ d0 = _mm_unpacklo_epi32(
+ w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ d2 = _mm_unpackhi_epi32(
+ w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+
+ w10 = _mm_unpacklo_epi8(
+ *x7, *x6); // q xx xx xx xx xx xx xx xx 00 10 01 11 02 12 03 13
+ w11 = _mm_unpacklo_epi8(
+ *x5, *x4); // q xx xx xx xx xx xx xx xx 20 30 21 31 22 32 23 33
+ w12 = _mm_unpacklo_epi8(
+ *x3, *x2); // q xx xx xx xx xx xx xx xx 40 50 41 51 42 52 43 53
+ w13 = _mm_unpacklo_epi8(
+ *x1, *x0); // q xx xx xx xx xx xx xx xx 60 70 61 71 62 72 63 73
+
+ w4 = _mm_unpackhi_epi16(
+ w10, w11); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ w5 = _mm_unpackhi_epi16(
+ w12, w13); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+
+ d1 = _mm_unpacklo_epi32(
+ w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ d3 = _mm_unpackhi_epi32(
+ w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+
+ *pq0 = _mm_unpacklo_epi64(d0, d1); // pq
+ *pq1 = _mm_unpackhi_epi64(d0, d1); // pq
+ *pq2 = _mm_unpacklo_epi64(d2, d3); // pq
+ *pq3 = _mm_unpackhi_epi64(d2, d3); // pq
+}
+
static AOM_FORCE_INLINE void filter4_sse2(__m128i *p1p0, __m128i *q1q0,
__m128i *hev, __m128i *mask,
__m128i *qs1qs0, __m128i *ps1ps0) {
diff --git a/aom_dsp/x86/lpf_common_sse2.h b/aom_dsp/x86/lpf_common_sse2.h
index 8970fe7..6ed2cbf 100644
--- a/aom_dsp/x86/lpf_common_sse2.h
+++ b/aom_dsp/x86/lpf_common_sse2.h
@@ -212,4 +212,284 @@
d4 + 1, d5 + 1, d6 + 1, d7 + 1);
}
+// Low bit depth functions
+static INLINE void transpose4x8_8x4_low_sse2(__m128i *x0, __m128i *x1,
+ __m128i *x2, __m128i *x3,
+ __m128i *d0, __m128i *d1,
+ __m128i *d2, __m128i *d3) {
+ // input
+ // x0 00 01 02 03 04 05 06 07 xx xx xx xx xx xx xx xx
+ // x1 10 11 12 13 14 15 16 17 xx xx xx xx xx xx xx xx
+ // x2 20 21 22 23 24 25 26 27 xx xx xx xx xx xx xx xx
+ // x3 30 31 32 33 34 35 36 37 xx xx xx xx xx xx xx xx
+ // output
+ // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
+
+ __m128i w0, w1;
+
+ w0 = _mm_unpacklo_epi8(
+ *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ w1 = _mm_unpacklo_epi8(
+ *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+
+ *d0 = _mm_unpacklo_epi16(
+ w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+
+ *d1 = _mm_srli_si128(*d0,
+ 4); // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d2 = _mm_srli_si128(*d0,
+ 8); // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d3 = _mm_srli_si128(*d0,
+ 12); // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
+}
+
+static INLINE void transpose4x8_8x4_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
+ __m128i *x3, __m128i *d0, __m128i *d1,
+ __m128i *d2, __m128i *d3, __m128i *d4,
+ __m128i *d5, __m128i *d6,
+ __m128i *d7) {
+ // input
+ // x0 00 01 02 03 04 05 06 07 xx xx xx xx xx xx xx xx
+ // x1 10 11 12 13 14 15 16 17 xx xx xx xx xx xx xx xx
+ // x2 20 21 22 23 24 25 26 27 xx xx xx xx xx xx xx xx
+ // x3 30 31 32 33 34 35 36 37 xx xx xx xx xx xx xx xx
+ // output
+ // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 04 14 24 34 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 05 15 25 35 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 06 16 26 36 xx xx xx xx xx xx xx xx xx xx xx xx
+ // 07 17 27 37 xx xx xx xx xx xx xx xx xx xx xx xx
+
+ __m128i w0, w1, ww0, ww1;
+
+ w0 = _mm_unpacklo_epi8(
+ *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ w1 = _mm_unpacklo_epi8(
+ *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+
+ ww0 = _mm_unpacklo_epi16(
+ w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ ww1 = _mm_unpackhi_epi16(
+ w0, w1); // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
+
+ *d0 = ww0; // 00 10 20 30 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d1 = _mm_srli_si128(ww0,
+ 4); // 01 11 21 31 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d2 = _mm_srli_si128(ww0,
+ 8); // 02 12 22 32 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d3 = _mm_srli_si128(ww0,
+ 12); // 03 13 23 33 xx xx xx xx xx xx xx xx xx xx xx xx
+
+ *d4 = ww1; // 04 14 24 34 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d5 = _mm_srli_si128(ww1,
+ 4); // 05 15 25 35 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d6 = _mm_srli_si128(ww1,
+ 8); // 06 16 26 36 xx xx xx xx xx xx xx xx xx xx xx xx
+ *d7 = _mm_srli_si128(ww1,
+ 12); // 07 17 27 37 xx xx xx xx xx xx xx xx xx xx xx xx
+}
+
+static INLINE void transpose8x8_low_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
+ __m128i *x3, __m128i *x4, __m128i *x5,
+ __m128i *x6, __m128i *x7, __m128i *d0,
+ __m128i *d1, __m128i *d2,
+ __m128i *d3) {
+ // input
+ // x0 00 01 02 03 04 05 06 07
+ // x1 10 11 12 13 14 15 16 17
+ // x2 20 21 22 23 24 25 26 27
+ // x3 30 31 32 33 34 35 36 37
+ // x4 40 41 42 43 44 45 46 47
+ // x5 50 51 52 53 54 55 56 57
+ // x6 60 61 62 63 64 65 66 67
+ // x7 70 71 72 73 74 75 76 77
+ // output
+ // d0 00 10 20 30 40 50 60 70 xx xx xx xx xx xx xx xx
+ // d1 01 11 21 31 41 51 61 71 xx xx xx xx xx xx xx xx
+ // d2 02 12 22 32 42 52 62 72 xx xx xx xx xx xx xx xx
+ // d3 03 13 23 33 43 53 63 73 xx xx xx xx xx xx xx xx
+
+ __m128i w0, w1, w2, w3, w4, w5;
+
+ w0 = _mm_unpacklo_epi8(
+ *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+
+ w1 = _mm_unpacklo_epi8(
+ *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+
+ w2 = _mm_unpacklo_epi8(
+ *x4, *x5); // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
+
+ w3 = _mm_unpacklo_epi8(
+ *x6, *x7); // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
+
+ w4 = _mm_unpacklo_epi16(
+ w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ w5 = _mm_unpacklo_epi16(
+ w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+
+ *d0 = _mm_unpacklo_epi32(
+ w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ *d1 = _mm_srli_si128(*d0, 8);
+ *d2 = _mm_unpackhi_epi32(
+ w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+ *d3 = _mm_srli_si128(*d2, 8);
+}
+
+static INLINE void transpose8x8_sse2(__m128i *x0, __m128i *x1, __m128i *x2,
+ __m128i *x3, __m128i *x4, __m128i *x5,
+ __m128i *x6, __m128i *x7, __m128i *d0d1,
+ __m128i *d2d3, __m128i *d4d5,
+ __m128i *d6d7) {
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7;
+ // x0 00 01 02 03 04 05 06 07
+ // x1 10 11 12 13 14 15 16 17
+ w0 = _mm_unpacklo_epi8(
+ *x0, *x1); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+
+ // x2 20 21 22 23 24 25 26 27
+ // x3 30 31 32 33 34 35 36 37
+ w1 = _mm_unpacklo_epi8(
+ *x2, *x3); // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+
+ // x4 40 41 42 43 44 45 46 47
+ // x5 50 51 52 53 54 55 56 57
+ w2 = _mm_unpacklo_epi8(
+ *x4, *x5); // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
+
+ // x6 60 61 62 63 64 65 66 67
+ // x7 70 71 72 73 74 75 76 77
+ w3 = _mm_unpacklo_epi8(
+ *x6, *x7); // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
+
+ w4 = _mm_unpacklo_epi16(
+ w0, w1); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ w5 = _mm_unpacklo_epi16(
+ w2, w3); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+
+ *d0d1 = _mm_unpacklo_epi32(
+ w4, w5); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ *d2d3 = _mm_unpackhi_epi32(
+ w4, w5); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+
+ w6 = _mm_unpackhi_epi16(
+ w0, w1); // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
+ w7 = _mm_unpackhi_epi16(
+ w2, w3); // 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77
+
+ *d4d5 = _mm_unpacklo_epi32(
+ w6, w7); // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
+ *d6d7 = _mm_unpackhi_epi32(
+ w6, w7); // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
+}
+
+static INLINE void transpose16x8_8x16_sse2(
+ __m128i *x0, __m128i *x1, __m128i *x2, __m128i *x3, __m128i *x4,
+ __m128i *x5, __m128i *x6, __m128i *x7, __m128i *x8, __m128i *x9,
+ __m128i *x10, __m128i *x11, __m128i *x12, __m128i *x13, __m128i *x14,
+ __m128i *x15, __m128i *d0, __m128i *d1, __m128i *d2, __m128i *d3,
+ __m128i *d4, __m128i *d5, __m128i *d6, __m128i *d7) {
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
+ __m128i w10, w11, w12, w13, w14, w15;
+
+ w0 = _mm_unpacklo_epi8(*x0, *x1);
+ w1 = _mm_unpacklo_epi8(*x2, *x3);
+ w2 = _mm_unpacklo_epi8(*x4, *x5);
+ w3 = _mm_unpacklo_epi8(*x6, *x7);
+
+ w8 = _mm_unpacklo_epi8(*x8, *x9);
+ w9 = _mm_unpacklo_epi8(*x10, *x11);
+ w10 = _mm_unpacklo_epi8(*x12, *x13);
+ w11 = _mm_unpacklo_epi8(*x14, *x15);
+
+ w4 = _mm_unpacklo_epi16(w0, w1);
+ w5 = _mm_unpacklo_epi16(w2, w3);
+ w12 = _mm_unpacklo_epi16(w8, w9);
+ w13 = _mm_unpacklo_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ *d0 = _mm_unpacklo_epi64(w6, w14);
+ *d1 = _mm_unpackhi_epi64(w6, w14);
+ *d2 = _mm_unpacklo_epi64(w7, w15);
+ *d3 = _mm_unpackhi_epi64(w7, w15);
+
+ w4 = _mm_unpackhi_epi16(w0, w1);
+ w5 = _mm_unpackhi_epi16(w2, w3);
+ w12 = _mm_unpackhi_epi16(w8, w9);
+ w13 = _mm_unpackhi_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ *d4 = _mm_unpacklo_epi64(w6, w14);
+ *d5 = _mm_unpackhi_epi64(w6, w14);
+ *d6 = _mm_unpacklo_epi64(w7, w15);
+ *d7 = _mm_unpackhi_epi64(w7, w15);
+}
+
+static INLINE void transpose8x16_16x8_sse2(
+ __m128i *x0, __m128i *x1, __m128i *x2, __m128i *x3, __m128i *x4,
+ __m128i *x5, __m128i *x6, __m128i *x7, __m128i *d0d1, __m128i *d2d3,
+ __m128i *d4d5, __m128i *d6d7, __m128i *d8d9, __m128i *d10d11,
+ __m128i *d12d13, __m128i *d14d15) {
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
+ __m128i w10, w11, w12, w13, w14, w15;
+
+ w0 = _mm_unpacklo_epi8(*x0, *x1);
+ w1 = _mm_unpacklo_epi8(*x2, *x3);
+ w2 = _mm_unpacklo_epi8(*x4, *x5);
+ w3 = _mm_unpacklo_epi8(*x6, *x7);
+
+ w8 = _mm_unpackhi_epi8(*x0, *x1);
+ w9 = _mm_unpackhi_epi8(*x2, *x3);
+ w10 = _mm_unpackhi_epi8(*x4, *x5);
+ w11 = _mm_unpackhi_epi8(*x6, *x7);
+
+ w4 = _mm_unpacklo_epi16(w0, w1);
+ w5 = _mm_unpacklo_epi16(w2, w3);
+ w12 = _mm_unpacklo_epi16(w8, w9);
+ w13 = _mm_unpacklo_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store first 4-line result
+ *d0d1 = _mm_unpacklo_epi64(w6, w14);
+ *d2d3 = _mm_unpackhi_epi64(w6, w14);
+ *d4d5 = _mm_unpacklo_epi64(w7, w15);
+ *d6d7 = _mm_unpackhi_epi64(w7, w15);
+
+ w4 = _mm_unpackhi_epi16(w0, w1);
+ w5 = _mm_unpackhi_epi16(w2, w3);
+ w12 = _mm_unpackhi_epi16(w8, w9);
+ w13 = _mm_unpackhi_epi16(w10, w11);
+
+ w6 = _mm_unpacklo_epi32(w4, w5);
+ w7 = _mm_unpackhi_epi32(w4, w5);
+ w14 = _mm_unpacklo_epi32(w12, w13);
+ w15 = _mm_unpackhi_epi32(w12, w13);
+
+ // Store second 4-line result
+ *d8d9 = _mm_unpacklo_epi64(w6, w14);
+ *d10d11 = _mm_unpackhi_epi64(w6, w14);
+ *d12d13 = _mm_unpacklo_epi64(w7, w15);
+ *d14d15 = _mm_unpackhi_epi64(w7, w15);
+}
+
#endif // AOM_AOM_DSP_X86_LPF_COMMON_SSE2_H_
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index e0ca13f..1f12588 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -81,8 +81,11 @@
# directional intra predictor functions
add_proto qw/void av1_dr_prediction_z1/, "uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_above, int dx, int dy";
+specialize qw/av1_dr_prediction_z1 avx2/;
add_proto qw/void av1_dr_prediction_z2/, "uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_above, int upsample_left, int dx, int dy";
+specialize qw/av1_dr_prediction_z2 avx2/;
add_proto qw/void av1_dr_prediction_z3/, "uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_left, int dx, int dy";
+specialize qw/av1_dr_prediction_z3 avx2/;
# FILTER_INTRA predictor functions
add_proto qw/void av1_filter_intra_predictor/, "uint8_t *dst, ptrdiff_t stride, TX_SIZE tx_size, const uint8_t *above, const uint8_t *left, int mode";
@@ -157,7 +160,7 @@
add_proto qw/void av1_highbd_dr_prediction_z1/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_above, int dx, int dy, int bd";
specialize qw/av1_highbd_dr_prediction_z1 avx2/;
add_proto qw/void av1_highbd_dr_prediction_z2/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_above, int upsample_left, int dx, int dy, int bd";
-#specialize qw/av1_highbd_dr_prediction_z2 avx2/;
+specialize qw/av1_highbd_dr_prediction_z2 avx2/;
add_proto qw/void av1_highbd_dr_prediction_z3/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_left, int dx, int dy, int bd";
specialize qw/av1_highbd_dr_prediction_z3 avx2/;
diff --git a/test/dr_prediction_test.cc b/test/dr_prediction_test.cc
index a64d39b..674490f 100644
--- a/test/dr_prediction_test.cc
+++ b/test/dr_prediction_test.cc
@@ -135,7 +135,7 @@
template <typename Pixel, typename FuncType>
class DrPredTest : public ::testing::TestWithParam<DrPredFunc<FuncType> > {
protected:
- static const int kMaxNumTests = 100000;
+ static const int kMaxNumTests = 10000;
static const int kIterations = 10;
static const int kDstStride = 64;
static const int kDstSize = kDstStride * kDstStride;
@@ -171,7 +171,7 @@
void Predict(bool speedtest, int tx) {
const int kNumTests = speedtest ? kMaxNumTests : 1;
aom_usec_timer timer;
-
+ int tst_time = 0;
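+ // remains 0 when there is no optimized function to time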
aom_usec_timer_start(&timer);
for (int k = 0; k < kNumTests; ++k) {
params_.ref_fn(dst_ref_, dst_stride_, bw_, bh_, above_, left_,
@@ -180,20 +180,20 @@
aom_usec_timer_mark(&timer);
const int ref_time = static_cast<int>(aom_usec_timer_elapsed(&timer));
- aom_usec_timer_start(&timer);
if (params_.tst_fn) {
+ aom_usec_timer_start(&timer);
for (int k = 0; k < kNumTests; ++k) {
ASM_REGISTER_STATE_CHECK(params_.tst_fn(dst_tst_, dst_stride_, bw_, bh_,
above_, left_, upsample_above_,
upsample_left_, dx_, dy_, bd_));
}
+ aom_usec_timer_mark(&timer);
+ tst_time = static_cast<int>(aom_usec_timer_elapsed(&timer));
} else {
for (int i = 0; i < kDstSize; ++i) {
dst_ref_[i] = dst_tst_[i];
}
}
- aom_usec_timer_mark(&timer);
- const int tst_time = static_cast<int>(aom_usec_timer_elapsed(&timer));
OutputTimes(kNumTests, ref_time, tst_time, tx);
}
@@ -290,8 +290,7 @@
class LowbdDrPredTest : public DrPredTest<uint8_t, DrPred> {};
TEST_P(LowbdDrPredTest, SaturatedValues) {
- for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
- enable_upsample_ = iter & 1;
+ for (enable_upsample_ = 0; enable_upsample_ < 2; ++enable_upsample_) {
for (int angle = start_angle_; angle < stop_angle_; ++angle) {
dx_ = av1_get_dx(angle);
dy_ = av1_get_dy(angle);
@@ -300,20 +299,6 @@
}
}
-TEST_P(LowbdDrPredTest, DISABLED_Speed) {
- const int angles[] = { 3, 45, 87 };
- for (enable_upsample_ = 0; enable_upsample_ < 2; ++enable_upsample_) {
- for (int i = 0; i < 3; ++i) {
- const int angle = angles[i] + start_angle_;
- dx_ = av1_get_dx(angle);
- dy_ = av1_get_dy(angle);
- printf("enable_upsample: %d angle: %d ~~~~~~~~~~~~~~~\n",
- enable_upsample_, angle);
- if (dx_ && dy_) RunTest(true, false, angle);
- }
- }
-}
-
using ::testing::make_tuple;
INSTANTIATE_TEST_CASE_P(
@@ -328,8 +313,7 @@
class HighbdDrPredTest : public DrPredTest<uint16_t, DrPred_Hbd> {};
TEST_P(HighbdDrPredTest, SaturatedValues) {
- for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
- enable_upsample_ = iter & 1;
+ for (enable_upsample_ = 0; enable_upsample_ < 2; ++enable_upsample_) {
for (int angle = start_angle_; angle < stop_angle_; ++angle) {
dx_ = av1_get_dx(angle);
dy_ = av1_get_dy(angle);
@@ -362,6 +346,44 @@
#if HAVE_AVX2
INSTANTIATE_TEST_CASE_P(
+ AVX2, LowbdDrPredTest,
+ ::testing::Values(DrPredFunc<DrPred>(&z1_wrapper<av1_dr_prediction_z1_c>,
+ &z1_wrapper<av1_dr_prediction_z1_avx2>,
+ AOM_BITS_8, kZ1Start),
+ DrPredFunc<DrPred>(&z2_wrapper<av1_dr_prediction_z2_c>,
+ &z2_wrapper<av1_dr_prediction_z2_avx2>,
+ AOM_BITS_8, kZ2Start),
+ DrPredFunc<DrPred>(&z3_wrapper<av1_dr_prediction_z3_c>,
+ &z3_wrapper<av1_dr_prediction_z3_avx2>,
+ AOM_BITS_8, kZ3Start)));
+
+TEST_P(LowbdDrPredTest, DISABLED_Speed) {
+ const int angles[] = { 3, 45, 87 };
+ for (enable_upsample_ = 0; enable_upsample_ < 2; ++enable_upsample_) {
+ for (int i = 0; i < 3; ++i) {
+ const int angle = angles[i] + start_angle_;
+ dx_ = av1_get_dx(angle);
+ dy_ = av1_get_dy(angle);
+ printf("enable_upsample: %d angle: %d ~~~~~~~~~~~~~~~\n",
+ enable_upsample_, angle);
+ if (dx_ && dy_) RunTest(true, false, angle);
+ }
+ }
+}
+
+TEST_P(LowbdDrPredTest, OperationCheck) {
+ if (params_.tst_fn == NULL) return;
+ for (enable_upsample_ = 0; enable_upsample_ < 2; ++enable_upsample_) {
+ for (int angle = start_angle_; angle < stop_angle_; ++angle) {
+ dx_ = av1_get_dx(angle);
+ dy_ = av1_get_dy(angle);
+ if (dx_ && dy_) RunTest(false, false, angle);
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(
AVX2, HighbdDrPredTest,
::testing::Values(DrPredFunc<DrPred_Hbd>(
&z1_wrapper_hbd<av1_highbd_dr_prediction_z1_c>,
@@ -375,7 +397,7 @@
&z1_wrapper_hbd<av1_highbd_dr_prediction_z1_c>,
&z1_wrapper_hbd<av1_highbd_dr_prediction_z1_avx2>,
AOM_BITS_12, kZ1Start),
- /*DrPredFunc<DrPred_Hbd>(
+ DrPredFunc<DrPred_Hbd>(
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_c>,
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_avx2>,
AOM_BITS_8, kZ2Start),
@@ -386,7 +408,7 @@
DrPredFunc<DrPred_Hbd>(
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_c>,
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_avx2>,
- AOM_BITS_12, kZ2Start),*/
+ AOM_BITS_12, kZ2Start),
DrPredFunc<DrPred_Hbd>(
&z3_wrapper_hbd<av1_highbd_dr_prediction_z3_c>,
&z3_wrapper_hbd<av1_highbd_dr_prediction_z3_avx2>,