Move highbd dr_prediction AVX2 calculation to 16 bit

For bitdepth < 12 the directional prediction kernels now use 16-bit
AVX2 arithmetic, giving up to a 1.5x speedup over the initial 32-bit
implementation; the 32-bit path is kept for 12-bit input.
Also fixes a valgrind issue.
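
For reference, a scalar sketch of the zone-1 interpolation these kernels
vectorize (illustrative only; the names below are not from this patch):

  #include <stdint.h>

  // One row of zone-1 prediction, without edge upsampling. For bd < 12 every
  // intermediate fits in unsigned 16 bits (at most 1023*32 + 16 + 1023*31),
  // which is what the epi16 path relies on; 12-bit input keeps 32-bit math.
  void dr_z1_row_sketch(uint16_t *dst, const uint16_t *above, int width,
                        int x, int frac_bits, int max_base_x) {
    const int base = x >> frac_bits;
    const int shift = (x & 0x3f) >> 1;  // 0..31
    for (int c = 0; c < width; ++c) {
      if (base + c >= max_base_x) {
        dst[c] = above[max_base_x];  // clamp to the last above pixel
      } else {
        int32_t v = above[base + c] * 32 + 16 +
                    (above[base + c + 1] - above[base + c]) * shift;
        dst[c] = (uint16_t)(v >> 5);
      }
    }
  }
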
BUG=aomedia:2316
Change-Id: Icb59cd419514bc7f90a47583a8c7b17303c0ead7
diff --git a/aom_dsp/x86/intrapred_avx2.c b/aom_dsp/x86/intrapred_avx2.c
index 17f35a0..783fec5 100644
--- a/aom_dsp/x86/intrapred_avx2.c
+++ b/aom_dsp/x86/intrapred_avx2.c
@@ -64,6 +64,74 @@
}
}
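+// 16-bit shuffle masks: row k shifts the loaded pixels up by k lanes and
+// fills the leading lanes with copies of the first pixel.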
+static uint8_t HighbdLoadMaskx[8][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
+ { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
+};
+
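+// Even/odd de-interleave masks for the upsampled 4-wide z2 path: even source
+// pixels (a[x]) end up in the low 64 bits, odd pixels (a[x+1]) in the high
+// 64 bits, adjusted per base_shift.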
+static uint8_t HighbdEvenOddMaskx4[4][16] = {
+ { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
+ { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
+ { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
+ { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 }
+};
+
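+// Even/odd de-interleave masks over a 16-pixel window held in two 128-bit
+// registers: the first 16 bytes gather the even pixels, the last 16 bytes the
+// odd pixels; byte values above 15 refer to the second register and are
+// resolved by the blend in the callers.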
+static uint8_t HighbdEvenOddMaskx[8][32] = {
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29,
+ 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
+ { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27,
+ 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
+ { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25,
+ 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27 },
+ { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23,
+ 0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21,
+ 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19, 22, 23 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17, 20, 21 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15, 18, 19 },
+ { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
+};
+
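+// Per-lane 16-bit blend masks: row k has its first k lanes set and is used in
+// the z2 kernels to take the first k output pixels from the left (y)
+// prediction and the rest from the above (x) prediction.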
+static uint16_t HighbdBaseMask[17][16] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
+ 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
+ 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
+ { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
+};
+
static INLINE void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
__m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
@@ -1008,6 +1076,78 @@
// final pixels will be caluculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
__m256i a0, a1, a32, a16;
+ __m256i diff, c3f;
+ __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
+ __m128i a0_128, a1_128;
+ a16 = _mm256_set1_epi16(16);
+ a_mbase_x = _mm_set1_epi16(above[max_base_x]);
+ max_base_x128 = _mm_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i res1;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ dst[i] = a_mbase_x; // save 4 values
+ }
+ return;
+ }
+
+ a0_128 = _mm_loadu_si128((__m128i *)(above + base));
+ a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
+
+ if (upsample_above) {
+ a0_128 = _mm_shuffle_epi8(
+ a0_128,
+ _mm_setr_epi8(0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15));
+ a1_128 = _mm_srli_si128(a0_128, 8);
+
+ base_inc128 = _mm_setr_epi16(base, base + 2, base + 4, base + 6, base + 8,
+ base + 10, base + 12, base + 14);
+ shift = _mm256_srli_epi16(
+ _mm256_and_si256(
+ _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above),
+ _mm256_set1_epi16(0x3f)),
+ 1);
+ } else {
+ base_inc128 = _mm_setr_epi16(base, base + 1, base + 2, base + 3, base + 4,
+ base + 5, base + 6, base + 7);
+ shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
+ }
+ a0 = _mm256_castsi128_si256(a0_128);
+ a1 = _mm256_castsi128_si256(a1_128);
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi16(diff, shift);
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+ res1 = _mm256_castsi256_si128(res);
+
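+ // Lanes at or past max_base_x fall back to the replicated above[max_base_x].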
+ mask128 = _mm_cmpgt_epi16(max_base_x128, base_inc128);
+ dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
+ x += dx;
+ }
+}
+
+static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_4xN_internal_avx2(
+ int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
+ const int frac_bits = 6 - upsample_above;
+ const int max_base_x = ((N + 4) - 1) << upsample_above;
+ int x;
+ // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16;
__m256i diff;
__m128i a_mbase_x, max_base_x128, base_inc128, mask128;
@@ -1068,17 +1208,22 @@
static void highbd_dr_prediction_z1_4xN_avx2(int N, uint16_t *dst,
ptrdiff_t stride,
const uint16_t *above,
- int upsample_above, int dx) {
+ int upsample_above, int dx,
+ int bd) {
__m128i dstvec[16];
-
- highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
- dx);
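+ // With bd < 12 the interpolation fits in unsigned 16 bits
+ // (at most 1023 * 32 + 16 + 1023 * 31 < 2^16), so the epi16 path is safe;
+ // 12-bit input still needs the 32-bit version.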
+ if (bd < 12) {
+ highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
+ dx);
+ } else {
+ highbd_dr_prediction_32bit_z1_4xN_internal_avx2(N, dstvec, above,
+ upsample_above, dx);
+ }
for (int i = 0; i < N; i++) {
_mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
}
}
-static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
+static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_8xN_internal_avx2(
int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
const int frac_bits = 6 - upsample_above;
const int max_base_x = ((8 + N) - 1) << upsample_above;
@@ -1162,20 +1307,108 @@
}
}
+static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
+ int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
+ const int frac_bits = 6 - upsample_above;
+ const int max_base_x = ((8 + N) - 1) << upsample_above;
+
+ int x;
+ // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16, c3f;
+ __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
+ __m128i a0_x128, a1_x128;
+
+ a16 = _mm256_set1_epi16(16);
+ a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
+ max_base_x256 = _mm256_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, res1, shift;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ dst[i] = _mm256_castsi256_si128(a_mbase_x); // save 8 values
+ }
+ return;
+ }
+
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base));
+ if (upsample_above) {
+ __m128i mask, atmp0, atmp1, atmp2, atmp3;
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 8));
+ atmp0 = _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
+ atmp1 = _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
+ atmp2 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
+ atmp3 =
+ _mm_shuffle_epi8(a1_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
+ mask =
+ _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[0], _mm_set1_epi8(15));
+ a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
+ mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[0] + 16),
+ _mm_set1_epi8(15));
+ a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
+
+ base_inc256 = _mm256_setr_epi16(base, base + 2, base + 4, base + 6,
+ base + 8, base + 10, base + 12, base + 14,
+ 0, 0, 0, 0, 0, 0, 0, 0);
+ shift = _mm256_srli_epi16(
+ _mm256_and_si256(
+ _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
+ 1);
+ } else {
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 1));
+ base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
+ base + 4, base + 5, base + 6, base + 7, 0,
+ 0, 0, 0, 0, 0, 0, 0);
+ shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
+ }
+ a0 = _mm256_castsi128_si256(a0_x128);
+ a1 = _mm256_castsi128_si256(a1_x128);
+
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi16(diff, shift);
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+
+ mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
+ res1 = _mm256_blendv_epi8(a_mbase_x, res, mask256);
+ dst[r] = _mm256_castsi256_si128(res1);
+ x += dx;
+ }
+}
+
static void highbd_dr_prediction_z1_8xN_avx2(int N, uint16_t *dst,
ptrdiff_t stride,
const uint16_t *above,
- int upsample_above, int dx) {
+ int upsample_above, int dx,
+ int bd) {
__m128i dstvec[32];
-
- highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
- dx);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
+ dx);
+ } else {
+ highbd_dr_prediction_32bit_z1_8xN_internal_avx2(N, dstvec, above,
+ upsample_above, dx);
+ }
for (int i = 0; i < N; i++) {
_mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
}
}
-static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
+static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_16xN_internal_avx2(
int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
int x;
// here upsample_above is 0 by design of av1_use_intra_edge_upsample
@@ -1255,19 +1488,82 @@
}
}
+static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
+ int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
+ int x;
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((16 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16, c3f;
+ __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
+
+ a16 = _mm256_set1_epi16(16);
+ a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
+ max_base_x256 = _mm256_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ dstvec[i] = a_mbase_x; // save 16 values
+ }
+ return;
+ }
+ __m256i shift =
+ _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
+
+ a0 = _mm256_loadu_si256((__m256i *)(above + base));
+ a1 = _mm256_loadu_si256((__m256i *)(above + base + 1));
+
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi16(diff, shift);
+
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5); // 16 16bit values
+
+ base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
+ base + 4, base + 5, base + 6, base + 7,
+ base + 8, base + 9, base + 10, base + 11,
+ base + 12, base + 13, base + 14, base + 15);
+ mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
+ dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res, mask256);
+ x += dx;
+ }
+}
+
static void highbd_dr_prediction_z1_16xN_avx2(int N, uint16_t *dst,
ptrdiff_t stride,
const uint16_t *above,
- int upsample_above, int dx) {
+ int upsample_above, int dx,
+ int bd) {
__m256i dstvec[64];
- highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
- dx);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
+ dx);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(N, dstvec, above,
+ upsample_above, dx);
+ }
for (int i = 0; i < N; i++) {
_mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
}
}
-static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
+static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_32xN_internal_avx2(
int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
int x;
// here upsample_above is 0 by design of av1_use_intra_edge_upsample
@@ -1281,12 +1577,13 @@
// above[x+1] - above[x]
// final pixels will be caluculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0, a0_1, a1, a1_1, a32, a16;
+ __m256i a0, a0_1, a1, a1_1, a32, a16, c3f;
__m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
a16 = _mm256_set1_epi32(16);
a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
max_base_x256 = _mm256_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
x = dx;
for (int r = 0; r < N; r++) {
@@ -1301,8 +1598,8 @@
return;
}
- __m256i shift = _mm256_srli_epi32(
- _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
+ __m256i shift =
+ _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
for (int j = 0; j < 32; j += 16) {
int mdif = max_base_x - (base + j);
@@ -1363,24 +1660,102 @@
}
}
+static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
+ int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
+ int x;
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((32 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16, c3f;
+ __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
+
+ a16 = _mm256_set1_epi16(16);
+ a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
+ max_base_x256 = _mm256_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
+
+ x = dx;
+ for (int r = 0; r < N; r++) {
+ __m256i b, res;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ dstvec[i] = a_mbase_x; // save 32 values
+ dstvec[i + N] = a_mbase_x;
+ }
+ return;
+ }
+
+ __m256i shift =
+ _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
+
+ for (int j = 0; j < 32; j += 16) {
+ int mdif = max_base_x - (base + j);
+ if (mdif <= 0) {
+ res = a_mbase_x;
+ } else {
+ a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
+ a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
+
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi16(diff, shift);
+
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+
+ base_inc256 = _mm256_setr_epi16(
+ base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
+ base + j + 5, base + j + 6, base + j + 7, base + j + 8,
+ base + j + 9, base + j + 10, base + j + 11, base + j + 12,
+ base + j + 13, base + j + 14, base + j + 15);
+
+ mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
+ res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
+ }
+ if (!j)
+ dstvec[r] = res;
+ else
+ dstvec[r + N] = res;
+ }
+ x += dx;
+ }
+}
+
static void highbd_dr_prediction_z1_32xN_avx2(int N, uint16_t *dst,
ptrdiff_t stride,
const uint16_t *above,
- int upsample_above, int dx) {
+ int upsample_above, int dx,
+ int bd) {
__m256i dstvec[128];
-
- highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
- dx);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
+ dx);
+ } else {
+ highbd_dr_prediction_32bit_z1_32xN_internal_avx2(N, dstvec, above,
+ upsample_above, dx);
+ }
for (int i = 0; i < N; i++) {
_mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
_mm256_storeu_si256((__m256i *)(dst + stride * i + 16), dstvec[i + N]);
}
}
-static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
- ptrdiff_t stride,
- const uint16_t *above,
- int upsample_above, int dx) {
+static void highbd_dr_prediction_32bit_z1_64xN_avx2(int N, uint16_t *dst,
+ ptrdiff_t stride,
+ const uint16_t *above,
+ int upsample_above,
+ int dx) {
int x;
// here upsample_above is 0 by design of av1_use_intra_edge_upsample
@@ -1477,6 +1852,81 @@
}
}
+static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
+ ptrdiff_t stride,
+ const uint16_t *above,
+ int upsample_above, int dx) {
+ int x;
+
+ // here upsample_above is 0 by design of av1_use_intra_edge_upsample
+ (void)upsample_above;
+ const int frac_bits = 6;
+ const int max_base_x = ((64 + N) - 1);
+
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0, a1, a32, a16, c3f;
+ __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
+
+ a16 = _mm256_set1_epi16(16);
+ a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
+ max_base_x256 = _mm256_set1_epi16(max_base_x);
+ c3f = _mm256_set1_epi16(0x3f);
+
+ x = dx;
+ for (int r = 0; r < N; r++, dst += stride) {
+ __m256i b, res;
+
+ int base = x >> frac_bits;
+ if (base >= max_base_x) {
+ for (int i = r; i < N; ++i) {
+ _mm256_storeu_si256((__m256i *)dst, a_mbase_x); // save 32 values
+ _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
+ _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
+ _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
+ dst += stride;
+ }
+ return;
+ }
+
+ __m256i shift =
+ _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
+
+ for (int j = 0; j < 64; j += 16) {
+ int mdif = max_base_x - (base + j);
+ if (mdif <= 0) {
+ _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
+ } else {
+ a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
+ a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
+
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi16(diff, shift);
+
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+
+ base_inc256 = _mm256_setr_epi16(
+ base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
+ base + j + 5, base + j + 6, base + j + 7, base + j + 8,
+ base + j + 9, base + j + 10, base + j + 11, base + j + 12,
+ base + j + 13, base + j + 14, base + j + 15);
+
+ mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
+ res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
+ _mm256_storeu_si256((__m256i *)(dst + j), res); // 16 16bit values
+ }
+ }
+ x += dx;
+ }
+}
+
// Directional prediction, zone 1: 0 < angle < 90
void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
int bh, const uint16_t *above,
@@ -1484,182 +1934,61 @@
int dx, int dy, int bd) {
(void)left;
(void)dy;
- (void)bd;
switch (bw) {
case 4:
highbd_dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above,
- dx);
+ dx, bd);
break;
case 8:
highbd_dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above,
- dx);
+ dx, bd);
break;
case 16:
highbd_dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above,
- dx);
+ dx, bd);
break;
case 32:
highbd_dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above,
- dx);
+ dx, bd);
break;
case 64:
- highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above,
- dx);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above,
+ upsample_above, dx);
+ } else {
+ highbd_dr_prediction_32bit_z1_64xN_avx2(bh, dst, stride, above,
+ upsample_above, dx);
+ }
break;
default: break;
}
return;
}
-static void highbd_transpose_TX_8X8(const uint16_t *src, ptrdiff_t pitchSrc,
- uint16_t *dst, ptrdiff_t pitchDst) {
- __m128i r0, r1, r2, r3, r4, r5, r6, r7, r0_Lo, r1_Lo, r2_Lo, r3_Lo, r4_Lo,
- r5_Lo, r6_Lo;
- r0 = _mm_load_si128(
- (__m128i *)(src + 0 * pitchSrc)); // 07,06,05,04,03,02,01,00
- r1 = _mm_load_si128(
- (__m128i *)(src + 1 * pitchSrc)); // 17,16,15,14,13,12,11,10
- r2 = _mm_load_si128(
- (__m128i *)(src + 2 * pitchSrc)); // 27,26,25,24,23,22,21,20
- r3 = _mm_load_si128(
- (__m128i *)(src + 3 * pitchSrc)); // 37,36,35,34,33,32,31,30
- r4 = _mm_load_si128(
- (__m128i *)(src + 4 * pitchSrc)); // 47,46,45,44,43,42,41,40
- r5 = _mm_load_si128(
- (__m128i *)(src + 5 * pitchSrc)); // 57,56,55,54,53,52,51,50
- r6 = _mm_load_si128(
- (__m128i *)(src + 6 * pitchSrc)); // 67,66,65,64,63,62,61,60
- r7 = _mm_load_si128(
- (__m128i *)(src + 7 * pitchSrc)); // 77,76,75,74,73,72,71,70
-
- r0_Lo = _mm_unpacklo_epi16(r0, r1);
- r2_Lo = _mm_unpacklo_epi16(r2, r3);
- r4_Lo = _mm_unpacklo_epi16(r4, r5);
- r6_Lo = _mm_unpacklo_epi16(r6, r7);
-
- r1_Lo = r0_Lo;
- r0_Lo = _mm_unpacklo_epi32(r0_Lo, r2_Lo);
- r1_Lo = _mm_unpackhi_epi32(r1_Lo, r2_Lo);
- r5_Lo = r4_Lo;
- r4_Lo = _mm_unpacklo_epi32(r4_Lo, r6_Lo);
- r5_Lo = _mm_unpackhi_epi32(r5_Lo, r6_Lo);
- r2_Lo = r0_Lo;
- r0_Lo = _mm_unpacklo_epi64(r0_Lo, r4_Lo); // 64
- r2_Lo = _mm_unpackhi_epi64(r2_Lo, r4_Lo);
- r3_Lo = r1_Lo;
- r1_Lo = _mm_unpacklo_epi64(r1_Lo, r5_Lo);
- r3_Lo = _mm_unpackhi_epi64(r3_Lo, r5_Lo);
-
- _mm_storeu_si128((__m128i *)(dst + 0 * pitchDst), r0_Lo);
- _mm_storeu_si128((__m128i *)(dst + 1 * pitchDst), r2_Lo);
- _mm_storeu_si128((__m128i *)(dst + 2 * pitchDst), r1_Lo);
- _mm_storeu_si128((__m128i *)(dst + 3 * pitchDst), r3_Lo);
-
- r0 = _mm_unpackhi_epi16(r0, r1);
- r2 = _mm_unpackhi_epi16(r2, r3);
- r4 = _mm_unpackhi_epi16(r4, r5);
- r6 = _mm_unpackhi_epi16(r6, r7);
-
- r1 = r0;
- r0 = _mm_unpacklo_epi32(r0, r2);
- r1 = _mm_unpackhi_epi32(r1, r2);
- r5 = r4;
- r4 = _mm_unpacklo_epi32(r4, r6);
- r5 = _mm_unpackhi_epi32(r5, r6);
- r2 = r0;
- r0 = _mm_unpacklo_epi64(r0, r4);
- r2 = _mm_unpackhi_epi64(r2, r4);
- r3 = r1;
- r1 = _mm_unpacklo_epi64(r1, r5);
- r3 = _mm_unpackhi_epi64(r3, r5);
-
- _mm_storeu_si128((__m128i *)(dst + 4 * pitchDst), r0);
- _mm_storeu_si128((__m128i *)(dst + 5 * pitchDst), r2);
- _mm_storeu_si128((__m128i *)(dst + 6 * pitchDst), r1);
- _mm_storeu_si128((__m128i *)(dst + 7 * pitchDst), r3);
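+// Transpose one 16x16 block of 16-bit pixels with highbd_transpose16x16_avx2.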
+static void highbd_transpose_TX_16X16(const uint16_t *src, ptrdiff_t pitchSrc,
+ uint16_t *dst, ptrdiff_t pitchDst) {
+ __m256i r[16];
+ __m256i d[16];
+ for (int j = 0; j < 16; j++) {
+ r[j] = _mm256_loadu_si256((__m256i *)(src + j * pitchSrc));
+ }
+ highbd_transpose16x16_avx2(r, d);
+ for (int j = 0; j < 16; j++) {
+ _mm256_storeu_si256((__m256i *)(dst + j * pitchDst), d[j]);
+ }
}
-static uint8_t HighbdLoadMaskx[8][16] = {
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
- { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
-};
+static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
+ uint16_t *dst, ptrdiff_t pitchDst, int width,
+ int height) {
+ for (int j = 0; j < height; j += 16)
+ for (int i = 0; i < width; i += 16)
+ highbd_transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
+ dst + j * pitchDst + i, pitchDst);
+}
-static uint8_t HighbdEvenOddMaskx4[8][16] = {
- { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14,
- 15 }, // 0=0,1, 1=2,3, 2=4,5, 3=6,7, 4=8,9, 5=10,11, 6=12,13, 7=14,15,
- // >7=0,1
- { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
- { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
- { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 0, 1, 0, 1 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 0, 1 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 0, 1 },
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15 }
-};
-
-static uint16_t HighbdEvenOddMaskx8_2[8][16] = {
- { 0, 2, 4, 6, 8, 10, 12, 14 }, { 2, 2, 4, 6, 8, 10, 12, 14 },
- { 4, 4, 4, 6, 8, 10, 12, 14 }, { 6, 6, 6, 6, 8, 10, 12, 14 },
- { 8, 8, 8, 8, 8, 10, 12, 14 }, { 10, 10, 10, 10, 10, 10, 12, 14 },
- { 12, 12, 12, 12, 12, 12, 12, 14 }, { 14, 14, 14, 14, 14, 14, 14, 14 },
-};
-
-static uint16_t HighbdBaseMask[17][16] = {
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- },
- { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
- 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
- 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
- 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
- { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
-};
-
-static void highbd_dr_prediction_z2_Nx4_avx2(
+static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
const uint16_t *left, int upsample_above, int upsample_left, int dx,
int dy) {
@@ -1785,6 +2114,130 @@
}
}
+static void highbd_dr_prediction_z2_Nx4_avx2(
+ int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+ const uint16_t *left, int upsample_above, int upsample_left, int dx,
+ int dy) {
+ const int min_base_x = -(1 << upsample_above);
+ const int min_base_y = -(1 << upsample_left);
+ const int frac_bits_x = 6 - upsample_above;
+ const int frac_bits_y = 6 - upsample_left;
+
+ // assert(dx > 0);
+ // pre-filter above pixels
+ // store in temp buffers:
+ // above[x] * 32 + 16
+ // above[x+1] - above[x]
+ // final pixels will be calculated as:
+ // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+ __m256i a0_x, a1_x, a32, a16;
+ __m256i diff;
+ __m128i c3f, min_base_y128;
+
+ a16 = _mm256_set1_epi16(16);
+ c3f = _mm_set1_epi16(0x3f);
+ min_base_y128 = _mm_set1_epi16(min_base_y);
+
+ for (int r = 0; r < N; r++) {
+ __m256i b, res, shift;
+ __m128i resx, resy, resxy;
+ __m128i a0_x128, a1_x128;
+ int y = r + 1;
+ int base_x = (-y * dx) >> frac_bits_x;
+ int base_shift = 0;
+ if (base_x < (min_base_x - 1)) {
+ base_shift = (min_base_x - base_x - 1) >> upsample_above;
+ }
+ int base_min_diff =
+ (min_base_x - base_x + upsample_above) >> upsample_above;
+ if (base_min_diff > 4) {
+ base_min_diff = 4;
+ } else {
+ if (base_min_diff < 0) base_min_diff = 0;
+ }
+
+ if (base_shift > 3) {
+ a0_x = _mm256_setzero_si256();
+ a1_x = _mm256_setzero_si256();
+ shift = _mm256_setzero_si256();
+ } else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+ if (upsample_above) {
+ a0_x128 = _mm_shuffle_epi8(a0_x128,
+ *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 8);
+
+ shift = _mm256_castsi128_si256(_mm_srli_epi16(
+ _mm_and_si128(
+ _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx,
+ (3 << 6) - y * dx, 0, 0, 0, 0),
+ upsample_above),
+ c3f),
+ 1));
+ } else {
+ a0_x128 =
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 2);
+
+ shift = _mm256_castsi128_si256(_mm_srli_epi16(
+ _mm_and_si128(
+ _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+ (3 << 6) - y * dx, 0, 0, 0, 0),
+ c3f),
+ 1));
+ }
+ a0_x = _mm256_castsi128_si256(a0_x128);
+ a1_x = _mm256_castsi128_si256(a1_x128);
+ }
+ // y calc
+ __m128i a0_y, a1_y, shifty;
+ if (base_x < min_base_x) {
+ __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+ DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
+ r6 = _mm_set1_epi16(r << 6);
+ dy128 = _mm_set1_epi16(dy);
+ c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
+ y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
+ base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
+ mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
+ base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+ _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+ a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
+ left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
+ a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+ left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
+ 0, 0);
+
+ if (upsample_left) {
+ shifty = _mm_srli_epi16(
+ _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
+ } else {
+ shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
+ }
+ a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
+ a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
+ shift = _mm256_inserti128_si256(shift, shifty, 1);
+ }
+
+ diff = _mm256_sub_epi16(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0_x, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+
+ b = _mm256_mullo_epi16(diff, shift);
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+
+ resx = _mm256_castsi256_si128(res);
+ resy = _mm256_extracti128_si256(res, 1);
+ resxy =
+ _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+ _mm_storel_epi64((__m128i *)(dst), resxy);
+ dst += stride;
+ }
+}
+
static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
const uint16_t *left, int upsample_above, int upsample_left, int dx,
@@ -1828,25 +2281,24 @@
if (base_shift > 7) {
resx = _mm_setzero_si128();
} else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
if (upsample_above) {
- a0_x128 = _mm_setr_epi16(
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][0]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][1]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][2]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][3]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][4]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][5]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][6]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][7]]);
- a1_x128 = _mm_setr_epi16(
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][0]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][1]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][2]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][3]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][4]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][5]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][6]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][7]]);
+ __m128i mask, atmp0, atmp1, atmp2, atmp3;
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
+ atmp0 = _mm_shuffle_epi8(a0_x128,
+ *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+ atmp1 = _mm_shuffle_epi8(a1_x128,
+ *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+ atmp2 = _mm_shuffle_epi8(
+ a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+ atmp3 = _mm_shuffle_epi8(
+ a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+ mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
+ _mm_set1_epi8(15));
+ a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
+ mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
+ _mm_set1_epi8(15));
+ a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
shift = _mm256_srli_epi32(
_mm256_and_si256(
_mm256_slli_epi32(
@@ -1858,7 +2310,6 @@
c3f),
1);
} else {
- a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
a0_x128 =
_mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
@@ -1874,7 +2325,6 @@
c3f),
1);
}
-
a0_x = _mm256_cvtepu16_epi32(a0_x128);
a1_x = _mm256_cvtepu16_epi32(a1_x128);
@@ -1983,25 +2433,25 @@
a1_x = _mm256_setzero_si256();
shift = _mm256_setzero_si256();
} else {
+ a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
if (upsample_above) {
- a0_x128 = _mm_setr_epi16(
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][0]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][1]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][2]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][3]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][4]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][5]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][6]],
- above[base_x + HighbdEvenOddMaskx8_2[base_shift][7]]);
- a1_x128 = _mm_setr_epi16(
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][0]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][1]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][2]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][3]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][4]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][5]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][6]],
- above[base_x + 1 + HighbdEvenOddMaskx8_2[base_shift][7]]);
+ __m128i mask, atmp0, atmp1, atmp2, atmp3;
+ a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
+ atmp0 = _mm_shuffle_epi8(a0_x128,
+ *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+ atmp1 = _mm_shuffle_epi8(a1_x128,
+ *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+ atmp2 = _mm_shuffle_epi8(
+ a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+ atmp3 = _mm_shuffle_epi8(
+ a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+ mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
+ _mm_set1_epi8(15));
+ a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
+ mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
+ _mm_set1_epi8(15));
+ a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
+
shift = _mm256_castsi128_si256(_mm_srli_epi16(
_mm_and_si128(
_mm_slli_epi16(
@@ -2013,7 +2463,6 @@
c3f),
1));
} else {
- a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
a0_x128 =
_mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
@@ -2035,7 +2484,7 @@
// y calc
__m128i a0_y, a1_y, shifty;
if (base_x < min_base_x) {
- DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
+ DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
__m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
r6 = _mm_set1_epi16(r << 6);
dy128 = _mm_set1_epi16(dy);
@@ -2209,6 +2658,7 @@
1); // 16 16bit values
// y calc
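+ // Keep resy defined even when the left-prediction branch below is skipped.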
+ resy[0] = _mm256_setzero_si256();
if ((base_x < min_base_x)) {
DECLARE_ALIGNED(32, int, base_y_c[16]);
__m256i r6, c256, dy256, y_c256, y_c_1_256, base_y_c256, mask256;
@@ -2276,9 +2726,8 @@
resy[0] =
_mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
1); // 16 16bit values
- } else {
- resy[0] = resx[0];
}
+
resxy = _mm256_blendv_epi8(resx[0], resy[0],
*(__m256i *)HighbdBaseMask[base_min_diff]);
_mm256_storeu_si256((__m256i *)(dst + j), resxy);
@@ -2397,6 +2846,7 @@
resx = _mm256_srli_epi16(res, 5); // 16 16-bit values
// y calc
+ resy = _mm256_setzero_si256();
__m256i a0_y, a1_y, shifty;
if ((base_x < min_base_x)) {
DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
@@ -2439,8 +2889,6 @@
b = _mm256_mullo_epi16(diff, shifty);
res = _mm256_add_epi16(a32, b);
resy = _mm256_srli_epi16(res, 5);
- } else {
- resy = _mm256_setzero_si256();
}
resxy = _mm256_blendv_epi8(resx, resy,
@@ -2462,8 +2910,14 @@
assert(dy > 0);
switch (bw) {
case 4:
- highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
- upsample_above, upsample_left, dx, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
+ upsample_above, upsample_left, dx, dy);
+ } else {
+ highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
+ upsample_above, upsample_left,
+ dx, dy);
+ }
break;
case 8:
if (bd < 12) {
@@ -2488,21 +2942,19 @@
}
}
-static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
- uint16_t *dst, ptrdiff_t pitchDst, int width,
- int height) {
- for (int j = 0; j < height; j += 8)
- for (int i = 0; i < width; i += 8)
- highbd_transpose_TX_8X8(src + i * pitchSrc + j, pitchSrc,
- dst + j * pitchDst + i, pitchDst);
-}
-
+// Directional prediction, zone 3 functions
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[4], d[4];
-
- highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_4xN_internal_avx2(4, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2],
&dstvec[3], &d[0], &d[1], &d[2], &d[3]);
_mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
@@ -2514,10 +2966,16 @@
static void highbd_dr_prediction_z3_8x8_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[8], d[8];
-
- highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_8xN_internal_avx2(8, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
&dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
@@ -2529,10 +2987,17 @@
static void highbd_dr_prediction_z3_4x8_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[4], d[8];
+ if (bd < 12) {
+ highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_8xN_internal_avx2(4, dstvec, left,
+ upsample_left, dy);
+ }
- highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left, dy);
highbd_transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
&d[7]);
@@ -2543,10 +3008,17 @@
static void highbd_dr_prediction_z3_8x4_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[8], d[4];
+ if (bd < 12) {
+ highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_4xN_internal_avx2(8, dstvec, left,
+ upsample_left, dy);
+ }
- highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left, dy);
highbd_transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
&dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
&d[0], &d[1], &d[2], &d[3]);
@@ -2558,11 +3030,16 @@
static void highbd_dr_prediction_z3_8x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[8], d[8];
-
- highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(8, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose8x16_16x8_avx2(dstvec, d);
for (int i = 0; i < 8; i++) {
_mm_storeu_si128((__m128i *)(dst + i * stride),
@@ -2576,11 +3053,16 @@
static void highbd_dr_prediction_z3_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[16], d[16];
-
- highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_8xN_internal_avx2(16, dstvec, left,
+ upsample_left, dy);
+ }
for (int i = 0; i < 16; i += 8) {
highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
&dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
@@ -2596,11 +3078,16 @@
static void highbd_dr_prediction_z3_4x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[4], d[4], d1;
-
- highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(4, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose4x16_avx2(dstvec, d);
for (int i = 0; i < 4; i++) {
_mm_storel_epi64((__m128i *)(dst + i * stride),
@@ -2617,11 +3104,16 @@
static void highbd_dr_prediction_z3_16x4_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[16], d[8];
-
- highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_4xN_internal_avx2(16, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose16x4_8x8_sse2(dstvec, d);
_mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
@@ -2636,11 +3128,17 @@
static void highbd_dr_prediction_z3_8x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[16], d[16];
+ if (bd < 12) {
+ highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_32xN_internal_avx2(8, dstvec, left,
+ upsample_left, dy);
+ }
- highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
- dy);
for (int i = 0; i < 16; i += 8) {
highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
}
@@ -2665,11 +3163,17 @@
static void highbd_dr_prediction_z3_32x8_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m128i dstvec[32], d[32];
+ if (bd < 12) {
+ highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_8xN_internal_avx2(32, dstvec, left,
+ upsample_left, dy);
+ }
- highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
- dy);
for (int i = 0; i < 32; i += 8) {
highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
&dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
@@ -2687,11 +3191,17 @@
static void highbd_dr_prediction_z3_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[16], d[16];
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(16, dstvec, left,
+ upsample_left, dy);
+ }
- highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
- dy);
highbd_transpose16x16_avx2(dstvec, d);
for (int i = 0; i < 16; i++) {
@@ -2701,12 +3211,16 @@
static void highbd_dr_prediction_z3_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[64], d[16];
-
- highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
- dy);
-
+ if (bd < 12) {
+ highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_32xN_internal_avx2(32, dstvec, left,
+ upsample_left, dy);
+ }
highbd_transpose16x16_avx2(dstvec, d);
for (int j = 0; j < 16; j++) {
_mm256_storeu_si256((__m256i *)(dst + j * stride), d[j]);
@@ -2727,19 +3241,30 @@
static void highbd_dr_prediction_z3_64x64_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
- highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_64xN_avx2(64, dstT, 64, left, upsample_left,
+ dy);
+ }
highbd_transpose(dstT, 64, dst, stride, 64, 64);
}
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[32], d[32];
-
- highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_32xN_internal_avx2(16, dstvec, left,
+ upsample_left, dy);
+ }
for (int i = 0; i < 32; i += 8) {
highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
}
@@ -2764,11 +3289,16 @@
static void highbd_dr_prediction_z3_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[32], d[16];
-
- highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(32, dstvec, left,
+ upsample_left, dy);
+ }
for (int i = 0; i < 32; i += 16) {
highbd_transpose16x16_avx2((dstvec + i), d);
for (int j = 0; j < 16; j++) {
@@ -2779,36 +3309,54 @@
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
uint16_t dstT[64 * 32];
- highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_64xN_avx2(32, dstT, 64, left, upsample_left,
+ dy);
+ }
highbd_transpose(dstT, 64, dst, stride, 32, 64);
}
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
- highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
+ highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy, bd);
highbd_transpose(dstT, 32, dst, stride, 64, 32);
return;
}
static void highbd_dr_prediction_z3_16x64_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
- highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_64xN_avx2(16, dstT, 64, left, upsample_left,
+ dy);
+ }
highbd_transpose(dstT, 64, dst, stride, 16, 64);
}
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left,
- int upsample_left, int dy) {
+ int upsample_left, int dy,
+ int bd) {
__m256i dstvec[64], d[16];
-
- highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
- dy);
+ if (bd < 12) {
+ highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
+ dy);
+ } else {
+ highbd_dr_prediction_32bit_z1_16xN_internal_avx2(64, dstvec, left,
+ upsample_left, dy);
+ }
for (int i = 0; i < 64; i += 16) {
highbd_transpose16x16_avx2((dstvec + i), d);
for (int j = 0; j < 16; j++) {
@@ -2823,28 +3371,30 @@
int dx, int dy, int bd) {
(void)above;
(void)dx;
- (void)bd;
+
assert(dx == 1);
assert(dy > 0);
if (bw == bh) {
switch (bw) {
case 4:
- highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
+ highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy,
+ bd);
break;
case 8:
- highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
+ highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy,
+ bd);
break;
case 16:
- highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left,
- dy);
+ highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy,
+ bd);
break;
case 32:
- highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left,
- dy);
+ highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy,
+ bd);
break;
case 64:
- highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left,
- dy);
+ highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy,
+ bd);
break;
}
} else {
@@ -2853,34 +3403,34 @@
switch (bw) {
case 4:
highbd_dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 8:
highbd_dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 16:
highbd_dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 32:
highbd_dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
}
} else {
switch (bw) {
case 4:
highbd_dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 8:
highbd_dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 16:
highbd_dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
}
}
@@ -2889,34 +3439,34 @@
switch (bh) {
case 4:
highbd_dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 8:
highbd_dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 16:
highbd_dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 32:
highbd_dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
}
} else {
switch (bh) {
case 4:
highbd_dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 8:
highbd_dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
case 16:
highbd_dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left,
- dy);
+ dy, bd);
break;
}
}
@@ -3018,10 +3568,33 @@
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
-static AOM_FORCE_INLINE void dr_prediction_z1_4xN_internal_avx2(
- int N, __m128i *dst, const uint8_t *above, int upsample_above, int dx) {
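+// Byte shuffle masks for the 8-bit path: row k shifts the loaded pixels up by
+// k bytes and fills the leading bytes with copies of the first pixel.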
+static uint8_t LoadMaskx[8][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 },
+ { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
+ { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 },
+ { 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+ { 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8 },
+};
+
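+// Even/odd de-interleave masks for the upsampled 8-bit path: even source bytes
+// go to the low half, odd bytes to the high half, adjusted per base_shift.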
+static uint8_t EvenOddMaskx[8][16] = {
+ { 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15 },
+ { 0, 1, 3, 5, 7, 9, 11, 13, 0, 2, 4, 6, 8, 10, 12, 14 },
+ { 0, 0, 2, 4, 6, 8, 10, 12, 0, 0, 3, 5, 7, 9, 11, 13 },
+ { 0, 0, 0, 3, 5, 7, 9, 11, 0, 0, 0, 4, 6, 8, 10, 12 },
+ { 0, 0, 0, 0, 4, 6, 8, 10, 0, 0, 0, 0, 5, 7, 9, 11 },
+ { 0, 0, 0, 0, 0, 5, 7, 9, 0, 0, 0, 0, 0, 6, 8, 10 },
+ { 0, 0, 0, 0, 0, 0, 6, 8, 0, 0, 0, 0, 0, 0, 7, 9 },
+ { 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 8 }
+};
+
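+// Unified zone-1 helper for the 8-bit path: H is the block width (at most 16,
+// one __m128i row of output), W the number of rows.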
+static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_avx2(
+ int H, int W, __m128i *dst, const uint8_t *above, int upsample_above,
+ int dx) {
const int frac_bits = 6 - upsample_above;
- const int max_base_x = ((N + 4) - 1) << upsample_above;
+ const int max_base_x = ((W + H) - 1) << upsample_above;
int x;
// a assert(dx > 0);
// pre-filter above pixels
@@ -3039,27 +3612,25 @@
c3f = _mm256_set1_epi16(0x3f);
x = dx;
- for (int r = 0; r < N; r++) {
+ for (int r = 0; r < W; r++) {
__m256i b, res, shift;
__m128i res1, a0_128, a1_128;
int base = x >> frac_bits;
int base_max_diff = (max_base_x - base) >> upsample_above;
if (base_max_diff <= 0) {
- for (int i = r; i < N; ++i) {
+ for (int i = r; i < W; ++i) {
dst[i] = a_mbase_x; // save 4 values
}
return;
}
- if (base_max_diff > 4) base_max_diff = 4;
+ if (base_max_diff > H) base_max_diff = H;
a0_128 = _mm_loadu_si128((__m128i *)(above + base));
- a1_128 = _mm_srli_si128(a0_128, 1);
+ a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
if (upsample_above) {
- a0_128 = _mm_shuffle_epi8(
- a0_128,
- _mm_setr_epi8(0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15));
- a1_128 = _mm_srli_si128(a0_128, 4);
+ a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)EvenOddMaskx[0]);
+ a1_128 = _mm_srli_si128(a0_128, 8);
shift = _mm256_srli_epi16(
_mm256_and_si256(
@@ -3079,8 +3650,10 @@
res = _mm256_add_epi16(a32, b);
res = _mm256_srli_epi16(res, 5);
- res1 = _mm256_castsi256_si128(res);
- res1 = _mm_packus_epi16(res1, res1);
+ res = _mm256_packus_epi16(
+ res, _mm256_castsi128_si256(
+ _mm256_extracti128_si256(res, 1))); // goto 8 bit
+ res1 = _mm256_castsi256_si128(res); // 16 8bit values
dst[r] =
_mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
@@ -3093,191 +3666,29 @@
int dx) {
__m128i dstvec[16];
- dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ dr_prediction_z1_HxW_internal_avx2(4, N, dstvec, above, upsample_above, dx);
for (int i = 0; i < N; i++) {
*(uint32_t *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
}
}
-static AOM_FORCE_INLINE void dr_prediction_z1_8xN_internal_avx2(
- int N, __m128i *dst, const uint8_t *above, int upsample_above, int dx) {
- const int frac_bits = 6 - upsample_above;
- const int max_base_x = ((8 + N) - 1) << upsample_above;
-
- int x;
- // pre-filter above pixels
- // store in temp buffers:
- // above[x] * 32 + 16
- // above[x+1] - above[x]
- // final pixels will be caluculated as:
- // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0, a1, a0_1, a1_1, a32, a16, diff, c3f;
- __m128i a_mbase_x;
-
- a16 = _mm256_set1_epi32(16);
- a_mbase_x = _mm_set1_epi8(above[max_base_x]);
- c3f = _mm256_set1_epi32(0x3f);
-
- x = dx;
- for (int r = 0; r < N; r++) {
- __m256i b, res, res1, shift;
- __m128i res128;
-
- int base = x >> frac_bits;
- int base_max_diff = (max_base_x - base) >> upsample_above;
- if (base_max_diff <= 0) {
- for (int i = r; i < N; ++i) {
- dst[i] = a_mbase_x; // save 16 values, 8 to be used furter
- }
- return;
- }
- if (base_max_diff > 8) base_max_diff = 8;
-
- a0 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base)));
- a1 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
-
- if (upsample_above) {
- a0 = _mm256_permutevar8x32_epi32(
- a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
- a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
-
- a0_1 =
- _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
- a0_1 = _mm256_permutevar8x32_epi32(
- a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
- a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
-
- a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
- a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
-
- shift = _mm256_srli_epi32(
- _mm256_and_si256(
- _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above), c3f),
- 1);
- } else {
- shift = _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
- }
-
- diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
-
- b = _mm256_mullo_epi32(diff, shift);
- res = _mm256_add_epi32(a32, b);
- res = _mm256_srli_epi32(res, 5);
-
- res1 = _mm256_packus_epi32(
- res, _mm256_castsi128_si256(
- _mm256_extracti128_si256(res, 1))); // goto 16 bit
-
- res128 = _mm_packus_epi16(_mm256_castsi256_si128(res1),
- _mm256_castsi256_si128(res1)); // goto 8 bit
-
- res128 =
- _mm_blendv_epi8(a_mbase_x, res128, *(__m128i *)BaseMask[base_max_diff]);
- dst[r] = res128;
- x += dx;
- }
-}
-
static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
__m128i dstvec[32];
- dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ dr_prediction_z1_HxW_internal_avx2(8, N, dstvec, above, upsample_above, dx);
for (int i = 0; i < N; i++) {
_mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
}
}
-static AOM_FORCE_INLINE void dr_prediction_z1_16xN_internal_avx2(
- int N, __m128i *dstvec, const uint8_t *above, int upsample_above, int dx) {
- int x;
- // here upsample_above is 0 by design of av1_use_intra_edge_upsample
- (void)upsample_above;
- const int frac_bits = 6;
- const int max_base_x = ((16 + N) - 1);
-
- // pre-filter above pixels
- // store in temp buffers:
- // above[x] * 32 + 16
- // above[x+1] - above[x]
- // final pixels will be caluculated as:
- // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0, a0_1, a1, a1_1, diff, a32, a16, c3f;
- __m128i a_mbase_x;
-
- a16 = _mm256_set1_epi32(16);
- a_mbase_x = _mm_set1_epi8((uint8_t)above[max_base_x]);
- c3f = _mm256_set1_epi32(0x3f);
-
- x = dx;
- for (int r = 0; r < N; r++) {
- __m256i b, res[2];
- __m128i res128[2];
- int base = x >> frac_bits;
- int base_max_diff = (max_base_x - base);
- if (base_max_diff <= 0) {
- for (int i = r; i < N; ++i) {
- dstvec[i] = a_mbase_x; // save 16 values
- }
- return;
- }
- __m256i shift =
- _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
-
- a0 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base)));
- a1 = _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
-
- diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
-
- res[0] = _mm256_add_epi32(a32, b);
- res[0] = _mm256_srli_epi32(res[0], 5);
- res[0] = _mm256_packus_epi32(
- res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
- res128[0] = _mm_packus_epi16(_mm256_castsi256_si128(res[0]),
- _mm256_castsi256_si128(res[0])); // goto 8 bit
-
- if (base_max_diff > 8) {
- if (base_max_diff > 16) base_max_diff = 16;
- a0_1 =
- _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
- a1_1 =
- _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
-
- diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
-
- res[1] = _mm256_add_epi32(a32, b);
- res[1] = _mm256_srli_epi32(res[1], 5);
- res[1] = _mm256_packus_epi32(
- res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
- res128[1] =
- _mm_packus_epi16(_mm256_castsi256_si128(res[1]),
- _mm256_castsi256_si128(res[1])); // goto 8 bit
-
- } else {
- res128[1] = a_mbase_x;
- }
- res128[0] = _mm_unpacklo_epi64(res128[0], res128[1]); // 16 8bit values
-
- dstvec[r] = _mm_blendv_epi8(a_mbase_x, res128[0],
- *(__m128i *)BaseMask[base_max_diff]);
- x += dx;
- }
-}
static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
__m128i dstvec[64];
- dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above, dx);
+ dr_prediction_z1_HxW_internal_avx2(16, N, dstvec, above, upsample_above, dx);
for (int i = 0; i < N; i++) {
_mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
}
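The next hunks convert the 32- and 64-wide paths to the same 16-bit form. As a concrete instance of the (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5 formula quoted in their comments (illustrative values; interp_example is not part of the patch):

#include <assert.h>

static void interp_example(void) {
  const int a0 = 100, a1 = 120;  // two neighbouring edge pixels
  const int shift = 16;          // half-pel position (16/32)
  const int pred = (a0 * 32 + 16 + (a1 - a0) * shift) >> 5;
  assert(pred == 110);           // midway between 100 and 120, rounded down
}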
@@ -3297,16 +3708,17 @@
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0, a0_1, a1, a1_1, a32, a16;
+ __m256i a0, a1, a32, a16;
__m256i a_mbase_x, diff, c3f;
- a16 = _mm256_set1_epi32(16);
+ a16 = _mm256_set1_epi16(16);
a_mbase_x = _mm256_set1_epi8(above[max_base_x]);
- c3f = _mm256_set1_epi32(0x3f);
+ c3f = _mm256_set1_epi16(0x3f);
x = dx;
for (int r = 0; r < N; r++) {
- __m256i b, res[2], res16[2];
+ __m256i b, res, res16[2];
+ __m128i a0_128, a1_128;
int base = x >> frac_bits;
int base_max_diff = (max_base_x - base);
@@ -3318,54 +3730,28 @@
}
if (base_max_diff > 32) base_max_diff = 32;
__m256i shift =
- _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
+ _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
int mdiff = base_max_diff - j;
if (mdiff <= 0) {
res16[jj] = a_mbase_x;
} else {
- a0 = _mm256_cvtepu8_epi32(
- _mm_loadu_si128((__m128i *)(above + base + j)));
- a1 = _mm256_cvtepu8_epi32(
- _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
+ a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
+ a1_128 = _mm_loadu_si128((__m128i *)(above + base + j + 1));
+ a0 = _mm256_cvtepu8_epi16(a0_128);
+ a1 = _mm256_cvtepu8_epi16(a1_128);
- diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi16(diff, shift);
- res[0] = _mm256_add_epi32(a32, b);
- res[0] = _mm256_srli_epi32(res[0], 5);
- res[0] = _mm256_packus_epi32(
- res[0],
- _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
-
- // goto 8 bit
- res[0] = _mm256_packus_epi16(res[0], res[0]);
-
- if (mdiff > 8) {
- a0_1 = _mm256_cvtepu8_epi32(
- _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
- a1_1 = _mm256_cvtepu8_epi32(
- _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
-
- diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
-
- res[1] = _mm256_add_epi32(a32, b);
- res[1] = _mm256_srli_epi32(res[1], 5);
- res[1] = _mm256_packus_epi32(
- res[1],
- _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
- res[1] = _mm256_packus_epi16(res[1], res[1]);
- // goto 8 bit
- } else {
- res[1] = a_mbase_x;
- }
- res16[jj] = _mm256_unpacklo_epi64(res[0], res[1]); // 16 8bit values
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+ res16[jj] = _mm256_packus_epi16(
+ res, _mm256_castsi128_si256(
+ _mm256_extracti128_si256(res, 1))); // 16 8bit values
}
}
res16[1] =
@@ -3404,20 +3790,18 @@
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0, a0_1, a1, a1_1, a32, a16;
+ __m256i a0, a1, a32, a16;
__m256i a_mbase_x, diff, c3f;
__m128i max_base_x128, base_inc128, mask128;
- a16 = _mm256_set1_epi32(16);
+ a16 = _mm256_set1_epi16(16);
a_mbase_x = _mm256_set1_epi8(above[max_base_x]);
max_base_x128 = _mm_set1_epi8(max_base_x);
- c3f = _mm256_set1_epi32(0x3f);
+ c3f = _mm256_set1_epi16(0x3f);
x = dx;
for (int r = 0; r < N; r++, dst += stride) {
- __m256i b, res[2];
- __m128i res1;
-
+ __m256i b, res;
int base = x >> frac_bits;
if (base >= max_base_x) {
for (int i = r; i < N; ++i) {
@@ -3429,9 +3813,9 @@
}
__m256i shift =
- _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
+ _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
- __m128i a0_128, a0_1_128, a1_128, a1_1_128;
+ __m128i a0_128, a1_128, res128;
for (int j = 0; j < 64; j += 16) {
int mdif = max_base_x - (base + j);
if (mdif <= 0) {
@@ -3440,46 +3824,19 @@
} else {
a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
- a0 = _mm256_cvtepu8_epi32(a0_128);
- a1 = _mm256_cvtepu8_epi32(a1_128);
+ a0 = _mm256_cvtepu8_epi16(a0_128);
+ a1 = _mm256_cvtepu8_epi16(a1_128);
- diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
+ diff = _mm256_sub_epi16(a1, a0); // a[x+1] - a[x]
+ a32 = _mm256_slli_epi16(a0, 5); // a[x] * 32
+ a32 = _mm256_add_epi16(a32, a16); // a[x] * 32 + 16
+ b = _mm256_mullo_epi16(diff, shift);
- res[0] = _mm256_add_epi32(a32, b);
- res[0] = _mm256_srli_epi32(res[0], 5);
- res[0] = _mm256_packus_epi32(
- res[0],
- _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
- // goto 8 bit
- res[0] = _mm256_packus_epi16(res[0], res[0]);
-
- if (mdif > 8) {
- a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
- a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
- a0_1 = _mm256_cvtepu8_epi32(a0_1_128);
- a1_1 = _mm256_cvtepu8_epi32(a1_1_128);
-
- diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0_1, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
-
- res[1] = _mm256_add_epi32(a32, b);
- res[1] = _mm256_srli_epi32(res[1], 5);
- res[1] = _mm256_packus_epi32(
- res[1],
- _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
- res[1] = _mm256_packus_epi16(res[1], res[1]);
-
- } else {
- res[1] = a_mbase_x;
- }
- res1 = _mm_unpacklo_epi64(
- _mm256_castsi256_si128(res[0]),
- _mm256_castsi256_si128(res[1])); // 16 8bit values
+ res = _mm256_add_epi16(a32, b);
+ res = _mm256_srli_epi16(res, 5);
+ res = _mm256_packus_epi16(
+ res, _mm256_castsi128_si256(
+ _mm256_extracti128_si256(res, 1))); // 16 8bit values
base_inc128 = _mm_setr_epi8(
base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
@@ -3489,9 +3846,9 @@
mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
_mm_setzero_si128());
- res1 =
- _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x), res1, mask128);
- _mm_storeu_si128((__m128i *)(dst + j), res1);
+ res128 = _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x),
+ _mm256_castsi256_si128(res), mask128);
+ _mm_storeu_si128((__m128i *)(dst + j), res128);
}
}
x += dx;
@@ -3525,39 +3882,6 @@
return;
}
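In the 64-wide loop above, the per-lane clamp is computed arithmetically (_mm_subs_epu8 + _mm_cmpgt_epi8 on the lane indices) rather than via a BaseMask lookup. A scalar model of that selection (a sketch; clamp_to_max_base_ref and interp[] are stand-in names for the 16 freshly interpolated bytes of one iteration):

#include <stdint.h>

static void clamp_to_max_base_ref(uint8_t *out, const uint8_t *interp,
                                  const uint8_t *above, int base, int j,
                                  int max_base_x) {
  for (int c = 0; c < 16; c++) {
    const int idx = base + j + c;  // source index this lane would read
    out[j + c] = (idx < max_base_x) ? interp[c] : above[max_base_x];
  }
}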
-static uint8_t LoadMaskx[8][16] = {
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 },
- { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
- { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 },
- { 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
- { 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 },
- { 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8 },
-};
-
-static uint8_t EvenOddMaskx4[8][16] = {
- { 0, 2, 4, 6, 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 1, 3, 5, 7, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 2, 4, 6, 8, 3, 5, 7, 9, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 3, 5, 7, 9, 4, 6, 8, 10, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 4, 6, 8, 10, 5, 7, 9, 11, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 5, 7, 9, 11, 6, 8, 10, 12, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 6, 8, 10, 12, 7, 9, 11, 13, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 7, 9, 11, 13, 8, 10, 12, 14, 0 }
-};
-
-static uint8_t EvenOddMaskx[8][16] = {
- { 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 0, 0, 0, 0 },
- { 0, 1, 3, 5, 7, 9, 11, 13, 15, 2, 4, 6, 8, 0, 0, 0 },
- { 0, 0, 2, 4, 6, 8, 10, 12, 14, 3, 5, 7, 9, 0, 0, 0 },
- { 0, 0, 0, 3, 5, 7, 9, 11, 13, 15, 4, 6, 8, 10, 0 },
- { 0, 0, 0, 0, 4, 6, 8, 10, 12, 14, 5, 7, 9, 11, 0, 0 },
- { 0, 0, 0, 0, 0, 5, 7, 9, 11, 13, 15, 6, 8, 10, 12, 0 },
- { 0, 0, 0, 0, 0, 0, 6, 8, 10, 12, 14, 7, 9, 11, 13, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 7, 9, 11, 13, 15, 8, 10, 12, 14 }
-};
-
static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left,
int upsample_above, int upsample_left,
@@ -3574,15 +3898,15 @@
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
- __m256i a0_x, a1_x, a32, a16, diff;
+ __m128i a0_x, a1_x, a32, a16, diff;
__m128i c3f, min_base_y128;
- a16 = _mm256_set1_epi32(16);
- c3f = _mm_set1_epi32(0x3f);
- min_base_y128 = _mm_set1_epi32(min_base_y);
+ a16 = _mm_set1_epi16(16);
+ c3f = _mm_set1_epi16(0x3f);
+ min_base_y128 = _mm_set1_epi16(min_base_y);
for (int r = 0; r < N; r++) {
- __m256i b, res, shift;
+ __m128i b, res, shift;
__m128i resx, resy, resxy;
__m128i a0_x128, a1_x128;
int y = r + 1;
@@ -3600,82 +3924,79 @@
}
if (base_shift > 3) {
- a0_x = _mm256_setzero_si256();
- a1_x = _mm256_setzero_si256();
- shift = _mm256_setzero_si256();
+ a0_x = _mm_setzero_si128();
+ a1_x = _mm_setzero_si128();
+ shift = _mm_setzero_si128();
} else {
a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
if (upsample_above) {
a0_x128 =
- _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx4[base_shift]);
- a1_x128 = _mm_srli_si128(a0_x128, 4);
+ _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 8);
- shift = _mm256_castsi128_si256(_mm_srli_epi32(
+ shift = _mm_srli_epi16(
_mm_and_si128(
- _mm_slli_epi32(
- _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
- (2 << 6) - y * dx, (3 << 6) - y * dx),
- upsample_above),
+ _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
+ (2 << 6) - y * dx,
+ (3 << 6) - y * dx, 0, 0, 0, 0),
+ upsample_above),
c3f),
- 1));
+ 1);
} else {
a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
a1_x128 = _mm_srli_si128(a0_x128, 1);
- shift = _mm256_castsi128_si256(_mm_srli_epi32(
- _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
- (2 << 6) - y * dx, (3 << 6) - y * dx),
- c3f),
- 1));
+ shift = _mm_srli_epi16(
+ _mm_and_si128(
+ _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+ (3 << 6) - y * dx, 0, 0, 0, 0),
+ c3f),
+ 1);
}
- a0_x = _mm256_cvtepu8_epi32(a0_x128);
- a1_x = _mm256_cvtepu8_epi32(a1_x128);
+ a0_x = _mm_cvtepu8_epi16(a0_x128);
+ a1_x = _mm_cvtepu8_epi16(a1_x128);
}
// y calc
__m128i a0_y, a1_y, shifty;
if (base_x < min_base_x) {
- DECLARE_ALIGNED(32, int, base_y_c[4]);
+ DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
__m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
- r6 = _mm_set1_epi32(r << 6);
- dy128 = _mm_set1_epi32(dy);
- c1234 = _mm_setr_epi32(1, 2, 3, 4);
- y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
- base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
- mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
+ r6 = _mm_set1_epi16(r << 6);
+ dy128 = _mm_set1_epi16(dy);
+ c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
+ y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
+ base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
+ mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
_mm_store_si128((__m128i *)base_y_c, base_y_c128);
- a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
- left[base_y_c[2]], left[base_y_c[3]]);
- a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
- left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
+ a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
+ left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
+ a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+ left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
+ 0, 0);
if (upsample_left) {
- shifty = _mm_srli_epi32(
- _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
+ shifty = _mm_srli_epi16(
+ _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
} else {
- shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
+ shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
}
- a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
- a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
- shift = _mm256_inserti128_si256(shift, shifty, 1);
+ a0_x = _mm_unpacklo_epi64(a0_x, a0_y);
+ a1_x = _mm_unpacklo_epi64(a1_x, a1_y);
+ shift = _mm_unpacklo_epi64(shift, shifty);
}
- diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]
- a32 = _mm256_slli_epi32(a0_x, 5); // a[x] * 32
- a32 = _mm256_add_epi32(a32, a16); // a[x] * 32 + 16
+ diff = _mm_sub_epi16(a1_x, a0_x); // a[x+1] - a[x]
+ a32 = _mm_slli_epi16(a0_x, 5); // a[x] * 32
+ a32 = _mm_add_epi16(a32, a16); // a[x] * 32 + 16
- b = _mm256_mullo_epi32(diff, shift);
- res = _mm256_add_epi32(a32, b);
- res = _mm256_srli_epi32(res, 5);
+ b = _mm_mullo_epi16(diff, shift);
+ res = _mm_add_epi16(a32, b);
+ res = _mm_srli_epi16(res, 5);
- resx = _mm256_castsi256_si128(res);
- resx = _mm_packus_epi32(resx, resx);
- resx = _mm_packus_epi16(resx, resx);
-
- resy = _mm256_extracti128_si256(res, 1);
- resy = _mm_packus_epi32(resy, resy);
- resy = _mm_packus_epi16(resy, resy);
+ resx = _mm_packus_epi16(res, res);
+ resy = _mm_srli_si128(resx, 4);
resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
*(uint32_t *)(dst) = _mm_cvtsi128_si32(resxy);
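The blend just above picks, per column, between the above-based and left-based interpolations of this Nx4 z2 row: after the pack, resx carries the above results in bytes 0..3 and the left results in bytes 4..7, and the shift by 4 lines the left results up for the blend. In scalar terms (a sketch; from_left/from_above are stand-in names):

#include <stdint.h>

static void z2_row_select_ref(uint8_t *row, const uint8_t *from_left,
                              const uint8_t *from_above, int base_min_diff) {
  // The first base_min_diff columns fall off the left end of `above`
  // and take the left-edge interpolation; the rest take the above one.
  for (int c = 0; c < 4; c++)
    row[c] = (c < base_min_diff) ? from_left[c] : from_above[c];
}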
@@ -3730,12 +4051,11 @@
shift = _mm256_setzero_si256();
} else {
a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
- a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+ a1_x128 = _mm_srli_si128(a0_x128, 1);
if (upsample_above) {
a0_x128 =
_mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
- a1_x128 =
- _mm_shuffle_epi8(a1_x128, *(__m128i *)EvenOddMaskx[base_shift]);
+ a1_x128 = _mm_srli_si128(a0_x128, 8);
shift = _mm256_castsi128_si256(_mm_srli_epi16(
_mm_and_si128(
@@ -3929,7 +4249,7 @@
res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
// y calc
- if ((base_x < min_base_x)) {
+ if (base_x < min_base_x) {
DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
__m256i r6, c256, dy256, y_c256, base_y_c256, mask256, mul16;
r6 = _mm256_set1_epi16(r << 6);
@@ -4227,38 +4547,25 @@
d[15] = _mm_unpackhi_epi64(w7, w15);
}
-static void transpose_TX_8X8(const uint8_t *src, ptrdiff_t pitchSrc,
- uint8_t *dst, ptrdiff_t pitchDst) {
- __m128i r0, r1, r2, r3, r4, r5, r6, r7;
- __m128i d0d1, d2d3, d4d5, d6d7;
- r0 = _mm_loadl_epi64((__m128i *)(src + 0 * pitchSrc));
- r1 = _mm_loadl_epi64((__m128i *)(src + 1 * pitchSrc));
- r2 = _mm_loadl_epi64((__m128i *)(src + 2 * pitchSrc));
- r3 = _mm_loadl_epi64((__m128i *)(src + 3 * pitchSrc));
- r4 = _mm_loadl_epi64((__m128i *)(src + 4 * pitchSrc));
- r5 = _mm_loadl_epi64((__m128i *)(src + 5 * pitchSrc));
- r6 = _mm_loadl_epi64((__m128i *)(src + 6 * pitchSrc));
- r7 = _mm_loadl_epi64((__m128i *)(src + 7 * pitchSrc));
-
- transpose8x8_sse2(&r0, &r1, &r2, &r3, &r4, &r5, &r6, &r7, &d0d1, &d2d3, &d4d5,
- &d6d7);
-
- _mm_storel_epi64((__m128i *)(dst + 0 * pitchDst), d0d1);
- _mm_storel_epi64((__m128i *)(dst + 1 * pitchDst), _mm_srli_si128(d0d1, 8));
- _mm_storel_epi64((__m128i *)(dst + 2 * pitchDst), d2d3);
- _mm_storel_epi64((__m128i *)(dst + 3 * pitchDst), _mm_srli_si128(d2d3, 8));
- _mm_storel_epi64((__m128i *)(dst + 4 * pitchDst), d4d5);
- _mm_storel_epi64((__m128i *)(dst + 5 * pitchDst), _mm_srli_si128(d4d5, 8));
- _mm_storel_epi64((__m128i *)(dst + 6 * pitchDst), d6d7);
- _mm_storel_epi64((__m128i *)(dst + 7 * pitchDst), _mm_srli_si128(d6d7, 8));
+static void transpose_TX_16X16(const uint8_t *src, ptrdiff_t pitchSrc,
+ uint8_t *dst, ptrdiff_t pitchDst) {
+ __m128i r[16];
+ __m128i d[16];
+ for (int j = 0; j < 16; j++) {
+ r[j] = _mm_loadu_si128((__m128i *)(src + j * pitchSrc));
+ }
+ transpose16x16_sse2(r, d);
+ for (int j = 0; j < 16; j++) {
+ _mm_storeu_si128((__m128i *)(dst + j * pitchDst), d[j]);
+ }
}
static void transpose(const uint8_t *src, ptrdiff_t pitchSrc, uint8_t *dst,
ptrdiff_t pitchDst, int width, int height) {
- for (int j = 0; j < height; j += 8)
- for (int i = 0; i < width; i += 8)
- transpose_TX_8X8(src + i * pitchSrc + j, pitchSrc, dst + j * pitchDst + i,
- pitchDst);
+ for (int j = 0; j < height; j += 16)
+ for (int i = 0; i < width; i += 16)
+ transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
+ dst + j * pitchDst + i, pitchDst);
}
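transpose_TX_16X16 now moves whole 16x16 tiles; the z3 paths below compute columns with the z1 kernel and transpose them into place. Scalar equivalent of one tile (illustrative; transpose_tile_16x16_ref is not a library function):

#include <stddef.h>
#include <stdint.h>

static void transpose_tile_16x16_ref(const uint8_t *src, ptrdiff_t pitch_src,
                                     uint8_t *dst, ptrdiff_t pitch_dst) {
  for (int r = 0; r < 16; r++)
    for (int c = 0; c < 16; c++)
      dst[c * pitch_dst + r] = src[r * pitch_src + c];
}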
static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
@@ -4266,7 +4573,7 @@
int dy) {
__m128i dstvec[4], d[4];
- dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(4, 4, dstvec, left, upsample_left, dy);
transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
&d[0], &d[1], &d[2], &d[3]);
@@ -4282,7 +4589,7 @@
int dy) {
__m128i dstvec[8], d[8];
- dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(8, 8, dstvec, left, upsample_left, dy);
transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
&dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
&d[3]);
@@ -4302,7 +4609,7 @@
int dy) {
__m128i dstvec[4], d[8];
- dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(8, 4, dstvec, left, upsample_left, dy);
transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
&d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
for (int i = 0; i < 8; i++) {
@@ -4315,7 +4622,7 @@
int dy) {
__m128i dstvec[8], d[4];
- dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(4, 8, dstvec, left, upsample_left, dy);
transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
&dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
&d[1], &d[2], &d[3]);
@@ -4330,7 +4637,7 @@
int dy) {
__m128i dstvec[8], d[8];
- dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(16, 8, dstvec, left, upsample_left, dy);
transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
@@ -4346,7 +4653,7 @@
int dy) {
__m128i dstvec[16], d[16];
- dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(8, 16, dstvec, left, upsample_left, dy);
transpose16x8_8x16_sse2(
&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
&dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
@@ -4363,7 +4670,7 @@
int dy) {
__m128i dstvec[4], d[16];
- dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(16, 4, dstvec, left, upsample_left, dy);
transpose4x16_sse2(dstvec, d);
for (int i = 0; i < 16; i++) {
*(uint32_t *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
@@ -4375,7 +4682,7 @@
int dy) {
__m128i dstvec[16], d[8];
- dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(4, 16, dstvec, left, upsample_left, dy);
for (int i = 4; i < 8; i++) {
d[i] = _mm_setzero_si128();
}
@@ -4416,7 +4723,7 @@
int dy) {
__m128i dstvec[32], d[16];
- dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(8, 32, dstvec, left, upsample_left, dy);
transpose16x8_8x16_sse2(
&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
@@ -4442,7 +4749,7 @@
int dy) {
__m128i dstvec[16], d[16];
- dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(16, 16, dstvec, left, upsample_left, dy);
transpose16x16_sse2(dstvec, d);
for (int i = 0; i < 16; i++) {
@@ -4501,7 +4808,7 @@
int dy) {
__m128i dstvec[32], d[16];
- dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(16, 32, dstvec, left, upsample_left, dy);
for (int i = 0; i < 32; i += 16) {
transpose16x16_sse2((dstvec + i), d);
for (int j = 0; j < 16; j++) {
@@ -4540,7 +4847,7 @@
int dy) {
__m128i dstvec[64], d[16];
- dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left, dy);
+ dr_prediction_z1_HxW_internal_avx2(16, 64, dstvec, left, upsample_left, dy);
for (int i = 0; i < 64; i += 16) {
transpose16x16_sse2((dstvec + i), d);
for (int j = 0; j < 16; j++) {
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index ab93916..a046eec 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -160,9 +160,7 @@
add_proto qw/void av1_highbd_dr_prediction_z1/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_above, int dx, int dy, int bd";
specialize qw/av1_highbd_dr_prediction_z1 avx2/;
add_proto qw/void av1_highbd_dr_prediction_z2/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_above, int upsample_left, int dx, int dy, int bd";
-# TODO(niva213@gmail.com): Re-enable avx2 after fixing valgrind issue
-# https://crbug.com/aomedia/2316
-# specialize qw/av1_highbd_dr_prediction_z2 avx2/;
+specialize qw/av1_highbd_dr_prediction_z2 avx2/;
add_proto qw/void av1_highbd_dr_prediction_z3/, "uint16_t *dst, ptrdiff_t stride, int bw, int bh, const uint16_t *above, const uint16_t *left, int upsample_left, int dx, int dy, int bd";
specialize qw/av1_highbd_dr_prediction_z3 avx2/;
diff --git a/test/dr_prediction_test.cc b/test/dr_prediction_test.cc
index 4be8489..bacc366 100644
--- a/test/dr_prediction_test.cc
+++ b/test/dr_prediction_test.cc
@@ -365,11 +365,9 @@
::testing::Values(DrPredFunc<DrPred>(&z1_wrapper<av1_dr_prediction_z1_c>,
&z1_wrapper<av1_dr_prediction_z1_avx2>,
AOM_BITS_8, kZ1Start),
- /* TODO(niva213@gmail.com): Re-enable this test after
- fixing valgrind issue: https://crbug.com/aomedia/2316
DrPredFunc<DrPred>(&z2_wrapper<av1_dr_prediction_z2_c>,
&z2_wrapper<av1_dr_prediction_z2_avx2>,
- AOM_BITS_8, kZ2Start), */
+ AOM_BITS_8, kZ2Start),
DrPredFunc<DrPred>(&z3_wrapper<av1_dr_prediction_z3_c>,
&z3_wrapper<av1_dr_prediction_z3_avx2>,
AOM_BITS_8, kZ3Start)));
@@ -414,8 +412,6 @@
&z1_wrapper_hbd<av1_highbd_dr_prediction_z1_c>,
&z1_wrapper_hbd<av1_highbd_dr_prediction_z1_avx2>,
AOM_BITS_12, kZ1Start),
- /* TODO(niva213@gmail.com): Re-enable these tests after
- fixing valgrind issue: https://crbug.com/aomedia/2316
DrPredFunc<DrPred_Hbd>(
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_c>,
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_avx2>,
@@ -427,7 +423,7 @@
DrPredFunc<DrPred_Hbd>(
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_c>,
&z2_wrapper_hbd<av1_highbd_dr_prediction_z2_avx2>,
- AOM_BITS_12, kZ2Start),*/
+ AOM_BITS_12, kZ2Start),
DrPredFunc<DrPred_Hbd>(
&z3_wrapper_hbd<av1_highbd_dr_prediction_z3_c>,
&z3_wrapper_hbd<av1_highbd_dr_prediction_z3_avx2>,