/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <arm_neon.h>
#include <assert.h>
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_dsp/intrapred_common.h"
//------------------------------------------------------------------------------
// DC 4x4
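// A DC predictor fills the whole block with a single value: the rounded
// average of the edge pixels it uses. For 4x4 the four above and four left
// samples are summed and rounding-shifted right by 3 (divide by 8), then
// that value is broadcast to every pixel of the block.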
static INLINE uint16x8_t dc_load_sum_4(const uint8_t *in) {
const uint8x8_t a = load_u8_4x1_lane0(in);
const uint16x4_t p0 = vpaddl_u8(a);
const uint16x4_t p1 = vpadd_u16(p0, p0);
return vcombine_u16(p1, vdup_n_u16(0));
}
static INLINE void dc_store_4xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x8_t dc) {
for (int i = 0; i < h; ++i) {
store_u8_4x1(dst + i * stride, dc, 0);
}
}
void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_4(above);
const uint16x8_t sum_left = dc_load_sum_4(left);
const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
const uint8x8_t dc0 = vrshrn_n_u16(sum, 3);
dc_store_4xh(dst, stride, 4, vdup_lane_u8(dc0, 0));
}
void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_left = dc_load_sum_4(left);
const uint8x8_t dc0 = vrshrn_n_u16(sum_left, 2);
(void)above;
dc_store_4xh(dst, stride, 4, vdup_lane_u8(dc0, 0));
}
void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_4(above);
const uint8x8_t dc0 = vrshrn_n_u16(sum_top, 2);
(void)left;
dc_store_4xh(dst, stride, 4, vdup_lane_u8(dc0, 0));
}
void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t dc0 = vdup_n_u8(0x80);
(void)above;
(void)left;
dc_store_4xh(dst, stride, 4, dc0);
}
//------------------------------------------------------------------------------
// DC 8x8
static INLINE uint16x8_t dc_load_sum_8(const uint8_t *in) {
// This isn't used in the case where we want to load both above and left
// vectors, since we want to avoid performing the reduction twice.
const uint8x8_t a = vld1_u8(in);
const uint16x4_t p0 = vpaddl_u8(a);
const uint16x4_t p1 = vpadd_u16(p0, p0);
const uint16x4_t p2 = vpadd_u16(p1, p1);
return vcombine_u16(p2, vdup_n_u16(0));
}
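// Sum all eight u16 lanes of 'a' and return the total broadcast to every
// lane, e.g. { 1, 2, 3, 4, 5, 6, 7, 8 } -> { 36, 36, ..., 36 }.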
static INLINE uint16x8_t horizontal_add_and_broadcast_u16x8(uint16x8_t a) {
#if AOM_ARCH_AARCH64
  // On AArch64 we could also use vdupq_n_u16(vaddvq_u16(a)) here to save an
  // instruction; however, the addv instruction is usually slightly more
  // expensive than a pairwise addition, so the need to immediately broadcast
  // the result again seems to negate any benefit.
const uint16x8_t b = vpaddq_u16(a, a);
const uint16x8_t c = vpaddq_u16(b, b);
return vpaddq_u16(c, c);
#else
const uint16x4_t b = vadd_u16(vget_low_u16(a), vget_high_u16(a));
const uint16x4_t c = vpadd_u16(b, b);
const uint16x4_t d = vpadd_u16(c, c);
return vcombine_u16(d, d);
#endif
}
static INLINE void dc_store_8xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x8_t dc) {
for (int i = 0; i < h; ++i) {
vst1_u8(dst + i * stride, dc);
}
}
void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t sum_top = vld1_u8(above);
const uint8x8_t sum_left = vld1_u8(left);
uint16x8_t sum = vaddl_u8(sum_left, sum_top);
sum = horizontal_add_and_broadcast_u16x8(sum);
const uint8x8_t dc0 = vrshrn_n_u16(sum, 4);
dc_store_8xh(dst, stride, 8, vdup_lane_u8(dc0, 0));
}
void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_left = dc_load_sum_8(left);
const uint8x8_t dc0 = vrshrn_n_u16(sum_left, 3);
(void)above;
dc_store_8xh(dst, stride, 8, vdup_lane_u8(dc0, 0));
}
void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_8(above);
const uint8x8_t dc0 = vrshrn_n_u16(sum_top, 3);
(void)left;
dc_store_8xh(dst, stride, 8, vdup_lane_u8(dc0, 0));
}
void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t dc0 = vdup_n_u8(0x80);
(void)above;
(void)left;
dc_store_8xh(dst, stride, 8, dc0);
}
//------------------------------------------------------------------------------
// DC 16x16
static INLINE uint16x8_t dc_load_partial_sum_16(const uint8_t *in) {
const uint8x16_t a = vld1q_u8(in);
  // Delay the remainder of the reduction until
  // horizontal_add_and_broadcast_u16x8, since we want to do it once rather
  // than twice in the case where we are loading both above and left.
return vpaddlq_u8(a);
}
static INLINE uint16x8_t dc_load_sum_16(const uint8_t *in) {
return horizontal_add_and_broadcast_u16x8(dc_load_partial_sum_16(in));
}
static INLINE void dc_store_16xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t dc) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + i * stride, dc);
}
}
void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_partial_sum_16(above);
const uint16x8_t sum_left = dc_load_partial_sum_16(left);
uint16x8_t sum = vaddq_u16(sum_left, sum_top);
sum = horizontal_add_and_broadcast_u16x8(sum);
const uint8x8_t dc0 = vrshrn_n_u16(sum, 5);
dc_store_16xh(dst, stride, 16, vdupq_lane_u8(dc0, 0));
}
void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_left = dc_load_sum_16(left);
const uint8x8_t dc0 = vrshrn_n_u16(sum_left, 4);
(void)above;
dc_store_16xh(dst, stride, 16, vdupq_lane_u8(dc0, 0));
}
void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_16(above);
const uint8x8_t dc0 = vrshrn_n_u16(sum_top, 4);
(void)left;
dc_store_16xh(dst, stride, 16, vdupq_lane_u8(dc0, 0));
}
void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint8x16_t dc0 = vdupq_n_u8(0x80);
(void)above;
(void)left;
dc_store_16xh(dst, stride, 16, dc0);
}
//------------------------------------------------------------------------------
// DC 32x32
static INLINE uint16x8_t dc_load_partial_sum_32(const uint8_t *in) {
const uint8x16_t a0 = vld1q_u8(in);
const uint8x16_t a1 = vld1q_u8(in + 16);
  // Delay the remainder of the reduction until
  // horizontal_add_and_broadcast_u16x8, since we want to do it once rather
  // than twice in the case where we are loading both above and left.
return vpadalq_u8(vpaddlq_u8(a0), a1);
}
static INLINE uint16x8_t dc_load_sum_32(const uint8_t *in) {
return horizontal_add_and_broadcast_u16x8(dc_load_partial_sum_32(in));
}
static INLINE void dc_store_32xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t dc) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + i * stride, dc);
vst1q_u8(dst + i * stride + 16, dc);
}
}
void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_partial_sum_32(above);
const uint16x8_t sum_left = dc_load_partial_sum_32(left);
uint16x8_t sum = vaddq_u16(sum_left, sum_top);
sum = horizontal_add_and_broadcast_u16x8(sum);
const uint8x8_t dc0 = vrshrn_n_u16(sum, 6);
dc_store_32xh(dst, stride, 32, vdupq_lane_u8(dc0, 0));
}
void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_left = dc_load_sum_32(left);
const uint8x8_t dc0 = vrshrn_n_u16(sum_left, 5);
(void)above;
dc_store_32xh(dst, stride, 32, vdupq_lane_u8(dc0, 0));
}
void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_32(above);
const uint8x8_t dc0 = vrshrn_n_u16(sum_top, 5);
(void)left;
dc_store_32xh(dst, stride, 32, vdupq_lane_u8(dc0, 0));
}
void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint8x16_t dc0 = vdupq_n_u8(0x80);
(void)above;
(void)left;
dc_store_32xh(dst, stride, 32, dc0);
}
//------------------------------------------------------------------------------
// DC 64x64
static INLINE uint16x8_t dc_load_partial_sum_64(const uint8_t *in) {
const uint8x16_t a0 = vld1q_u8(in);
const uint8x16_t a1 = vld1q_u8(in + 16);
const uint8x16_t a2 = vld1q_u8(in + 32);
const uint8x16_t a3 = vld1q_u8(in + 48);
const uint16x8_t p01 = vpadalq_u8(vpaddlq_u8(a0), a1);
const uint16x8_t p23 = vpadalq_u8(vpaddlq_u8(a2), a3);
  // Delay the remainder of the reduction until
  // horizontal_add_and_broadcast_u16x8, since we want to do it once rather
  // than twice in the case where we are loading both above and left.
return vaddq_u16(p01, p23);
}
static INLINE uint16x8_t dc_load_sum_64(const uint8_t *in) {
return horizontal_add_and_broadcast_u16x8(dc_load_partial_sum_64(in));
}
static INLINE void dc_store_64xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t dc) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + i * stride, dc);
vst1q_u8(dst + i * stride + 16, dc);
vst1q_u8(dst + i * stride + 32, dc);
vst1q_u8(dst + i * stride + 48, dc);
}
}
void aom_dc_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint16x8_t sum_top = dc_load_partial_sum_64(above);
const uint16x8_t sum_left = dc_load_partial_sum_64(left);
uint16x8_t sum = vaddq_u16(sum_left, sum_top);
sum = horizontal_add_and_broadcast_u16x8(sum);
const uint8x8_t dc0 = vrshrn_n_u16(sum, 7);
dc_store_64xh(dst, stride, 64, vdupq_lane_u8(dc0, 0));
}
void aom_dc_left_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_left = dc_load_sum_64(left);
const uint8x8_t dc0 = vrshrn_n_u16(sum_left, 6);
(void)above;
dc_store_64xh(dst, stride, 64, vdupq_lane_u8(dc0, 0));
}
void aom_dc_top_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint16x8_t sum_top = dc_load_sum_64(above);
const uint8x8_t dc0 = vrshrn_n_u16(sum_top, 6);
(void)left;
dc_store_64xh(dst, stride, 64, vdupq_lane_u8(dc0, 0));
}
void aom_dc_128_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above,
const uint8_t *left) {
const uint8x16_t dc0 = vdupq_n_u8(0x80);
(void)above;
(void)left;
dc_store_64xh(dst, stride, 64, dc0);
}
//------------------------------------------------------------------------------
// DC rectangular cases
#define DC_MULTIPLIER_1X2 0x5556
#define DC_MULTIPLIER_1X4 0x3334
#define DC_SHIFT2 16
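// Rectangular blocks need the edge total divided by w + h, which is 3 or 5
// times a power of two. The power-of-two factor is removed by shift1 below;
// the remaining /3 or /5 is done with a fixed-point reciprocal:
// 0x5556 ~= 2^16 / 3 and 0x3334 ~= 2^16 / 5, applied with a 16-bit shift
// (DC_SHIFT2). For example, the 4x8 predictor computes (sum + 6) / 12 as
// (((sum + 6) >> 2) * 0x5556) >> 16.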
static INLINE int divide_using_multiply_shift(int num, int shift1,
int multiplier, int shift2) {
const int interm = num >> shift1;
return interm * multiplier >> shift2;
}
static INLINE int calculate_dc_from_sum(int bw, int bh, uint32_t sum,
int shift1, int multiplier) {
const int expected_dc = divide_using_multiply_shift(
sum + ((bw + bh) >> 1), shift1, multiplier, DC_SHIFT2);
assert(expected_dc < (1 << 8));
return expected_dc;
}
#undef DC_SHIFT2
void aom_dc_predictor_4x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t a = load_u8_4x1_lane0(above);
uint8x8_t l = vld1_u8(left);
uint32_t sum = horizontal_add_u16x8(vaddl_u8(a, l));
uint32_t dc = calculate_dc_from_sum(4, 8, sum, 2, DC_MULTIPLIER_1X2);
dc_store_4xh(dst, stride, 8, vdup_n_u8(dc));
}
void aom_dc_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t a = vld1_u8(above);
uint8x8_t l = load_u8_4x1_lane0(left);
uint32_t sum = horizontal_add_u16x8(vaddl_u8(a, l));
uint32_t dc = calculate_dc_from_sum(8, 4, sum, 2, DC_MULTIPLIER_1X2);
dc_store_8xh(dst, stride, 4, vdup_n_u8(dc));
}
void aom_dc_predictor_4x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t a = load_u8_4x1_lane0(above);
uint8x16_t l = vld1q_u8(left);
uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(l), a);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(4, 16, sum, 2, DC_MULTIPLIER_1X4);
dc_store_4xh(dst, stride, 16, vdup_n_u8(dc));
}
void aom_dc_predictor_16x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x16_t a = vld1q_u8(above);
uint8x8_t l = load_u8_4x1_lane0(left);
uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(a), l);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(16, 4, sum, 2, DC_MULTIPLIER_1X4);
dc_store_16xh(dst, stride, 4, vdupq_n_u8(dc));
}
void aom_dc_predictor_8x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t a = vld1_u8(above);
uint8x16_t l = vld1q_u8(left);
uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(l), a);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(8, 16, sum, 3, DC_MULTIPLIER_1X2);
dc_store_8xh(dst, stride, 16, vdup_n_u8(dc));
}
void aom_dc_predictor_16x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x16_t a = vld1q_u8(above);
uint8x8_t l = vld1_u8(left);
uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(a), l);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(16, 8, sum, 3, DC_MULTIPLIER_1X2);
dc_store_16xh(dst, stride, 8, vdupq_n_u8(dc));
}
void aom_dc_predictor_8x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint8x8_t a = vld1_u8(above);
uint16x8_t sum_left = dc_load_partial_sum_32(left);
uint16x8_t sum_al = vaddw_u8(sum_left, a);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(8, 32, sum, 3, DC_MULTIPLIER_1X4);
dc_store_8xh(dst, stride, 32, vdup_n_u8(dc));
}
void aom_dc_predictor_32x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_top = dc_load_partial_sum_32(above);
uint8x8_t l = vld1_u8(left);
uint16x8_t sum_al = vaddw_u8(sum_top, l);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(32, 8, sum, 3, DC_MULTIPLIER_1X4);
dc_store_32xh(dst, stride, 8, vdupq_n_u8(dc));
}
void aom_dc_predictor_16x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_16(above);
uint16x8_t sum_left = dc_load_partial_sum_32(left);
uint16x8_t sum_al = vaddq_u16(sum_left, sum_above);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(16, 32, sum, 4, DC_MULTIPLIER_1X2);
dc_store_16xh(dst, stride, 32, vdupq_n_u8(dc));
}
void aom_dc_predictor_32x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_32(above);
uint16x8_t sum_left = dc_load_partial_sum_16(left);
uint16x8_t sum_al = vaddq_u16(sum_left, sum_above);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(32, 16, sum, 4, DC_MULTIPLIER_1X2);
dc_store_32xh(dst, stride, 16, vdupq_n_u8(dc));
}
void aom_dc_predictor_16x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_16(above);
uint16x8_t sum_left = dc_load_partial_sum_64(left);
uint16x8_t sum_al = vaddq_u16(sum_left, sum_above);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(16, 64, sum, 4, DC_MULTIPLIER_1X4);
dc_store_16xh(dst, stride, 64, vdupq_n_u8(dc));
}
void aom_dc_predictor_64x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_64(above);
uint16x8_t sum_left = dc_load_partial_sum_16(left);
uint16x8_t sum_al = vaddq_u16(sum_above, sum_left);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(64, 16, sum, 4, DC_MULTIPLIER_1X4);
dc_store_64xh(dst, stride, 16, vdupq_n_u8(dc));
}
void aom_dc_predictor_32x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_32(above);
uint16x8_t sum_left = dc_load_partial_sum_64(left);
uint16x8_t sum_al = vaddq_u16(sum_above, sum_left);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(32, 64, sum, 5, DC_MULTIPLIER_1X2);
dc_store_32xh(dst, stride, 64, vdupq_n_u8(dc));
}
void aom_dc_predictor_64x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
uint16x8_t sum_above = dc_load_partial_sum_64(above);
uint16x8_t sum_left = dc_load_partial_sum_32(left);
uint16x8_t sum_al = vaddq_u16(sum_above, sum_left);
uint32_t sum = horizontal_add_u16x8(sum_al);
uint32_t dc = calculate_dc_from_sum(64, 32, sum, 5, DC_MULTIPLIER_1X2);
dc_store_64xh(dst, stride, 32, vdupq_n_u8(dc));
}
#undef DC_MULTIPLIER_1X2
#undef DC_MULTIPLIER_1X4
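// The remaining DC_128, DC_LEFT and DC_TOP block sizes differ only in edge
// length, shift amount and store width, so they are stamped out by the
// macros below; an empty or 'q' token selects the 64-bit or 128-bit
// broadcast intrinsics respectively.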
#define DC_PREDICTOR_128(w, h, q) \
void aom_dc_128_predictor_##w##x##h##_neon(uint8_t *dst, ptrdiff_t stride, \
const uint8_t *above, \
const uint8_t *left) { \
(void)above; \
(void)left; \
dc_store_##w##xh(dst, stride, (h), vdup##q##_n_u8(0x80)); \
}
DC_PREDICTOR_128(4, 8, )
DC_PREDICTOR_128(4, 16, )
DC_PREDICTOR_128(8, 4, )
DC_PREDICTOR_128(8, 16, )
DC_PREDICTOR_128(8, 32, )
DC_PREDICTOR_128(16, 4, q)
DC_PREDICTOR_128(16, 8, q)
DC_PREDICTOR_128(16, 32, q)
DC_PREDICTOR_128(16, 64, q)
DC_PREDICTOR_128(32, 8, q)
DC_PREDICTOR_128(32, 16, q)
DC_PREDICTOR_128(32, 64, q)
DC_PREDICTOR_128(64, 32, q)
DC_PREDICTOR_128(64, 16, q)
#undef DC_PREDICTOR_128
#define DC_PREDICTOR_LEFT(w, h, shift, q) \
void aom_dc_left_predictor_##w##x##h##_neon(uint8_t *dst, ptrdiff_t stride, \
const uint8_t *above, \
const uint8_t *left) { \
(void)above; \
const uint16x8_t sum = dc_load_sum_##h(left); \
const uint8x8_t dc0 = vrshrn_n_u16(sum, (shift)); \
dc_store_##w##xh(dst, stride, (h), vdup##q##_lane_u8(dc0, 0)); \
}
DC_PREDICTOR_LEFT(4, 8, 3, )
DC_PREDICTOR_LEFT(8, 4, 2, )
DC_PREDICTOR_LEFT(8, 16, 4, )
DC_PREDICTOR_LEFT(16, 8, 3, q)
DC_PREDICTOR_LEFT(16, 32, 5, q)
DC_PREDICTOR_LEFT(32, 16, 4, q)
DC_PREDICTOR_LEFT(32, 64, 6, q)
DC_PREDICTOR_LEFT(64, 32, 5, q)
DC_PREDICTOR_LEFT(4, 16, 4, )
DC_PREDICTOR_LEFT(16, 4, 2, q)
DC_PREDICTOR_LEFT(8, 32, 5, )
DC_PREDICTOR_LEFT(32, 8, 3, q)
DC_PREDICTOR_LEFT(16, 64, 6, q)
DC_PREDICTOR_LEFT(64, 16, 4, q)
#undef DC_PREDICTOR_LEFT
#define DC_PREDICTOR_TOP(w, h, shift, q) \
void aom_dc_top_predictor_##w##x##h##_neon(uint8_t *dst, ptrdiff_t stride, \
const uint8_t *above, \
const uint8_t *left) { \
(void)left; \
const uint16x8_t sum = dc_load_sum_##w(above); \
const uint8x8_t dc0 = vrshrn_n_u16(sum, (shift)); \
dc_store_##w##xh(dst, stride, (h), vdup##q##_lane_u8(dc0, 0)); \
}
DC_PREDICTOR_TOP(4, 8, 2, )
DC_PREDICTOR_TOP(4, 16, 2, )
DC_PREDICTOR_TOP(8, 4, 3, )
DC_PREDICTOR_TOP(8, 16, 3, )
DC_PREDICTOR_TOP(8, 32, 3, )
DC_PREDICTOR_TOP(16, 4, 4, q)
DC_PREDICTOR_TOP(16, 8, 4, q)
DC_PREDICTOR_TOP(16, 32, 4, q)
DC_PREDICTOR_TOP(16, 64, 4, q)
DC_PREDICTOR_TOP(32, 8, 5, q)
DC_PREDICTOR_TOP(32, 16, 5, q)
DC_PREDICTOR_TOP(32, 64, 5, q)
DC_PREDICTOR_TOP(64, 16, 6, q)
DC_PREDICTOR_TOP(64, 32, 6, q)
#undef DC_PREDICTOR_TOP
// -----------------------------------------------------------------------------
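// Vertical prediction copies the row of above pixels into every row of the
// block; the v_store_*xh helpers below differ only in how many bytes they
// write per row.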
static INLINE void v_store_4xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x8_t d0) {
for (int i = 0; i < h; ++i) {
store_u8_4x1(dst + i * stride, d0, 0);
}
}
static INLINE void v_store_8xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x8_t d0) {
for (int i = 0; i < h; ++i) {
vst1_u8(dst + i * stride, d0);
}
}
static INLINE void v_store_16xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t d0) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + i * stride, d0);
}
}
static INLINE void v_store_32xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t d0, uint8x16_t d1) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + 0, d0);
vst1q_u8(dst + 16, d1);
dst += stride;
}
}
static INLINE void v_store_64xh(uint8_t *dst, ptrdiff_t stride, int h,
uint8x16_t d0, uint8x16_t d1, uint8x16_t d2,
uint8x16_t d3) {
for (int i = 0; i < h; ++i) {
vst1q_u8(dst + 0, d0);
vst1q_u8(dst + 16, d1);
vst1q_u8(dst + 32, d2);
vst1q_u8(dst + 48, d3);
dst += stride;
}
}
void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_4xh(dst, stride, 4, load_u8_4x1_lane0(above));
}
void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_8xh(dst, stride, 8, vld1_u8(above));
}
void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_16xh(dst, stride, 16, vld1q_u8(above));
}
void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
(void)left;
v_store_32xh(dst, stride, 32, d0, d1);
}
void aom_v_predictor_4x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_4xh(dst, stride, 8, load_u8_4x1_lane0(above));
}
void aom_v_predictor_4x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_4xh(dst, stride, 16, load_u8_4x1_lane0(above));
}
void aom_v_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_8xh(dst, stride, 4, vld1_u8(above));
}
void aom_v_predictor_8x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_8xh(dst, stride, 16, vld1_u8(above));
}
void aom_v_predictor_8x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_8xh(dst, stride, 32, vld1_u8(above));
}
void aom_v_predictor_16x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_16xh(dst, stride, 4, vld1q_u8(above));
}
void aom_v_predictor_16x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_16xh(dst, stride, 8, vld1q_u8(above));
}
void aom_v_predictor_16x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_16xh(dst, stride, 32, vld1q_u8(above));
}
void aom_v_predictor_16x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
v_store_16xh(dst, stride, 64, vld1q_u8(above));
}
void aom_v_predictor_32x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
(void)left;
v_store_32xh(dst, stride, 8, d0, d1);
}
void aom_v_predictor_32x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
(void)left;
v_store_32xh(dst, stride, 16, d0, d1);
}
void aom_v_predictor_32x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
(void)left;
v_store_32xh(dst, stride, 64, d0, d1);
}
void aom_v_predictor_64x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
const uint8x16_t d2 = vld1q_u8(above + 32);
const uint8x16_t d3 = vld1q_u8(above + 48);
(void)left;
v_store_64xh(dst, stride, 16, d0, d1, d2, d3);
}
void aom_v_predictor_64x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
const uint8x16_t d2 = vld1q_u8(above + 32);
const uint8x16_t d3 = vld1q_u8(above + 48);
(void)left;
v_store_64xh(dst, stride, 32, d0, d1, d2, d3);
}
void aom_v_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(above);
const uint8x16_t d1 = vld1q_u8(above + 16);
const uint8x16_t d2 = vld1q_u8(above + 32);
const uint8x16_t d3 = vld1q_u8(above + 48);
(void)left;
v_store_64xh(dst, stride, 64, d0, d1, d2, d3);
}
// -----------------------------------------------------------------------------
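// Horizontal prediction broadcasts each left pixel across its own row; each
// h_store_*x8 helper writes eight such rows from one uint8x8_t of left
// pixels.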
static INLINE void h_store_4x8(uint8_t *dst, ptrdiff_t stride, uint8x8_t d0) {
store_u8_4x1(dst + 0 * stride, vdup_lane_u8(d0, 0), 0);
store_u8_4x1(dst + 1 * stride, vdup_lane_u8(d0, 1), 0);
store_u8_4x1(dst + 2 * stride, vdup_lane_u8(d0, 2), 0);
store_u8_4x1(dst + 3 * stride, vdup_lane_u8(d0, 3), 0);
store_u8_4x1(dst + 4 * stride, vdup_lane_u8(d0, 4), 0);
store_u8_4x1(dst + 5 * stride, vdup_lane_u8(d0, 5), 0);
store_u8_4x1(dst + 6 * stride, vdup_lane_u8(d0, 6), 0);
store_u8_4x1(dst + 7 * stride, vdup_lane_u8(d0, 7), 0);
}
static INLINE void h_store_8x8(uint8_t *dst, ptrdiff_t stride, uint8x8_t d0) {
vst1_u8(dst + 0 * stride, vdup_lane_u8(d0, 0));
vst1_u8(dst + 1 * stride, vdup_lane_u8(d0, 1));
vst1_u8(dst + 2 * stride, vdup_lane_u8(d0, 2));
vst1_u8(dst + 3 * stride, vdup_lane_u8(d0, 3));
vst1_u8(dst + 4 * stride, vdup_lane_u8(d0, 4));
vst1_u8(dst + 5 * stride, vdup_lane_u8(d0, 5));
vst1_u8(dst + 6 * stride, vdup_lane_u8(d0, 6));
vst1_u8(dst + 7 * stride, vdup_lane_u8(d0, 7));
}
static INLINE void h_store_16x8(uint8_t *dst, ptrdiff_t stride, uint8x8_t d0) {
vst1q_u8(dst + 0 * stride, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 1 * stride, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 2 * stride, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 3 * stride, vdupq_lane_u8(d0, 3));
vst1q_u8(dst + 4 * stride, vdupq_lane_u8(d0, 4));
vst1q_u8(dst + 5 * stride, vdupq_lane_u8(d0, 5));
vst1q_u8(dst + 6 * stride, vdupq_lane_u8(d0, 6));
vst1q_u8(dst + 7 * stride, vdupq_lane_u8(d0, 7));
}
static INLINE void h_store_32x8(uint8_t *dst, ptrdiff_t stride, uint8x8_t d0) {
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 0));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 1));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 2));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 3));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 3));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 4));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 4));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 5));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 5));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 6));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 6));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 7));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 7));
}
static INLINE void h_store_64x8(uint8_t *dst, ptrdiff_t stride, uint8x8_t d0) {
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 0));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 1));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 2));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 3));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 3));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 3));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 3));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 4));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 4));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 4));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 4));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 5));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 5));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 5));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 5));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 6));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 6));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 6));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 6));
dst += stride;
vst1q_u8(dst + 0, vdupq_lane_u8(d0, 7));
vst1q_u8(dst + 16, vdupq_lane_u8(d0, 7));
vst1q_u8(dst + 32, vdupq_lane_u8(d0, 7));
vst1q_u8(dst + 48, vdupq_lane_u8(d0, 7));
}
void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = load_u8_4x1_lane0(left);
(void)above;
store_u8_4x1(dst + 0 * stride, vdup_lane_u8(d0, 0), 0);
store_u8_4x1(dst + 1 * stride, vdup_lane_u8(d0, 1), 0);
store_u8_4x1(dst + 2 * stride, vdup_lane_u8(d0, 2), 0);
store_u8_4x1(dst + 3 * stride, vdup_lane_u8(d0, 3), 0);
}
void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = vld1_u8(left);
(void)above;
h_store_8x8(dst, stride, d0);
}
void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
(void)above;
h_store_16x8(dst, stride, vget_low_u8(d0));
h_store_16x8(dst + 8 * stride, stride, vget_high_u8(d0));
}
void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
const uint8x16_t d1 = vld1q_u8(left + 16);
(void)above;
h_store_32x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_32x8(dst + 8 * stride, stride, vget_high_u8(d0));
h_store_32x8(dst + 16 * stride, stride, vget_low_u8(d1));
h_store_32x8(dst + 24 * stride, stride, vget_high_u8(d1));
}
void aom_h_predictor_4x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = vld1_u8(left);
(void)above;
h_store_4x8(dst, stride, d0);
}
void aom_h_predictor_4x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
(void)above;
h_store_4x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_4x8(dst + 8 * stride, stride, vget_high_u8(d0));
}
void aom_h_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = load_u8_4x1_lane0(left);
(void)above;
vst1_u8(dst + 0 * stride, vdup_lane_u8(d0, 0));
vst1_u8(dst + 1 * stride, vdup_lane_u8(d0, 1));
vst1_u8(dst + 2 * stride, vdup_lane_u8(d0, 2));
vst1_u8(dst + 3 * stride, vdup_lane_u8(d0, 3));
}
void aom_h_predictor_8x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
(void)above;
h_store_8x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_8x8(dst + 8 * stride, stride, vget_high_u8(d0));
}
void aom_h_predictor_8x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
const uint8x16_t d1 = vld1q_u8(left + 16);
(void)above;
h_store_8x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_8x8(dst + 8 * stride, stride, vget_high_u8(d0));
h_store_8x8(dst + 16 * stride, stride, vget_low_u8(d1));
h_store_8x8(dst + 24 * stride, stride, vget_high_u8(d1));
}
void aom_h_predictor_16x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = load_u8_4x1_lane0(left);
(void)above;
vst1q_u8(dst + 0 * stride, vdupq_lane_u8(d0, 0));
vst1q_u8(dst + 1 * stride, vdupq_lane_u8(d0, 1));
vst1q_u8(dst + 2 * stride, vdupq_lane_u8(d0, 2));
vst1q_u8(dst + 3 * stride, vdupq_lane_u8(d0, 3));
}
void aom_h_predictor_16x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = vld1_u8(left);
(void)above;
h_store_16x8(dst, stride, d0);
}
void aom_h_predictor_16x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
const uint8x16_t d1 = vld1q_u8(left + 16);
(void)above;
h_store_16x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_16x8(dst + 8 * stride, stride, vget_high_u8(d0));
h_store_16x8(dst + 16 * stride, stride, vget_low_u8(d1));
h_store_16x8(dst + 24 * stride, stride, vget_high_u8(d1));
}
void aom_h_predictor_16x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
const uint8x16_t d1 = vld1q_u8(left + 16);
const uint8x16_t d2 = vld1q_u8(left + 32);
const uint8x16_t d3 = vld1q_u8(left + 48);
(void)above;
h_store_16x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_16x8(dst + 8 * stride, stride, vget_high_u8(d0));
h_store_16x8(dst + 16 * stride, stride, vget_low_u8(d1));
h_store_16x8(dst + 24 * stride, stride, vget_high_u8(d1));
h_store_16x8(dst + 32 * stride, stride, vget_low_u8(d2));
h_store_16x8(dst + 40 * stride, stride, vget_high_u8(d2));
h_store_16x8(dst + 48 * stride, stride, vget_low_u8(d3));
h_store_16x8(dst + 56 * stride, stride, vget_high_u8(d3));
}
void aom_h_predictor_32x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x8_t d0 = vld1_u8(left);
(void)above;
h_store_32x8(dst, stride, d0);
}
void aom_h_predictor_32x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
(void)above;
h_store_32x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_32x8(dst + 8 * stride, stride, vget_high_u8(d0));
}
void aom_h_predictor_32x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left + 0);
const uint8x16_t d1 = vld1q_u8(left + 16);
const uint8x16_t d2 = vld1q_u8(left + 32);
const uint8x16_t d3 = vld1q_u8(left + 48);
(void)above;
h_store_32x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_32x8(dst + 8 * stride, stride, vget_high_u8(d0));
h_store_32x8(dst + 16 * stride, stride, vget_low_u8(d1));
h_store_32x8(dst + 24 * stride, stride, vget_high_u8(d1));
h_store_32x8(dst + 32 * stride, stride, vget_low_u8(d2));
h_store_32x8(dst + 40 * stride, stride, vget_high_u8(d2));
h_store_32x8(dst + 48 * stride, stride, vget_low_u8(d3));
h_store_32x8(dst + 56 * stride, stride, vget_high_u8(d3));
}
void aom_h_predictor_64x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
const uint8x16_t d0 = vld1q_u8(left);
(void)above;
h_store_64x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_64x8(dst + 8 * stride, stride, vget_high_u8(d0));
}
void aom_h_predictor_64x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
for (int i = 0; i < 2; ++i) {
const uint8x16_t d0 = vld1q_u8(left);
h_store_64x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_64x8(dst + 8 * stride, stride, vget_high_u8(d0));
left += 16;
dst += 16 * stride;
}
}
void aom_h_predictor_64x64_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
for (int i = 0; i < 4; ++i) {
const uint8x16_t d0 = vld1q_u8(left);
h_store_64x8(dst + 0 * stride, stride, vget_low_u8(d0));
h_store_64x8(dst + 8 * stride, stride, vget_high_u8(d0));
left += 16;
dst += 16 * stride;
}
}
/* ---------------------P R E D I C T I O N Z 1--------------------------- */
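// Zone 1 (0 < angle < 90) predicts every pixel from the above row only. The
// source position advances by dx/64 pixels per output row, so each row is a
// linear interpolation between two adjacent above pixels (see the per-kernel
// comments below). EvenOddMaskx is a vtbl shuffle table used by the
// upsampled-edge paths in the zone 2 kernels further down: row 'base_shift'
// gathers the even samples into the low half and the odd samples into the
// high half, with the leading base_shift lanes acting as placeholders that
// are masked or overwritten later.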
static DECLARE_ALIGNED(16, uint8_t, EvenOddMaskx[8][16]) = {
{ 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15 },
{ 0, 1, 3, 5, 7, 9, 11, 13, 0, 2, 4, 6, 8, 10, 12, 14 },
{ 0, 0, 2, 4, 6, 8, 10, 12, 0, 0, 3, 5, 7, 9, 11, 13 },
{ 0, 0, 0, 3, 5, 7, 9, 11, 0, 0, 0, 4, 6, 8, 10, 12 },
{ 0, 0, 0, 0, 4, 6, 8, 10, 0, 0, 0, 0, 5, 7, 9, 11 },
{ 0, 0, 0, 0, 0, 5, 7, 9, 0, 0, 0, 0, 0, 6, 8, 10 },
{ 0, 0, 0, 0, 0, 0, 6, 8, 0, 0, 0, 0, 0, 0, 7, 9 },
{ 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 8 }
};
// Low bit depth functions
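// BaseMask[n] has n leading 0xff bytes followed by zeros. It is used with
// vbsl to keep interpolated pixels in the first n lanes and substitute the
// replicated last edge pixel (a_mbase_x) in the lanes beyond the edge.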
static DECLARE_ALIGNED(32, uint8_t, BaseMask[33][32]) = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_neon_64(
int H, int W, uint8x8_t *dst, const uint8_t *above, int upsample_above,
int dx) {
const int frac_bits = 6 - upsample_above;
const int max_base_x = ((W + H) - 1) << upsample_above;
assert(dx > 0);
// pre-filter above pixels
// store in temp buffers:
// above[x] * 32 + 16
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
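  // In other words, each output pixel is the rounded blend
  // (above[x] * (32 - shift) + above[x + 1] * shift) / 32, where shift is
  // the 5-bit fractional position ((x & 0x3f) >> 1).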
const uint16x8_t a16 = vdupq_n_u16(16);
const uint8x8_t a_mbase_x = vdup_n_u8(above[max_base_x]);
const uint8x8_t v_32 = vdup_n_u8(32);
int x = dx;
for (int r = 0; r < W; r++) {
int base = x >> frac_bits;
int base_max_diff = (max_base_x - base) >> upsample_above;
if (base_max_diff <= 0) {
for (int i = r; i < W; ++i) {
        dst[i] = a_mbase_x;  // Fill remaining rows with the last above pixel.
}
return;
}
if (base_max_diff > H) base_max_diff = H;
uint8x8x2_t a01_128;
uint16x8_t shift;
if (upsample_above) {
a01_128 = vld2_u8(above + base);
shift = vdupq_n_u16(((x << upsample_above) & 0x3f) >> 1);
} else {
a01_128.val[0] = vld1_u8(above + base);
a01_128.val[1] = vld1_u8(above + base + 1);
shift = vdupq_n_u16((x & 0x3f) >> 1);
}
uint16x8_t diff = vsubl_u8(a01_128.val[1], a01_128.val[0]);
uint16x8_t a32 = vmlal_u8(a16, a01_128.val[0], v_32);
uint16x8_t res = vmlaq_u16(a32, diff, shift);
uint8x8_t mask = vld1_u8(BaseMask[base_max_diff]);
dst[r] = vbsl_u8(mask, vshrn_n_u16(res, 5), a_mbase_x);
x += dx;
}
}
static void dr_prediction_z1_4xN_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
uint8x8_t dstvec[16];
dr_prediction_z1_HxW_internal_neon_64(4, N, dstvec, above, upsample_above,
dx);
for (int i = 0; i < N; i++) {
vst1_lane_u32((uint32_t *)(dst + stride * i),
vreinterpret_u32_u8(dstvec[i]), 0);
}
}
static void dr_prediction_z1_8xN_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
uint8x8_t dstvec[32];
dr_prediction_z1_HxW_internal_neon_64(8, N, dstvec, above, upsample_above,
dx);
for (int i = 0; i < N; i++) {
vst1_u8(dst + stride * i, dstvec[i]);
}
}
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_neon(
int H, int W, uint8x16_t *dst, const uint8_t *above, int upsample_above,
int dx) {
const int frac_bits = 6 - upsample_above;
const int max_base_x = ((W + H) - 1) << upsample_above;
assert(dx > 0);
// pre-filter above pixels
// store in temp buffers:
// above[x] * 32 + 16
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
const uint16x8_t a16 = vdupq_n_u16(16);
const uint8x16_t a_mbase_x = vdupq_n_u8(above[max_base_x]);
const uint8x8_t v_32 = vdup_n_u8(32);
const uint8x16_t v_zero = vdupq_n_u8(0);
int x = dx;
for (int r = 0; r < W; r++) {
uint16x8x2_t res;
uint16x8_t shift;
uint8x16_t a0_128, a1_128;
int base = x >> frac_bits;
int base_max_diff = (max_base_x - base) >> upsample_above;
if (base_max_diff <= 0) {
for (int i = r; i < W; ++i) {
        dst[i] = a_mbase_x;  // Fill remaining rows with the last above pixel.
}
return;
}
if (base_max_diff > H) base_max_diff = H;
if (upsample_above) {
uint8x8x2_t v_tmp_a0_128 = vld2_u8(above + base);
a0_128 = vcombine_u8(v_tmp_a0_128.val[0], v_tmp_a0_128.val[1]);
a1_128 = vextq_u8(a0_128, v_zero, 8);
shift = vdupq_n_u16(((x << upsample_above) & 0x3f) >> 1);
} else {
a0_128 = vld1q_u8(above + base);
a1_128 = vld1q_u8(above + base + 1);
shift = vdupq_n_u16((x & 0x3f) >> 1);
}
uint16x8x2_t diff, a32;
diff.val[0] = vsubl_u8(vget_low_u8(a1_128), vget_low_u8(a0_128));
diff.val[1] = vsubl_u8(vget_high_u8(a1_128), vget_high_u8(a0_128));
a32.val[0] = vmlal_u8(a16, vget_low_u8(a0_128), v_32);
a32.val[1] = vmlal_u8(a16, vget_high_u8(a0_128), v_32);
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shift);
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shift);
uint8x16_t v_temp =
vcombine_u8(vshrn_n_u16(res.val[0], 5), vshrn_n_u16(res.val[1], 5));
uint8x16_t mask = vld1q_u8(BaseMask[base_max_diff]);
dst[r] = vbslq_u8(mask, v_temp, a_mbase_x);
x += dx;
}
}
static void dr_prediction_z1_16xN_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
uint8x16_t dstvec[64];
dr_prediction_z1_HxW_internal_neon(16, N, dstvec, above, upsample_above, dx);
for (int i = 0; i < N; i++) {
vst1q_u8(dst + stride * i, dstvec[i]);
}
}
static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_neon(
int N, uint8x16x2_t *dstvec, const uint8_t *above, int upsample_above,
int dx) {
// here upsample_above is 0 by design of av1_use_intra_edge_upsample
(void)upsample_above;
const int frac_bits = 6;
const int max_base_x = ((32 + N) - 1);
// pre-filter above pixels
// store in temp buffers:
// above[x] * 32 + 16
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
const uint8x16_t a_mbase_x = vdupq_n_u8(above[max_base_x]);
const uint16x8_t a16 = vdupq_n_u16(16);
const uint8x8_t v_32 = vdup_n_u8(32);
int x = dx;
for (int r = 0; r < N; r++) {
uint8x16_t res16[2];
int base = x >> frac_bits;
int base_max_diff = (max_base_x - base);
if (base_max_diff <= 0) {
for (int i = r; i < N; ++i) {
dstvec[i].val[0] = a_mbase_x; // save 32 values
dstvec[i].val[1] = a_mbase_x;
}
return;
}
if (base_max_diff > 32) base_max_diff = 32;
uint16x8_t shift = vdupq_n_u16((x & 0x3f) >> 1);
for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
int mdiff = base_max_diff - j;
if (mdiff <= 0) {
res16[jj] = a_mbase_x;
} else {
uint16x8x2_t a32, diff, res;
uint8x16_t a0_128, a1_128;
a0_128 = vld1q_u8(above + base + j);
a1_128 = vld1q_u8(above + base + j + 1);
diff.val[0] = vsubl_u8(vget_low_u8(a1_128), vget_low_u8(a0_128));
diff.val[1] = vsubl_u8(vget_high_u8(a1_128), vget_high_u8(a0_128));
a32.val[0] = vmlal_u8(a16, vget_low_u8(a0_128), v_32);
a32.val[1] = vmlal_u8(a16, vget_high_u8(a0_128), v_32);
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shift);
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shift);
res16[jj] =
vcombine_u8(vshrn_n_u16(res.val[0], 5), vshrn_n_u16(res.val[1], 5));
}
}
uint8x16x2_t mask;
mask.val[0] = vld1q_u8(BaseMask[base_max_diff]);
mask.val[1] = vld1q_u8(BaseMask[base_max_diff] + 16);
dstvec[r].val[0] = vbslq_u8(mask.val[0], res16[0], a_mbase_x);
dstvec[r].val[1] = vbslq_u8(mask.val[1], res16[1], a_mbase_x);
x += dx;
}
}
static void dr_prediction_z1_32xN_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
uint8x16x2_t dstvec[64];
dr_prediction_z1_32xN_internal_neon(N, dstvec, above, upsample_above, dx);
for (int i = 0; i < N; i++) {
vst1q_u8(dst + stride * i, dstvec[i].val[0]);
vst1q_u8(dst + stride * i + 16, dstvec[i].val[1]);
}
}
static void dr_prediction_z1_64xN_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, int upsample_above,
int dx) {
// here upsample_above is 0 by design of av1_use_intra_edge_upsample
(void)upsample_above;
const int frac_bits = 6;
const int max_base_x = ((64 + N) - 1);
// pre-filter above pixels
// store in temp buffers:
// above[x] * 32 + 16
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
const uint16x8_t a16 = vdupq_n_u16(16);
const uint8x16_t a_mbase_x = vdupq_n_u8(above[max_base_x]);
const uint8x16_t max_base_x128 = vdupq_n_u8(max_base_x);
const uint8x8_t v_32 = vdup_n_u8(32);
const uint8x16_t v_zero = vdupq_n_u8(0);
const uint8x16_t step = vdupq_n_u8(16);
int x = dx;
for (int r = 0; r < N; r++, dst += stride) {
int base = x >> frac_bits;
if (base >= max_base_x) {
for (int i = r; i < N; ++i) {
vst1q_u8(dst, a_mbase_x);
vst1q_u8(dst + 16, a_mbase_x);
vst1q_u8(dst + 32, a_mbase_x);
vst1q_u8(dst + 48, a_mbase_x);
dst += stride;
}
return;
}
uint16x8_t shift = vdupq_n_u16((x & 0x3f) >> 1);
uint8x16_t base_inc128 =
vaddq_u8(vdupq_n_u8(base), vcombine_u8(vcreate_u8(0x0706050403020100),
vcreate_u8(0x0F0E0D0C0B0A0908)));
for (int j = 0; j < 64; j += 16) {
int mdif = max_base_x - (base + j);
if (mdif <= 0) {
vst1q_u8(dst + j, a_mbase_x);
} else {
uint16x8x2_t a32, diff, res;
uint8x16_t a0_128, a1_128, mask128, res128;
a0_128 = vld1q_u8(above + base + j);
a1_128 = vld1q_u8(above + base + 1 + j);
diff.val[0] = vsubl_u8(vget_low_u8(a1_128), vget_low_u8(a0_128));
diff.val[1] = vsubl_u8(vget_high_u8(a1_128), vget_high_u8(a0_128));
a32.val[0] = vmlal_u8(a16, vget_low_u8(a0_128), v_32);
a32.val[1] = vmlal_u8(a16, vget_high_u8(a0_128), v_32);
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shift);
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shift);
uint8x16_t v_temp =
vcombine_u8(vshrn_n_u16(res.val[0], 5), vshrn_n_u16(res.val[1], 5));
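        // Lanes whose source index is still below max_base_x produce a
        // non-zero saturating difference and keep the interpolated value;
        // the remaining lanes fall back to a_mbase_x.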
mask128 = vcgtq_u8(vqsubq_u8(max_base_x128, base_inc128), v_zero);
res128 = vbslq_u8(mask128, v_temp, a_mbase_x);
vst1q_u8(dst + j, res128);
base_inc128 = vaddq_u8(base_inc128, step);
}
}
x += dx;
}
}
// Directional prediction, zone 1: 0 < angle < 90
void av1_dr_prediction_z1_neon(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left,
int upsample_above, int dx, int dy) {
(void)left;
(void)dy;
switch (bw) {
case 4:
dr_prediction_z1_4xN_neon(bh, dst, stride, above, upsample_above, dx);
break;
case 8:
dr_prediction_z1_8xN_neon(bh, dst, stride, above, upsample_above, dx);
break;
case 16:
dr_prediction_z1_16xN_neon(bh, dst, stride, above, upsample_above, dx);
break;
case 32:
dr_prediction_z1_32xN_neon(bh, dst, stride, above, upsample_above, dx);
break;
case 64:
dr_prediction_z1_64xN_neon(bh, dst, stride, above, upsample_above, dx);
break;
default: break;
}
}
/* ---------------------P R E D I C T I O N Z 2--------------------------- */
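// Zone 2 (90 < angle < 180) draws from both edges. For each output row the
// columns whose projected source still lands in the above row are
// interpolated from above (resx); the leading columns that fall off the
// left end of the above row are interpolated from the left column (resy);
// the two halves are then blended with BaseMask[base_min_diff].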
static DECLARE_ALIGNED(16, uint8_t, LoadMaskz2[4][16]) = {
{ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
0, 0, 0 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff }
};
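// Shift the lanes of *vec towards the high end by shift_value bytes,
// filling the vacated low lanes with zeros from *v_zero.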
static AOM_FORCE_INLINE void vector_shift_x4(uint8x8_t *vec, uint8x8_t *v_zero,
int shift_value) {
switch (shift_value) {
case 1: *vec = vext_u8(*v_zero, *vec, 7); break;
case 2: *vec = vext_u8(*v_zero, *vec, 6); break;
case 3: *vec = vext_u8(*v_zero, *vec, 5); break;
default: break;
}
}
static void dr_prediction_z2_Nx4_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left,
int upsample_above, int upsample_left,
int dx, int dy) {
const int min_base_x = -(1 << upsample_above);
const int min_base_y = -(1 << upsample_left);
const int frac_bits_x = 6 - upsample_above;
const int frac_bits_y = 6 - upsample_left;
assert(dx > 0);
// pre-filter above pixels
// store in temp buffers:
// above[x] * 32 + 16
// above[x+1] - above[x]
// final pixels will be calculated as:
// (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
uint16x8_t a0_x, a1_x, a32, diff;
uint16x8_t v_32 = vdupq_n_u16(32);
uint16x8_t v_zero = vdupq_n_u16(0);
uint16x8_t a16 = vdupq_n_u16(16);
uint8x8_t v_zero_u8 = vdup_n_u8(0);
uint16x4_t v_c3f = vdup_n_u16(0x3f);
uint16x4_t r6 = vcreate_u16(0x00C0008000400000);
int16x4_t v_upsample_left = vdup_n_s16(upsample_left);
int16x4_t v_upsample_above = vdup_n_s16(upsample_above);
int16x4_t v_1234 = vcreate_s16(0x0004000300020001);
int16x4_t dy64 = vdup_n_s16(dy);
int16x4_t v_frac_bits_y = vdup_n_s16(-frac_bits_y);
int16x4_t min_base_y64 = vdup_n_s16(min_base_y);
#if AOM_ARCH_AARCH64
// Use ext rather than loading left + 14 directly to avoid over-read.
const uint8x16_t left_m2 = vld1q_u8(left - 2);
const uint8x16_t left_0 = vld1q_u8(left);
const uint8x16_t left_14 = vextq_u8(left_0, left_0, 14);
const uint8x16x2_t left_vals = { { left_m2, left_14 } };
#endif // AOM_ARCH_AARCH64
for (int r = 0; r < N; r++) {
uint16x8_t res, shift;
uint8x8_t resx, resy;
uint16x4x2_t v_shift;
v_shift.val[1] = vdup_n_u16(0);
int y = r + 1;
int base_x = (-y * dx) >> frac_bits_x;
int base_shift = 0;
if (base_x < (min_base_x - 1)) {
base_shift = (min_base_x - base_x - 1) >> upsample_above;
}
int base_min_diff =
(min_base_x - base_x + upsample_above) >> upsample_above;
if (base_min_diff > 4) {
base_min_diff = 4;
} else {
if (base_min_diff < 0) base_min_diff = 0;
}
if (base_shift > 3) {
a0_x = v_zero;
a1_x = v_zero;
v_shift.val[0] = vreinterpret_u16_u8(v_zero_u8);
v_shift.val[1] = vreinterpret_u16_u8(v_zero_u8);
} else {
uint16x4_t ydx = vdup_n_u16(y * dx);
if (upsample_above) {
uint8x8x2_t v_tmp;
v_tmp.val[0] = vld1_u8(above + base_x + base_shift);
v_tmp.val[1] = vld1_u8(above + base_x + base_shift + 8);
uint8x8_t v_index_low = vld1_u8(EvenOddMaskx[base_shift]);
uint8x8_t v_index_high = vld1_u8(EvenOddMaskx[base_shift] + 8);
a0_x = vmovl_u8(vtbl2_u8(v_tmp, v_index_low));
a1_x = vmovl_u8(vtbl2_u8(v_tmp, v_index_high));
v_shift.val[0] = vshr_n_u16(
vand_u16(vshl_u16(vsub_u16(r6, ydx), v_upsample_above), v_c3f), 1);
} else {
uint8x8_t v_a0_x64 = vld1_u8(above + base_x + base_shift);
vector_shift_x4(&v_a0_x64, &v_zero_u8, base_shift);
uint8x8_t v_a1_x64 = vext_u8(v_a0_x64, v_zero_u8, 1);
v_shift.val[0] = vshr_n_u16(vand_u16(vsub_u16(r6, ydx), v_c3f), 1);
a0_x = vmovl_u8(v_a0_x64);
a1_x = vmovl_u8(v_a1_x64);
}
}
// y calc
if (base_x < min_base_x) {
int16x4_t v_r6 = vdup_n_s16(r << 6);
int16x4_t y_c64 = vmls_s16(v_r6, v_1234, dy64);
int16x4_t base_y_c64 = vshl_s16(y_c64, v_frac_bits_y);
uint16x4_t mask64 = vcgt_s16(min_base_y64, base_y_c64);
// Values in base_y_c64 range from -2 through 14 inclusive.
base_y_c64 = vbic_s16(base_y_c64, vreinterpret_s16_u16(mask64));
#if AOM_ARCH_AARCH64
uint8x8_t left_idx0 = vreinterpret_u8_s16(base_y_c64 + 2); // [0, 16]
uint8x8_t left_idx1 = vreinterpret_u8_s16(base_y_c64 + 3); // [1, 17]
uint8x8_t a0_y = vtrn1_u8(vqtbl2_u8(left_vals, left_idx0), v_zero_u8);
uint8x8_t a1_y = vtrn1_u8(vqtbl2_u8(left_vals, left_idx1), v_zero_u8);
#else // !AOM_ARCH_AARCH64
DECLARE_ALIGNED(32, int16_t, base_y_c[4]);
vst1_s16(base_y_c, base_y_c64);
uint8x8_t a0_y = vdup_n_u8(0);
a0_y = vld1_lane_u8(left + base_y_c[0], a0_y, 0);
a0_y = vld1_lane_u8(left + base_y_c[1], a0_y, 2);
a0_y = vld1_lane_u8(left + base_y_c[2], a0_y, 4);
a0_y = vld1_lane_u8(left + base_y_c[3], a0_y, 6);
base_y_c64 = vadd_s16(base_y_c64, vdup_n_s16(1));
vst1_s16(base_y_c, base_y_c64);
uint8x8_t a1_y = vdup_n_u8(0);
a1_y = vld1_lane_u8(left + base_y_c[0], a1_y, 0);
a1_y = vld1_lane_u8(left + base_y_c[1], a1_y, 2);
a1_y = vld1_lane_u8(left + base_y_c[2], a1_y, 4);
a1_y = vld1_lane_u8(left + base_y_c[3], a1_y, 6);
#endif // AOM_ARCH_AARCH64
if (upsample_left) {
v_shift.val[1] = vshr_n_u16(
vand_u16(vshl_u16(vreinterpret_u16_s16(y_c64), v_upsample_left),
v_c3f),
1);
} else {
v_shift.val[1] =
vshr_n_u16(vand_u16(vreinterpret_u16_s16(y_c64), v_c3f), 1);
}
a0_x = vcombine_u16(vget_low_u16(a0_x), vreinterpret_u16_u8(a0_y));
a1_x = vcombine_u16(vget_low_u16(a1_x), vreinterpret_u16_u8(a1_y));
}
shift = vcombine_u16(v_shift.val[0], v_shift.val[1]);
diff = vsubq_u16(a1_x, a0_x); // a[x+1] - a[x]
a32 = vmlaq_u16(a16, a0_x, v_32); // a[x] * 32 + 16
res = vmlaq_u16(a32, diff, shift);
resx = vshrn_n_u16(res, 5);
resy = vext_u8(resx, v_zero_u8, 4);
uint8x8_t mask = vld1_u8(BaseMask[base_min_diff]);
uint8x8_t v_resxy = vbsl_u8(mask, resy, resx);
vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(v_resxy), 0);
dst += stride;
}
}
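// 128-bit counterpart of vector_shift_x4: shift the 16 lanes of *vec up by
// shift_value bytes, filling the vacated low lanes with zeros.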
static AOM_FORCE_INLINE void vector_shuffle(uint8x16_t *vec, uint8x16_t *vzero,
int shift_value) {
switch (shift_value) {
case 1: *vec = vextq_u8(*vzero, *vec, 15); break;
case 2: *vec = vextq_u8(*vzero, *vec, 14); break;
case 3: *vec = vextq_u8(*vzero, *vec, 13); break;
case 4: *vec = vextq_u8(*vzero, *vec, 12); break;
case 5: *vec = vextq_u8(*vzero, *vec, 11); break;
case 6: *vec = vextq_u8(*vzero, *vec, 10); break;
case 7: *vec = vextq_u8(*vzero, *vec, 9); break;
case 8: *vec = vextq_u8(*vzero, *vec, 8); break;
case 9: *vec = vextq_u8(*vzero, *vec, 7); break;
case 10: *vec = vextq_u8(*vzero, *vec, 6); break;
case 11: *vec = vextq_u8(*vzero, *vec, 5); break;
case 12: *vec = vextq_u8(*vzero, *vec, 4); break;
case 13: *vec = vextq_u8(*vzero, *vec, 3); break;
case 14: *vec = vextq_u8(*vzero, *vec, 2); break;
case 15: *vec = vextq_u8(*vzero, *vec, 1); break;
default: break;
}
}
static void dr_prediction_z2_Nx8_neon(int N, uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left,
int upsample_above, int upsample_left,
int dx, int dy) {
const int min_base_x = -(1 << upsample_above);
const int min_base_y = -(1 << upsample_left);
const int frac_bits_x = 6 - upsample_above;
const int frac_bits_y = 6 - upsample_left;
  // Pre-filter the above pixels and store the intermediate terms:
  //   above[x] * 32 + 16
  //   above[x+1] - above[x]
  // The final pixels are then calculated as:
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
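  // For example (illustrative values only): above[x] = 100, above[x+1] = 108
  // and shift = 16 give (100 * 32 + 16 + 8 * 16) >> 5 = 3344 >> 5 = 104,
  // i.e. half-way between the two samples, truncated.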
uint16x8x2_t diff, a32;
uint8x16_t v_zero = vdupq_n_u8(0);
int16x8_t v_upsample_left = vdupq_n_s16(upsample_left);
int16x8_t v_upsample_above = vdupq_n_s16(upsample_above);
int16x8_t v_frac_bits_y = vdupq_n_s16(-frac_bits_y);
uint16x8_t a16 = vdupq_n_u16(16);
uint16x8_t c3f = vdupq_n_u16(0x3f);
int16x8_t min_base_y128 = vdupq_n_s16(min_base_y);
int16x8_t dy128 = vdupq_n_s16(dy);
uint16x8_t c1234 = vcombine_u16(vcreate_u16(0x0004000300020001),
vcreate_u16(0x0008000700060005));
#if AOM_ARCH_AARCH64
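  // Pre-load the left edge into a 48-byte table so that the per-row gathers
  // below can be done with tbl lookups instead of per-lane loads.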
// Use ext rather than loading left + 30 directly to avoid over-read.
const uint8x16_t left_m2 = vld1q_u8(left - 2);
const uint8x16_t left_0 = vld1q_u8(left + 0);
const uint8x16_t left_16 = vld1q_u8(left + 16);
const uint8x16_t left_14 = vextq_u8(left_0, left_16, 14);
const uint8x16_t left_30 = vextq_u8(left_16, left_16, 14);
const uint8x16x3_t left_vals = { { left_m2, left_14, left_30 } };
#endif // AOM_ARCH_AARCH64
for (int r = 0; r < N; r++) {
uint8x8_t resx, resy, resxy;
uint16x8x2_t res, shift;
shift.val[1] = vdupq_n_u16(0);
int y = r + 1;
int base_x = (-y * dx) >> frac_bits_x;
int base_shift = 0;
if (base_x < (min_base_x - 1)) {
base_shift = (min_base_x - base_x - 1) >> upsample_above;
}
int base_min_diff =
(min_base_x - base_x + upsample_above) >> upsample_above;
if (base_min_diff > 8) {
base_min_diff = 8;
} else {
if (base_min_diff < 0) base_min_diff = 0;
}
uint8x8_t a0_x0, a1_x0;
if (base_shift > 7) {
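      // Every pixel of this row comes from the left edge, so the above-edge
      // ("x") inputs are don't-cares and are simply zeroed.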
a0_x0 = vdup_n_u8(0);
a1_x0 = vdup_n_u8(0);
shift.val[0] = vreinterpretq_u16_u8(v_zero);
shift.val[1] = vreinterpretq_u16_u8(v_zero);
} else {
uint16x8_t ydx = vdupq_n_u16(y * dx);
uint16x8_t r6 =
vshlq_n_u16(vextq_u16(c1234, vreinterpretq_u16_u8(v_zero), 2), 6);
if (upsample_above) {
uint8x8x2_t v_tmp;
v_tmp.val[0] = vld1_u8(above + base_x + base_shift);
v_tmp.val[1] = vld1_u8(above + base_x + base_shift + 8);
uint8x8_t v_index_low = vld1_u8(EvenOddMaskx[base_shift]);
uint8x8_t v_index_high = vld1_u8(EvenOddMaskx[base_shift] + 8);
shift.val[0] = vshrq_n_u16(
vandq_u16(vshlq_u16(vsubq_u16(r6, ydx), v_upsample_above), c3f), 1);
a0_x0 = vtbl2_u8(v_tmp, v_index_low);
a1_x0 = vtbl2_u8(v_tmp, v_index_high);
} else {
uint8x16_t a0_x128, a1_x128;
a0_x128 = vld1q_u8(above + base_x + base_shift);
a1_x128 = vextq_u8(a0_x128, v_zero, 1);
vector_shuffle(&a0_x128, &v_zero, base_shift);
vector_shuffle(&a1_x128, &v_zero, base_shift);
shift.val[0] = vshrq_n_u16(vandq_u16(vsubq_u16(r6, ydx), c3f), 1);
a0_x0 = vget_low_u8(a0_x128);
a1_x0 = vget_low_u8(a1_x128);
}
}
diff.val[0] = vsubl_u8(a1_x0, a0_x0); // a[x+1] - a[x]
a32.val[0] = vmlal_u8(a16, a0_x0, vdup_n_u8(32)); // a[x] * 32 + 16
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shift.val[0]);
resx = vshrn_n_u16(res.val[0], 5);
// y calc
if (base_x < min_base_x) {
int16x8_t y_c128, base_y_c128;
uint16x8_t mask128;
int16x8_t v_r6 = vdupq_n_s16(r << 6);
y_c128 = vmlsq_s16(v_r6, vreinterpretq_s16_u16(c1234), dy128);
base_y_c128 = vshlq_s16(y_c128, v_frac_bits_y);
mask128 = vcgtq_s16(min_base_y128, base_y_c128);
// Values in base_y_c128 range from -2 through 31 inclusive.
base_y_c128 = vbicq_s16(base_y_c128, vreinterpretq_s16_u16(mask128));
#if AOM_ARCH_AARCH64
      // left_idx0 values lie in [0, 33]; left_idx1 values in [1, 34].
      uint8x16_t left_idx0 =
          vreinterpretq_u8_s16(vaddq_s16(base_y_c128, vdupq_n_s16(2)));
      uint8x16_t left_idx1 =
          vreinterpretq_u8_s16(vaddq_s16(base_y_c128, vdupq_n_s16(3)));
uint8x16_t left_idx01 = vuzp1q_u8(left_idx0, left_idx1);
uint8x16_t a01_x = vqtbl3q_u8(left_vals, left_idx01);
uint8x8_t a0_x1 = vget_low_u8(a01_x);
uint8x8_t a1_x1 = vget_high_u8(a01_x);
#else // !AOM_ARCH_AARCH64
DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
vst1q_s16(base_y_c, base_y_c128);
uint8x8_t a0_x1 = vdup_n_u8(0);
a0_x1 = vld1_lane_u8(left + base_y_c[0], a0_x1, 0);
a0_x1 = vld1_lane_u8(left + base_y_c[1], a0_x1, 1);
a0_x1 = vld1_lane_u8(left + base_y_c[2], a0_x1, 2);
a0_x1 = vld1_lane_u8(left + base_y_c[3], a0_x1, 3);
a0_x1 = vld1_lane_u8(left + base_y_c[4], a0_x1, 4);
a0_x1 = vld1_lane_u8(left + base_y_c[5], a0_x1, 5);
a0_x1 = vld1_lane_u8(left + base_y_c[6], a0_x1, 6);
a0_x1 = vld1_lane_u8(left + base_y_c[7], a0_x1, 7);
base_y_c128 = vaddq_s16(base_y_c128, vdupq_n_s16(1));
vst1q_s16(base_y_c, base_y_c128);
uint8x8_t a1_x1 = vdup_n_u8(0);
a1_x1 = vld1_lane_u8(left + base_y_c[0], a1_x1, 0);
a1_x1 = vld1_lane_u8(left + base_y_c[1], a1_x1, 1);
a1_x1 = vld1_lane_u8(left + base_y_c[2], a1_x1, 2);
a1_x1 = vld1_lane_u8(left + base_y_c[3], a1_x1, 3);
a1_x1 = vld1_lane_u8(left + base_y_c[4], a1_x1, 4);
a1_x1 = vld1_lane_u8(left + base_y_c[5], a1_x1, 5);
a1_x1 = vld1_lane_u8(left + base_y_c[6], a1_x1, 6);
a1_x1 = vld1_lane_u8(left + base_y_c[7], a1_x1, 7);
#endif // AOM_ARCH_AARCH64
if (upsample_left) {
shift.val[1] = vshrq_n_u16(
vandq_u16(vshlq_u16(vreinterpretq_u16_s16(y_c128), v_upsample_left),
c3f),
1);
} else {
shift.val[1] =
vshrq_n_u16(vandq_u16(vreinterpretq_u16_s16(y_c128), c3f), 1);
}
diff.val[1] = vsubl_u8(a1_x1, a0_x1);
a32.val[1] = vmlal_u8(a16, a0_x1, vdup_n_u8(32));
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shift.val[1]);
resy = vshrn_n_u16(res.val[1], 5);
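      // Select the left-edge result for the first base_min_diff pixels and
      // the above-edge result for the remainder of the row.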
uint8x8_t mask = vld1_u8(BaseMask[base_min_diff]);
resxy = vbsl_u8(mask, resy, resx);
vst1_u8(dst, resxy);
} else {
vst1_u8(dst, resx);
}
dst += stride;
}
}
static void dr_prediction_z2_HxW_neon(int H, int W, uint8_t *dst,
ptrdiff_t stride, const uint8_t *above,
const uint8_t *left, int upsample_above,
int upsample_left, int dx, int dy) {
  // Both upsample_above and upsample_left are 0 by design of
  // av1_use_intra_edge_upsample: edge upsampling is never enabled for the
  // larger block sizes handled by this function.
const int min_base_x = -1;
const int min_base_y = -1;
(void)upsample_above;
(void)upsample_left;
const int frac_bits_x = 6;
const int frac_bits_y = 6;
uint16x8x2_t a32, c0123, c1234, diff, shifty;
uint8x16x2_t a0_x, a1_x;
uint16x8_t v_32 = vdupq_n_u16(32);
uint8x16_t v_zero = vdupq_n_u8(0);
int16x8_t v_frac_bits_y = vdupq_n_s16(-frac_bits_y);
uint16x8_t a16 = vdupq_n_u16(16);
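  // c1 is a vector of ones; it supplies the +1 column offset for c1234 and
  // steps base_y to the next sample on the non-AArch64 path.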
uint16x8_t c1 = vshrq_n_u16(a16, 4);
int16x8_t min_base_y256 = vdupq_n_s16(min_base_y);
uint16x8_t c3f = vdupq_n_u16(0x3f);
int16x8_t dy256 = vdupq_n_s16(dy);
c0123.val[0] = vcombine_u16(vcreate_u16(0x0003000200010000),
vcreate_u16(0x0007000600050004));
c0123.val[1] = vcombine_u16(vcreate_u16(0x000B000A00090008),
vcreate_u16(0x000F000E000D000C));
c1234.val[0] = vaddq_u16(c0123.val[0], c1);
c1234.val[1] = vaddq_u16(c0123.val[1], c1);
#if AOM_ARCH_AARCH64
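  // Pre-load the left edge into two 64-byte tables covering left[-1]..left[62]
  // and left[0]..left[63] so that a0_y/a1_y can be gathered with tbl lookups.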
const uint8x16_t left_m1 = vld1q_u8(left - 1);
const uint8x16_t left_0 = vld1q_u8(left + 0);
const uint8x16_t left_16 = vld1q_u8(left + 16);
const uint8x16_t left_32 = vld1q_u8(left + 32);
const uint8x16_t left_48 = vld1q_u8(left + 48);
const uint8x16_t left_15 = vextq_u8(left_0, left_16, 15);
const uint8x16_t left_31 = vextq_u8(left_16, left_32, 15);
const uint8x16_t left_47 = vextq_u8(left_32, left_48, 15);
const uint8x16x4_t left_vals0 = { { left_m1, left_15, left_31, left_47 } };
const uint8x16x4_t left_vals1 = { { left_0, left_16, left_32, left_48 } };
#endif // AOM_ARCH_AARCH64
for (int r = 0; r < H; r++) {
uint16x8x2_t res, r6, shift;
uint16x8_t j256;
uint8x16_t resx, resy, resxy;
int y = r + 1;
uint16x8_t ydx = vdupq_n_u16((uint16_t)(y * dx));
int base_x = (-y * dx) >> frac_bits_x;
for (int j = 0; j < W; j += 16) {
j256 = vdupq_n_u16(j);
int base_shift = 0;
if ((base_x + j) < (min_base_x - 1)) {
base_shift = (min_base_x - (base_x + j) - 1);
}
int base_min_diff = (min_base_x - base_x - j);
if (base_min_diff > 16) {
base_min_diff = 16;
} else {
if (base_min_diff < 0) base_min_diff = 0;
}
if (base_shift < 16) {
uint8x16_t a0_x128, a1_x128;
a0_x128 = vld1q_u8(above + base_x + base_shift + j);
a1_x128 = vld1q_u8(above + base_x + base_shift + 1 + j);
vector_shuffle(&a0_x128, &v_zero, base_shift);
vector_shuffle(&a1_x128, &v_zero, base_shift);
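        // Zip with zero to widen the 8-bit samples to 16 bits.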
a0_x = vzipq_u8(a0_x128, v_zero);
a1_x = vzipq_u8(a1_x128, v_zero);
r6.val[0] = vshlq_n_u16(vaddq_u16(c0123.val[0], j256), 6);
r6.val[1] = vshlq_n_u16(vaddq_u16(c0123.val[1], j256), 6);
shift.val[0] =
vshrq_n_u16(vandq_u16(vsubq_u16(r6.val[0], ydx), c3f), 1);
shift.val[1] =
vshrq_n_u16(vandq_u16(vsubq_u16(r6.val[1], ydx), c3f), 1);
diff.val[0] =
vsubq_u16(vreinterpretq_u16_u8(a1_x.val[0]),
vreinterpretq_u16_u8(a0_x.val[0])); // a[x+1] - a[x]
diff.val[1] =
vsubq_u16(vreinterpretq_u16_u8(a1_x.val[1]),
vreinterpretq_u16_u8(a0_x.val[1])); // a[x+1] - a[x]
a32.val[0] = vmlaq_u16(a16, vreinterpretq_u16_u8(a0_x.val[0]),
v_32); // a[x] * 32 + 16
a32.val[1] = vmlaq_u16(a16, vreinterpretq_u16_u8(a0_x.val[1]),
v_32); // a[x] * 32 + 16
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shift.val[0]);
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shift.val[1]);
resx =
vcombine_u8(vshrn_n_u16(res.val[0], 5), vshrn_n_u16(res.val[1], 5));
} else {
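        // Every pixel of this 16-wide chunk comes from the left edge, so the
        // above-edge result is a don't-care.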
resx = v_zero;
}
// y calc
if (base_x < min_base_x) {
uint16x8x2_t mask256;
int16x8x2_t c256, y_c256, base_y_c256, mul16;
int16x8_t v_r6 = vdupq_n_s16(r << 6);
c256.val[0] = vaddq_s16(vreinterpretq_s16_u16(j256),
vreinterpretq_s16_u16(c1234.val[0]));
c256.val[1] = vaddq_s16(vreinterpretq_s16_u16(j256),
vreinterpretq_s16_u16(c1234.val[1]));
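        // Clamp c * dy to 0x7fff (min_base_y256 is all-ones, so the unsigned
        // shift by 1 yields 0x7fff) so the products stay non-negative when
        // reinterpreted as signed for the subtraction below.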
mul16.val[0] = vreinterpretq_s16_u16(
vminq_u16(vreinterpretq_u16_s16(vmulq_s16(c256.val[0], dy256)),
vshrq_n_u16(vreinterpretq_u16_s16(min_base_y256), 1)));
mul16.val[1] = vreinterpretq_s16_u16(
vminq_u16(vreinterpretq_u16_s16(vmulq_s16(c256.val[1], dy256)),
vshrq_n_u16(vreinterpretq_u16_s16(min_base_y256), 1)));
y_c256.val[0] = vsubq_s16(v_r6, mul16.val[0]);
y_c256.val[1] = vsubq_s16(v_r6, mul16.val[1]);
base_y_c256.val[0] = vshlq_s16(y_c256.val[0], v_frac_bits_y);
base_y_c256.val[1] = vshlq_s16(y_c256.val[1], v_frac_bits_y);
mask256.val[0] = vcgtq_s16(min_base_y256, base_y_c256.val[0]);
mask256.val[1] = vcgtq_s16(min_base_y256, base_y_c256.val[1]);
base_y_c256.val[0] =
vbslq_s16(mask256.val[0], min_base_y256, base_y_c256.val[0]);
base_y_c256.val[1] =
vbslq_s16(mask256.val[1], min_base_y256, base_y_c256.val[1]);
int16_t min_y = vgetq_lane_s16(base_y_c256.val[1], 7);
int16_t max_y = vgetq_lane_s16(base_y_c256.val[0], 0);
int16_t offset_diff = max_y - min_y;
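        // base_y decreases across the row, so lane 0 of val[0] holds the
        // largest index and lane 7 of val[1] the smallest. If the whole range
        // fits in 16 bytes, gather with a single contiguous load plus a tbl
        // shuffle rather than indexing each lane separately.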
uint8x8_t a0_y0;
uint8x8_t a0_y1;
uint8x8_t a1_y0;
uint8x8_t a1_y1;
if (offset_diff < 16) {
assert(offset_diff >= 0);
int16x8_t min_y256 =
vdupq_lane_s16(vget_high_s16(base_y_c256.val[1]), 3);
int16x8x2_t base_y_offset;
base_y_offset.val[0] = vsubq_s16(base_y_c256.val[0], min_y256);
base_y_offset.val[1] = vsubq_s16(base_y_c256.val[1], min_y256);
int8x16_t base_y_offset128 =
vcombine_s8(vqmovn_s16(base_y_offset.val[0]),
vqmovn_s16(base_y_offset.val[1]));
uint8x16_t a0_y128, a1_y128;
uint8x16_t v_loadmaskz2 = vld1q_u8(LoadMaskz2[offset_diff / 4]);
a0_y128 = vld1q_u8(left + min_y);
a0_y128 = vandq_u8(a0_y128, v_loadmaskz2);
a1_y128 = vld1q_u8(left + min_y + 1);
a1_y128 = vandq_u8(a1_y128, v_loadmaskz2);
#if AOM_ARCH_AARCH64
a0_y128 = vqtbl1q_u8(a0_y128, vreinterpretq_u8_s8(base_y_offset128));
a1_y128 = vqtbl1q_u8(a1_y128, vreinterpretq_u8_s8(base_y_offset128));
#else  // !AOM_ARCH_AARCH64
uint8x8x2_t v_tmp;
uint8x8x2_t v_res;
uint8x8_t v_index_low =
vget_low_u8(vreinterpretq_u8_s8(base_y_offset128));
uint8x8_t v_index_high =
vget_high_u8(vreinterpretq_u8_s8(base_y_offset128));
v_tmp.val[0] = vget_low_u8(a0_y128);
v_tmp.val[1] = vget_high_u8(a0_y128);
v_res.val[0] = vtbl2_u8(v_tmp, v_index_low);
v_res.val[1] = vtbl2_u8(v_tmp, v_index_high);
a0_y128 = vcombine_u8(v_res.val[0], v_res.val[1]);
v_tmp.val[0] = vget_low_u8(a1_y128);
v_tmp.val[1] = vget_high_u8(a1_y128);
v_res.val[0] = vtbl2_u8(v_tmp, v_index_low);
v_res.val[1] = vtbl2_u8(v_tmp, v_index_high);
a1_y128 = vcombine_u8(v_res.val[0], v_res.val[1]);
#endif  // AOM_ARCH_AARCH64
a0_y0 = vget_low_u8(a0_y128);
a0_y1 = vget_high_u8(a0_y128);
a1_y0 = vget_low_u8(a1_y128);
a1_y1 = vget_high_u8(a1_y128);
} else {
// Values in base_y_c256 range from -1 through 62 inclusive.
base_y_c256.val[0] = vbicq_s16(base_y_c256.val[0],
vreinterpretq_s16_u16(mask256.val[0]));
base_y_c256.val[1] = vbicq_s16(base_y_c256.val[1],
vreinterpretq_s16_u16(mask256.val[1]));
#if AOM_ARCH_AARCH64
// Values in left_idx{0,1} range from 0 through 63 inclusive.
          uint8x16_t left_idx0 = vreinterpretq_u8_s16(
              vaddq_s16(base_y_c256.val[0], vdupq_n_s16(1)));
          uint8x16_t left_idx1 = vreinterpretq_u8_s16(
              vaddq_s16(base_y_c256.val[1], vdupq_n_s16(1)));
uint8x16_t left_idx01 = vuzp1q_u8(left_idx0, left_idx1);
uint8x16_t a0_y01 = vqtbl4q_u8(left_vals0, left_idx01);
uint8x16_t a1_y01 = vqtbl4q_u8(left_vals1, left_idx01);
a0_y0 = vget_low_u8(a0_y01);
a0_y1 = vget_high_u8(a0_y01);
a1_y0 = vget_low_u8(a1_y01);
a1_y1 = vget_high_u8(a1_y01);
#else // !AOM_ARCH_AARCH64
DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
vst1q_s16(base_y_c, base_y_c256.val[0]);
vst1q_s16(base_y_c + 8, base_y_c256.val[1]);
a0_y0 = vdup_n_u8(0);
a0_y0 = vld1_lane_u8(left + base_y_c[0], a0_y0, 0);
a0_y0 = vld1_lane_u8(left + base_y_c[1], a0_y0, 1);
a0_y0 = vld1_lane_u8(left + base_y_c[2], a0_y0, 2);
a0_y0 = vld1_lane_u8(left + base_y_c[3], a0_y0, 3);
a0_y0 = vld1_lane_u8(left + base_y_c[4], a0_y0, 4);
a0_y0 = vld1_lane_u8(left + base_y_c[5], a0_y0, 5);
a0_y0 = vld1_lane_u8(left + base_y_c[6], a0_y0, 6);
a0_y0 = vld1_lane_u8(left + base_y_c[7], a0_y0, 7);
a0_y1 = vdup_n_u8(0);
a0_y1 = vld1_lane_u8(left + base_y_c[8], a0_y1, 0);
a0_y1 = vld1_lane_u8(left + base_y_c[9], a0_y1, 1);
a0_y1 = vld1_lane_u8(left + base_y_c[10], a0_y1, 2);
a0_y1 = vld1_lane_u8(left + base_y_c[11], a0_y1, 3);
a0_y1 = vld1_lane_u8(left + base_y_c[12], a0_y1, 4);
a0_y1 = vld1_lane_u8(left + base_y_c[13], a0_y1, 5);
a0_y1 = vld1_lane_u8(left + base_y_c[14], a0_y1, 6);
a0_y1 = vld1_lane_u8(left + base_y_c[15], a0_y1, 7);
base_y_c256.val[0] =
vaddq_s16(base_y_c256.val[0], vreinterpretq_s16_u16(c1));
base_y_c256.val[1] =
vaddq_s16(base_y_c256.val[1], vreinterpretq_s16_u16(c1));
vst1q_s16(base_y_c, base_y_c256.val[0]);
vst1q_s16(base_y_c + 8, base_y_c256.val[1]);
a1_y0 = vdup_n_u8(0);
a1_y0 = vld1_lane_u8(left + base_y_c[0], a1_y0, 0);
a1_y0 = vld1_lane_u8(left + base_y_c[1], a1_y0, 1);
a1_y0 = vld1_lane_u8(left + base_y_c[2], a1_y0, 2);
a1_y0 = vld1_lane_u8(left + base_y_c[3], a1_y0, 3);
a1_y0 = vld1_lane_u8(left + base_y_c[4], a1_y0, 4);
a1_y0 = vld1_lane_u8(left + base_y_c[5], a1_y0, 5);
a1_y0 = vld1_lane_u8(left + base_y_c[6], a1_y0, 6);
a1_y0 = vld1_lane_u8(left + base_y_c[7], a1_y0, 7);
a1_y1 = vdup_n_u8(0);
a1_y1 = vld1_lane_u8(left + base_y_c[8], a1_y1, 0);
a1_y1 = vld1_lane_u8(left + base_y_c[9], a1_y1, 1);
a1_y1 = vld1_lane_u8(left + base_y_c[10], a1_y1, 2);
a1_y1 = vld1_lane_u8(left + base_y_c[11], a1_y1, 3);
a1_y1 = vld1_lane_u8(left + base_y_c[12], a1_y1, 4);
a1_y1 = vld1_lane_u8(left + base_y_c[13], a1_y1, 5);
a1_y1 = vld1_lane_u8(left + base_y_c[14], a1_y1, 6);
a1_y1 = vld1_lane_u8(left + base_y_c[15], a1_y1, 7);
#endif // AOM_ARCH_AARCH64
}
shifty.val[0] = vshrq_n_u16(
vandq_u16(vreinterpretq_u16_s16(y_c256.val[0]), c3f), 1);
shifty.val[1] = vshrq_n_u16(
vandq_u16(vreinterpretq_u16_s16(y_c256.val[1]), c3f), 1);
diff.val[0] = vsubl_u8(a1_y0, a0_y0); // a[x+1] - a[x]
diff.val[1] = vsubl_u8(a1_y1, a0_y1); // a[x+1] - a[x]
a32.val[0] = vmlal_u8(a16, a0_y0, vdup_n_u8(32)); // a[x] * 32 + 16
a32.val[1] = vmlal_u8(a16, a0_y1, vdup_n_u8(32)); // a[x] * 32 + 16
res.val[0] = vmlaq_u16(a32.val[0], diff.val[0], shifty.val[0]);
res.val[1] = vmlaq_u16(a32.val[1], diff.val[1], shifty.val[1]);
resy =
vcombine_u8(vshrn_n_u16(res.val[0], 5), vshrn_n_u16(res.val[1], 5));
} else {
resy = v_zero;
}
uint8x16_t mask = vld1q_u8(BaseMask[base_min_diff]);
resxy = vbslq_u8(mask, resy, resx);
vst1q_u8(dst + j, resxy);
} // for j
dst += stride;
}
}
// Directional prediction, zone 2: 90 < angle < 180
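// Each output pixel is a 1/32-pel linear interpolation between two adjacent
// reference samples, taken from the above row while the projected position
// lies at or beyond min_base_x and from the left column once it falls below:
//   val = (ref[base] * (32 - shift) + ref[base + 1] * shift + 16) >> 5
// which the kernels above compute as
//   (ref[base] * 32 + 16 + (ref[base + 1] - ref[base]) * shift) >> 5.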
void av1_dr_prediction_z2_neon(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left,
int upsample_above, int upsample_left, int dx,
int dy) {
assert(dx > 0);
assert(dy > 0);
switch (bw) {
case 4:
dr_prediction_z2_Nx4_neon(bh, dst, stride, above, left, upsample_above,
upsample_left, dx, dy);
break;
case 8:
dr_prediction_z2_Nx8_neon(bh, dst, stride, above, left, upsample_above,
upsample_left, dx, dy);
break;
default:
dr_prediction_z2_HxW_neon(bh, bw, dst, stride, above, left,
upsample_above, upsample_left, dx, dy);
break;
}
}
/* ---------------------P R E D I C T I O N Z 3--------------------------- */
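// Transpose four rows of 16 bytes (x[0..3]) so that each output column is
// stored as four consecutive bytes across d[0].val[0..1] and d[1].val[0..1].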
static AOM_FORCE_INLINE void transpose4x16_neon(uint8x16_t *x,
uint16x8x2_t *d) {
uint8x16x2_t w0, w1;
w0 = vzipq_u8(x[0], x[1]);
w1 = vzipq_u8(x[2], x[3]);
d[0] = vzipq_u16(vreinterpretq_u16_u8(w0.val[0]),
vreinterpretq_u16_u8(w1.val[0]));
d[1] = vzipq_u16(vreinterpretq_u16_u8(w0.val[1]),
vreinterpretq_u16_u8(w1.val[1]));
}
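// As transpose4x16_neon, but for four rows of 8 bytes, producing only the
// low half (the first four columns) of the transpose.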
static AOM_FORCE_INLINE void transpose4x8_8x4_low_neon(uint8x8_t *x,
uint16x4x2_t *d) {
uint8x8x2_t w0, w1;
w0 = vzip_u8(x[0], x[1]);
w1 = vzip_u8(x[2], x[3]);
*d = vzip_u16(vreinterpret_u16_u8(w0.val[0]),