/*
* Copyright (c) 2021, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 3-Clause Clear License
* and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
* License was not distributed with this source code in the LICENSE file, you
* can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
* Alliance for Open Media Patent License 1.0 was not distributed with this
* source code in the PATENTS file, you can obtain it at
* aomedia.org/license/patent-license/.
*/
#include <arm_neon.h>
#include <assert.h>
#include <stdlib.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"
static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
int16x8_t sub) {
vst1q_s16(dst + offset,
vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
}
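// Load two vectors located 'offset' elements apart and return their sum.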
static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
}
// Load half of a vector and duplicate it in the other half.
static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}
// Store half of a vector.
static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u16(val), 0);
}
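// vpaddq_u16 is only available as an intrinsic on AArch64; provide an
// equivalent for 32-bit Arm builds.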
#ifndef __aarch64__
uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif
static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
int input_stride,
uint16_t *pred_buf_q3, int width,
int height) {
const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
const int luma_stride = input_stride << 1;
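  // For 4:2:0 each prediction sample is the sum of a 2x2 luma block. The sum
  // of four samples is 4x their average, so the left shift by 1 scales it to
  // the average in Q3 precision, e.g. {10, 12, 14, 16} sums to 52 and
  // 52 << 1 == 104 == 13 << 3.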
do {
if (width == 4) {
const uint16x4_t top = vld1_u16(input);
const uint16x4_t bot = vld1_u16(input + input_stride);
const uint16x4_t sum = vadd_u16(top, bot);
const uint16x4_t hsum = vpadd_u16(sum, sum);
vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
} else if (width < 32) {
const uint16x8_t top = vld1q_u16(input);
const uint16x8_t bot = vld1q_u16(input + input_stride);
const uint16x8_t sum = vaddq_u16(top, bot);
if (width == 8) {
const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
} else {
const uint16x8_t top_1 = vld1q_u16(input + 8);
const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
}
} else {
const uint16x8x4_t top = vld4q_u16(input);
const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // Each of the following sums is equivalent to a vpaddq_u16 (because
      // vld4q interleaves); vst2q below restores the pairwise order.
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
uint16x8x2_t sum;
sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
vst2q_u16(pred_buf_q3, sum);
}
input += luma_stride;
} while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
int input_stride,
uint16_t *pred_buf_q3, int width,
int height) {
const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
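  // For 4:2:2 each prediction sample is the sum of two horizontally adjacent
  // luma samples (2x their average), so the left shift by 2 scales it to the
  // average in Q3 precision.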
do {
if (width == 4) {
const uint16x4_t top = vld1_u16(input);
const uint16x4_t hsum = vpadd_u16(top, top);
vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
} else if (width == 8) {
const uint16x4x2_t top = vld2_u16(input);
// equivalent to a vpadd_u16 (because vld2 interleaves)
const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
} else if (width == 16) {
const uint16x8x2_t top = vld2q_u16(input);
// equivalent to a vpaddq_u16 (because vld2q interleaves)
const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
} else {
const uint16x8x4_t top = vld4q_u16(input);
// equivalent to a vpaddq_u16 (because vld4q interleaves)
const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
// equivalent to a vpaddq_u16 (because vld4q interleaves)
const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
vshlq_n_u16(hsum_1, 2) } };
vst2q_u16(pred_buf_q3, result);
}
input += input_stride;
} while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
int input_stride,
uint16_t *pred_buf_q3, int width,
int height) {
const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
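  // For 4:4:4 the luma samples are used directly; the left shift by 3 simply
  // converts them to Q3 precision.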
do {
if (width == 4) {
const uint16x4_t top = vld1_u16(input);
vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
} else if (width == 8) {
const uint16x8_t top = vld1q_u16(input);
vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
} else if (width == 16) {
uint16x8x2_t top = vld2q_u16(input);
top.val[0] = vshlq_n_u16(top.val[0], 3);
top.val[1] = vshlq_n_u16(top.val[1], 3);
vst2q_u16(pred_buf_q3, top);
} else {
uint16x8x4_t top = vld4q_u16(input);
top.val[0] = vshlq_n_u16(top.val[0], 3);
top.val[1] = vshlq_n_u16(top.val[1], 3);
top.val[2] = vshlq_n_u16(top.val[2], 3);
top.val[3] = vshlq_n_u16(top.val[3], 3);
vst4q_u16(pred_buf_q3, top);
}
input += input_stride;
} while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
CFL_GET_SUBSAMPLE_FUNCTION(neon)
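// Compute the average of the Q3 prediction buffer and subtract it from every
// sample, leaving a zero-mean AC contribution in dst.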
static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
int width, int height,
int round_offset,
const int num_pel_log2) {
const uint16_t *const end = src + height * CFL_BUF_LINE;
// Round offset is not needed, because NEON will handle the rounding.
(void)round_offset;
  // To optimize the use of the CPU pipeline, we process 4 rows per iteration.
const int step = 4 * CFL_BUF_LINE;
  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers that only require 15 bits. By using
  // unsigned integers for the sum, we can do one addition operation inside 16
  // bits (8 lanes) before having to convert to 32 bits (4 lanes).
const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = vdupq_n_u32(0);
do {
// For all widths, we load, add and combine the data so it fits in 4 lanes.
if (width == 4) {
const uint16x4_t a0 =
vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
} else if (width == 8) {
const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
const uint16x8_t a1 =
vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
sum_32x4 = vpadalq_u16(sum_32x4, a0);
sum_32x4 = vpadalq_u16(sum_32x4, a1);
} else {
const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
sum_32x4 = vpadalq_u16(sum_32x4, row0);
sum_32x4 = vpadalq_u16(sum_32x4, row1);
sum_32x4 = vpadalq_u16(sum_32x4, row2);
sum_32x4 = vpadalq_u16(sum_32x4, row3);
if (width == 32) {
const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
const uint16x8_t row2_1 =
vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
const uint16x8_t row3_1 =
vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);
sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
}
}
sum_buf += step;
} while (sum_buf < end);
// Permute and add in such a way that each lane contains the block sum.
// [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#ifdef __aarch64__
sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
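  // Without vpaddq_u32, add the vector to a copy with its 64-bit halves
  // swapped, then add the result to a copy with each pair of 32-bit lanes
  // swapped; every lane then holds the full block sum.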
uint32x4_t flip =
vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
sum_32x4 = vaddq_u32(sum_32x4, flip);
sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif
// Computing the average could be done using scalars, but getting off the NEON
// engine introduces latency, so we use vqrshrn.
int16x4_t avg_16x4;
// Constant propagation makes for some ugly code.
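  // vqrshrn_n_u32 requires an immediate shift amount, so each block size needs
  // its own case. The rounding narrowing shift by num_pel_log2 divides the sum
  // by the number of pixels, producing the 16-bit average.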
switch (num_pel_log2) {
case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
case 10:
avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
break;
default: assert(0);
}
if (width == 4) {
do {
vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
src += CFL_BUF_LINE;
dst += CFL_BUF_LINE;
} while (src < end);
} else {
const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
do {
vldsubstq_s16(dst, src, 0, avg_16x8);
vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);
if (width > 8) {
vldsubstq_s16(dst, src, 8, avg_16x8);
vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
}
if (width == 32) {
vldsubstq_s16(dst, src, 16, avg_16x8);
vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 24, avg_16x8);
vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
}
src += step;
dst += step;
} while (src < end);
}
}
CFL_SUB_AVG_FN(neon)
// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
// practice, as scaled_luma is the multiplication of two absolute values.
// * In the Intel equivalent, elements in a are zeroed out when the
// corresponding elements in b are zero. Because vsign is used twice in a
// row, with b in the first call becoming a in the second call, there's no
// impact from not zeroing out.
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
const int16x4_t mask = vshr_n_s16(b, 15);
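  // mask is all ones for lanes where b is negative and all zeros otherwise;
  // (a + mask) ^ mask negates those lanes (two's complement) and leaves the
  // others unchanged.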
return veor_s16(vadd_s16(a, mask), mask);
}
// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
// * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
// practice, as scaled_luma is the multiplication of two absolute values.
// * In the Intel equivalent, elements in a are zeroed out when the
// corresponding elements in b are zero. Because vsignq is used twice in a
// row, with b in the first call becoming a in the second call, there's no
// impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
const int16x8_t mask = vshrq_n_s16(b, 15);
return veorq_s16(vaddq_s16(a, mask), mask);
}
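// Predict one vector of chroma samples: scale |ac_q3| by |alpha| with
// vqrdmulh (rounding doubling multiply, high half), restore the sign of
// alpha * ac, and add the DC prediction.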
static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
int16x4_t alpha_sign, int abs_alpha_q12,
int16x4_t dc) {
const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}
static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
int16x8_t alpha_sign, int abs_alpha_q12,
int16x8_t dc) {
const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}
static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
int16x8_t alpha_sign, int abs_alpha_q12,
int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
const int16x8_t scaled_luma_0 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
const int16x8_t scaled_luma_1 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
int16x8x2_t result;
result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
return result;
}
static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
int16x8_t alpha_sign, int abs_alpha_q12,
int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
const int16x8_t scaled_luma_0 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
const int16x8_t scaled_luma_1 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
const int16x8_t scaled_luma_2 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
const int16x8_t scaled_luma_3 =
vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
int16x8x4_t result;
result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
return result;
}
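// Clamp the signed predictions into the valid pixel range [0, max] and
// reinterpret them as unsigned for the store.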
static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
}
static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
}
static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
uint16x8x2_t result;
result.val[0] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
result.val[1] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
return result;
}
static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
uint16x8x4_t result;
result.val[0] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
result.val[1] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
result.val[2] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
result.val[3] = vreinterpretq_u16_s16(
vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
return result;
}
static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
uint16_t *dst, int dst_stride,
int alpha_q3, int bd, int width,
int height) {
const int max = (1 << bd) - 1;
const int16_t abs_alpha_q12 = abs(alpha_q3) << (9 - CFL_ADD_BITS_ALPHA);
const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
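  // The destination buffer already holds the DC prediction, which is constant
  // over the block, so it is read once and broadcast to all lanes.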
if (width == 4) {
const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
const int16x4_t dc = vdup_n_s16(*dst);
const int16x4_t max_16x4 = vdup_n_s16(max);
do {
const int16x4_t scaled_luma =
predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
dst += dst_stride;
} while ((pred_buf_q3 += CFL_BUF_LINE) < end);
} else {
const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
const int16x8_t dc = vdupq_n_s16(*dst);
const int16x8_t max_16x8 = vdupq_n_s16(max);
do {
if (width == 8) {
const int16x8_t pred =
predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
vst1q_u16(dst, clampq_s16(pred, max_16x8));
} else if (width == 16) {
const int16x8x2_t pred =
predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
} else {
const int16x8x4_t pred =
predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
}
dst += dst_stride;
} while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
}
CFL_PREDICT_FN(neon, hbd)