/* blob: 524b098416d6642bd661dc2de091f1177c2eb4a0 (Gitiles extraction residue;
 * kept as a comment so the file remains valid C) */
/*
* Copyright (c) 2020, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <arm_neon.h>
#include <assert.h>
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "config/aom_dsp_rtcd.h"
// Sum of squares of a 4x4 block of int16 values.
// Each 16-bit element is squared into a widened 32-bit lane; the four
// 32-bit lanes are then reduced to a single u64. A 4x4 block holds at most
// 16 * 32767^2, which fits comfortably in the unsigned 32-bit lanes.
static INLINE uint64_t aom_sum_squares_2d_i16_4x4_neon(const int16_t *src,
                                                       int stride) {
  int16x4_t row[4];
  row[0] = vld1_s16(src);
  row[1] = vld1_s16(src + stride);
  row[2] = vld1_s16(src + 2 * stride);
  row[3] = vld1_s16(src + 3 * stride);

  // Widening multiply for the first row, then multiply-accumulate the rest.
  int32x4_t acc = vmull_s16(row[0], row[0]);
  acc = vmlal_s16(acc, row[1], row[1]);
  acc = vmlal_s16(acc, row[2], row[2]);
  acc = vmlal_s16(acc, row[3], row[3]);

  return horizontal_long_add_u32x4(vreinterpretq_u32_s32(acc));
}
// Sum of squares of a 4-wide, height-tall block of int16 values.
// height is expected to be a positive multiple of 4 (the dispatcher
// guarantees this). Two independent 32-bit accumulators shorten the
// dependency chain; they are combined once before the final reduction.
static INLINE uint64_t aom_sum_squares_2d_i16_4xn_neon(const int16_t *src,
                                                       int stride, int height) {
  int32x4_t acc_a = vdupq_n_s32(0);
  int32x4_t acc_b = vdupq_n_s32(0);

  int rows_left = height;
  do {
    const int16x4_t r0 = vld1_s16(src);
    const int16x4_t r1 = vld1_s16(src + stride);
    const int16x4_t r2 = vld1_s16(src + 2 * stride);
    const int16x4_t r3 = vld1_s16(src + 3 * stride);

    acc_a = vmlal_s16(acc_a, r0, r0);
    acc_a = vmlal_s16(acc_a, r1, r1);
    acc_b = vmlal_s16(acc_b, r2, r2);
    acc_b = vmlal_s16(acc_b, r3, r3);

    src += 4 * stride;
    rows_left -= 4;
  } while (rows_left > 0);

  // NOTE(review): each u32 lane accumulates height/2 squares; assumes the
  // input magnitudes keep this within 32 bits for the block sizes used.
  return horizontal_long_add_u32x4(
      vreinterpretq_u32_s32(vaddq_s32(acc_a, acc_b)));
}
// Sum of squares of a width x height block of int16 values, where width is
// a multiple of 8 and height a multiple of 4 (guaranteed by the dispatcher).
// 32-bit strip accumulators are flushed into a 64-bit running total after
// every 4-row strip, which bounds how much is ever held in 32-bit lanes.
static INLINE uint64_t aom_sum_squares_2d_i16_nxn_neon(const int16_t *src,
                                                       int stride, int width,
                                                       int height) {
  uint64x2_t total = vdupq_n_u64(0);

  int rows_left = height;
  do {
    // Fresh accumulators for each 4-row strip: acc[0] takes the low halves
    // of each 8-wide load, acc[1] the high halves.
    int32x4_t acc[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };

    int w = 0;
    do {
      const int16_t *p = src + w;
      const int16x8_t r0 = vld1q_s16(p);
      const int16x8_t r1 = vld1q_s16(p + stride);
      const int16x8_t r2 = vld1q_s16(p + 2 * stride);
      const int16x8_t r3 = vld1q_s16(p + 3 * stride);

      acc[0] = vmlal_s16(acc[0], vget_low_s16(r0), vget_low_s16(r0));
      acc[0] = vmlal_s16(acc[0], vget_low_s16(r1), vget_low_s16(r1));
      acc[0] = vmlal_s16(acc[0], vget_low_s16(r2), vget_low_s16(r2));
      acc[0] = vmlal_s16(acc[0], vget_low_s16(r3), vget_low_s16(r3));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(r0), vget_high_s16(r0));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(r1), vget_high_s16(r1));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(r2), vget_high_s16(r2));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(r3), vget_high_s16(r3));

      w += 8;
    } while (w < width);

    // Pairwise widen-and-accumulate the strip sums into the 64-bit total.
    total = vpadalq_u32(total,
                        vreinterpretq_u32_s32(vaddq_s32(acc[0], acc[1])));

    src += 4 * stride;
    rows_left -= 4;
  } while (rows_left > 0);

  return horizontal_add_u64x2(total);
}
// Dispatch to the most specialized NEON kernel for the given block shape,
// falling back to the C reference implementation for unsupported sizes.
uint64_t aom_sum_squares_2d_i16_neon(const int16_t *src, int stride, int width,
                                     int height) {
  // A 4-wide row fills only half a SIMD register, so 4-wide blocks need
  // their own paths; 4x4 additionally dominates the call profile (over 75%
  // of all calls), so it gets a fully unrolled kernel.
  if (LIKELY(width == 4 && height == 4)) {
    return aom_sum_squares_2d_i16_4x4_neon(src, stride);
  }
  if (LIKELY(width == 4 && (height & 3) == 0)) {
    return aom_sum_squares_2d_i16_4xn_neon(src, stride, height);
  }
  // Generic case: width a multiple of 8, height a multiple of 4.
  if (LIKELY((width & 7) == 0 && (height & 3) == 0)) {
    return aom_sum_squares_2d_i16_nxn_neon(src, stride, width, height);
  }
  // Irregular shapes: defer to the scalar C implementation.
  return aom_sum_squares_2d_i16_c(src, stride, width, height);
}