| /* |
| * Copyright (c) 2021, Alliance for Open Media. All rights reserved |
| * |
| * This source code is subject to the terms of the BSD 3-Clause Clear License |
| * and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear |
| * License was not distributed with this source code in the LICENSE file, you |
| * can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the |
| * Alliance for Open Media Patent License 1.0 was not distributed with this |
| * source code in the PATENTS file, you can obtain it at |
| * aomedia.org/license/patent-license/. |
| */ |
| |
| #include <arm_neon.h> |
| #include <assert.h> |
| |
#include "aom_dsp/txfm_common.h"
#include "aom_ports/mem.h"
#include "av1/common/av1_txfm.h"
#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
#include "config/aom_config.h"
#include "config/av1_rtcd.h"
| |
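// Half-butterfly helpers: compute round_shift(w0 * n0 +/- w1 * n1, bit) on
// four lanes at once. v_bit holds the negated shift amount, so vrshlq_s32
// performs a rounding right shift.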
| static INLINE int32x4_t half_btf_neon(const int32_t *w0, const int32x4_t *n0, |
| const int32_t *w1, const int32x4_t *n1, |
| const int32x4_t v_bit) { |
| int32x4_t x; |
| x = vmulq_n_s32(*n0, *w0); |
| x = vmlaq_n_s32(x, *n1, *w1); |
| x = vrshlq_s32(x, v_bit); |
| return x; |
| } |
| |
| static INLINE int32x4_t half_btf_neon_m(const int32_t *w0, const int32x4_t *n0, |
| const int32_t *w1, const int32x4_t *n1, |
| const int32x4_t v_bit) { |
| int32x4_t x; |
| x = vmulq_n_s32(*n0, *w0); |
| x = vmlsq_n_s32(x, *n1, *w1); |
| x = vrshlq_s32(x, v_bit); |
| return x; |
| } |
| |
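// Transpose a 4x4 tile held in four int32x4_t registers. vtrnq_s32 swaps the
// 2x2 sub-blocks; 64-bit zips (AArch64) or vextq_s32 rotations (AArch32,
// which lacks vzip1q_s64/vzip2q_s64) then recombine the halves.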
| #if defined(__aarch64__) |
| #define TRANSPOSE_4X4(x0, x1, x2, x3, y0, y1, y2, y3) \ |
| do { \ |
| int32x4x2_t swap_low = vtrnq_s32(x0, x1); \ |
| int32x4x2_t swap_high = vtrnq_s32(x2, x3); \ |
| y0 = vreinterpretq_s32_s64( \ |
| vzip1q_s64(vreinterpretq_s64_s32(swap_low.val[0]), \ |
| vreinterpretq_s64_s32(swap_high.val[0]))); \ |
| y1 = vreinterpretq_s32_s64( \ |
| vzip1q_s64(vreinterpretq_s64_s32(swap_low.val[1]), \ |
| vreinterpretq_s64_s32(swap_high.val[1]))); \ |
| y2 = vreinterpretq_s32_s64( \ |
| vzip2q_s64(vreinterpretq_s64_s32(swap_low.val[0]), \ |
| vreinterpretq_s64_s32(swap_high.val[0]))); \ |
| y3 = vreinterpretq_s32_s64( \ |
| vzip2q_s64(vreinterpretq_s64_s32(swap_low.val[1]), \ |
| vreinterpretq_s64_s32(swap_high.val[1]))); \ |
| } while (0) |
| #else |
| #define TRANSPOSE_4X4(x0, x1, x2, x3, y0, y1, y2, y3) \ |
| do { \ |
| int32x4x2_t swap_low = vtrnq_s32(x0, x1); \ |
| int32x4x2_t swap_high = vtrnq_s32(x2, x3); \ |
| y0 = vextq_s32(vextq_s32(swap_low.val[0], swap_low.val[0], 2), \ |
| swap_high.val[0], 2); \ |
| y1 = vextq_s32(vextq_s32(swap_low.val[1], swap_low.val[1], 2), \ |
| swap_high.val[1], 2); \ |
| y2 = vextq_s32(swap_low.val[0], \ |
| vextq_s32(swap_high.val[0], swap_high.val[0], 2), 2); \ |
| y3 = vextq_s32(swap_low.val[1], \ |
| vextq_s32(swap_high.val[1], swap_high.val[1], 2), 2); \ |
| } while (0) |
#endif  // defined(__aarch64__)
| |
| static INLINE void transpose_8x8(const int32x4_t *in, int32x4_t *out) { |
| TRANSPOSE_4X4(in[0], in[2], in[4], in[6], out[0], out[2], out[4], out[6]); |
| TRANSPOSE_4X4(in[1], in[3], in[5], in[7], out[8], out[10], out[12], out[14]); |
| TRANSPOSE_4X4(in[8], in[10], in[12], in[14], out[1], out[3], out[5], out[7]); |
| TRANSPOSE_4X4(in[9], in[11], in[13], in[15], out[9], out[11], out[13], |
| out[15]); |
| } |
| |
| static INLINE void transpose_16x16(const int32x4_t *in, int32x4_t *out) { |
| // Upper left 8x8 |
| TRANSPOSE_4X4(in[0], in[4], in[8], in[12], out[0], out[4], out[8], out[12]); |
| TRANSPOSE_4X4(in[1], in[5], in[9], in[13], out[16], out[20], out[24], |
| out[28]); |
| TRANSPOSE_4X4(in[16], in[20], in[24], in[28], out[1], out[5], out[9], |
| out[13]); |
| TRANSPOSE_4X4(in[17], in[21], in[25], in[29], out[17], out[21], out[25], |
| out[29]); |
| |
| // Upper right 8x8 |
| TRANSPOSE_4X4(in[2], in[6], in[10], in[14], out[32], out[36], out[40], |
| out[44]); |
| TRANSPOSE_4X4(in[3], in[7], in[11], in[15], out[48], out[52], out[56], |
| out[60]); |
| TRANSPOSE_4X4(in[18], in[22], in[26], in[30], out[33], out[37], out[41], |
| out[45]); |
| TRANSPOSE_4X4(in[19], in[23], in[27], in[31], out[49], out[53], out[57], |
| out[61]); |
| |
| // Lower left 8x8 |
| TRANSPOSE_4X4(in[32], in[36], in[40], in[44], out[2], out[6], out[10], |
| out[14]); |
| TRANSPOSE_4X4(in[33], in[37], in[41], in[45], out[18], out[22], out[26], |
| out[30]); |
| TRANSPOSE_4X4(in[48], in[52], in[56], in[60], out[3], out[7], out[11], |
| out[15]); |
| TRANSPOSE_4X4(in[49], in[53], in[57], in[61], out[19], out[23], out[27], |
| out[31]); |
| // Lower right 8x8 |
| TRANSPOSE_4X4(in[34], in[38], in[42], in[46], out[34], out[38], out[42], |
| out[46]); |
| TRANSPOSE_4X4(in[35], in[39], in[43], in[47], out[50], out[54], out[58], |
| out[62]); |
| TRANSPOSE_4X4(in[50], in[54], in[58], in[62], out[35], out[39], out[43], |
| out[47]); |
| TRANSPOSE_4X4(in[51], in[55], in[59], in[63], out[51], out[55], out[59], |
| out[63]); |
| } |
| |
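// Scale a rectangular transform: rounding right shift by `bit`, multiply by
// `val` (a NewSqrt2-style rounding constant, hence the vector name), then
// round back down by NewSqrt2Bits.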
| static INLINE void av1_round_shift_rect_array_32_neon(int32x4_t *input, |
| int32x4_t *output, |
| const int size, |
| const int bit, |
| const int val) { |
| const int32x4_t sqrt2 = vdupq_n_s32(val); |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
  for (int i = 0; i < size; i++) {
| const int32x4_t r0 = vrshlq_s32(input[i], v_bit); |
| const int32x4_t r1 = vmulq_s32(sqrt2, r0); |
| output[i] = vrshrq_n_s32(r1, NewSqrt2Bits); |
| } |
| } |
| |
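// Full butterfly: out0 = round_shift(w0 * in0 + w1 * in1, cos_bit) and
// out1 = round_shift(w1 * in0 - w0 * in1, cos_bit). Type 1 reuses the type 0
// kernel with the weights and inputs swapped.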
| #define btf_32_neon_type0(w0, w1, in0, in1, out0, out1, v_cos_bit) \ |
| do { \ |
| out0 = vmulq_n_s32(in0, w0); \ |
| out0 = vmlaq_n_s32(out0, in1, w1); \ |
| out0 = vrshlq_s32(out0, v_cos_bit); \ |
| out1 = vmulq_n_s32(in0, w1); \ |
| out1 = vmlsq_n_s32(out1, in1, w0); \ |
| out1 = vrshlq_s32(out1, v_cos_bit); \ |
| } while (0) |
| |
| #define btf_32_neon_type1(w0, w1, in0, in1, out0, out1, bit) \ |
| do { \ |
| btf_32_neon_type0(w1, w0, in1, in0, out0, out1, bit); \ |
| } while (0) |
| |
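// Load a 4x4 block of int16 residuals, optionally flipping it vertically
// (flipud) and/or horizontally (fliplr), then widen to int32 and apply the
// stage-0 scaling shift.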
| static INLINE void load_buffer_4x4(const int16_t *input, int32x4_t *in, |
| int stride, int flipud, int fliplr, |
| const int32x4_t *v_shift) { |
| int16x4_t v0, v1, v2, v3; |
| |
| if (!flipud) { |
| v0 = vld1_s16(input + 0 * stride); |
| v1 = vld1_s16(input + 1 * stride); |
| v2 = vld1_s16(input + 2 * stride); |
| v3 = vld1_s16(input + 3 * stride); |
| } else { |
| v0 = vld1_s16(input + 3 * stride); |
| v1 = vld1_s16(input + 2 * stride); |
| v2 = vld1_s16(input + 1 * stride); |
| v3 = vld1_s16(input + 0 * stride); |
| } |
| |
| if (fliplr) { |
| v0 = vrev64_s16(v0); |
| v1 = vrev64_s16(v1); |
| v2 = vrev64_s16(v2); |
| v3 = vrev64_s16(v3); |
| } |
| in[0] = vshlq_s32(vmovl_s16(v0), *v_shift); |
| in[1] = vshlq_s32(vmovl_s16(v1), *v_shift); |
| in[2] = vshlq_s32(vmovl_s16(v2), *v_shift); |
| in[3] = vshlq_s32(vmovl_s16(v3), *v_shift); |
| } |
| |
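// 4-point forward DCT on four lanes at a time; num_col is the stride (in
// vectors) between transform rows. Ends with an in-register transpose so the
// output is laid out for the next pass.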
| static void fdct4x4_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int num_col) { |
| const int32_t *cospi = cospi_arr(bit); |
| const int32x4_t cospi32 = vdupq_n_s32(cospi[32]); |
| const int32x4_t cospi48 = vdupq_n_s32(cospi[48]); |
| const int32x4_t cospi16 = vdupq_n_s32(cospi[16]); |
| int32x4_t s0, s1, s2, s3; |
| int32x4_t u0, u1, u2, u3; |
| int32x4_t v0, v2; |
| |
| int endidx = 3 * num_col; |
| s0 = vaddq_s32(in[0], in[endidx]); |
| s3 = vsubq_s32(in[0], in[endidx]); |
| endidx -= num_col; |
| s1 = vaddq_s32(in[num_col], in[endidx]); |
| s2 = vsubq_s32(in[num_col], in[endidx]); |
| |
| u0 = vmulq_s32(s0, cospi32); |
| u1 = vmulq_s32(s1, cospi32); |
| u2 = vaddq_s32(u0, u1); |
| v0 = vsubq_s32(u0, u1); |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| u0 = vrshlq_s32(u2, v_bit); |
| u2 = vrshlq_s32(v0, v_bit); |
| |
| v0 = vmulq_s32(s2, cospi48); |
| v2 = vmlaq_s32(v0, s3, cospi16); |
| |
| u1 = vrshlq_s32(v2, v_bit); |
| |
| v0 = vmulq_s32(s3, cospi48); |
| v2 = vmlsq_s32(v0, s2, cospi16); |
| |
| u3 = vrshlq_s32(v2, v_bit); |
| |
| TRANSPOSE_4X4(u0, u1, u2, u3, out[0], out[1], out[2], out[3]); |
| } |
| |
| static INLINE void write_buffer_4x4(int32x4_t *res, int32_t *output) { |
| vst1q_s32((output + 0 * 4), res[0]); |
| vst1q_s32((output + 1 * 4), res[1]); |
| vst1q_s32((output + 2 * 4), res[2]); |
| vst1q_s32((output + 3 * 4), res[3]); |
| } |
| |
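// 4-point forward ADST. Unlike the longer ADSTs, the 4-point kernel is built
// from the sinpi table rather than cospi butterflies.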
| static void fadst4x4_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int num_col) { |
| const int32_t *sinpi = sinpi_arr(bit); |
| const int32x4_t sinpi4x = vld1q_s32(&sinpi[1]); |
| |
| const int32x4_t sinpi1 = vdupq_lane_s32(vget_low_s32(sinpi4x), 0); |
| const int32x4_t sinpi2 = vdupq_lane_s32(vget_low_s32(sinpi4x), 1); |
| const int32x4_t sinpi3 = vdupq_lane_s32(vget_high_s32(sinpi4x), 0); |
| const int32x4_t sinpi4 = vdupq_lane_s32(vget_high_s32(sinpi4x), 1); |
| int32x4_t t; |
| int32x4_t s0, s1, s2, s3, s7; |
| int32x4_t x0, x1, x2, x3; |
| int32x4_t u0, u1, u2, u3; |
| |
| int idx = 0 * num_col; |
| s0 = vmulq_s32(in[idx], sinpi1); |
| s1 = vmulq_s32(in[idx], sinpi4); |
| t = vaddq_s32(in[idx], in[idx + num_col]); |
| idx += 2 * num_col; |
| x3 = vmulq_s32(in[idx], sinpi3); |
| idx += num_col; |
| s7 = vsubq_s32(t, in[idx]); |
| |
| t = vmlaq_s32(s0, in[idx - 2 * num_col], sinpi2); |
| x0 = vmlaq_s32(t, in[idx], sinpi4); |
| x1 = vmulq_s32(s7, sinpi3); |
| t = vmlsq_s32(s1, in[idx - 2 * num_col], sinpi1); |
| x2 = vmlaq_s32(t, in[idx], sinpi2); |
| |
| s0 = vaddq_s32(x0, x3); |
| s1 = x1; |
| s2 = vsubq_s32(x2, x3); |
| t = vsubq_s32(x2, x0); |
| s3 = vaddq_s32(t, x3); |
| |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| u0 = vrshlq_s32(s0, v_bit); |
| u1 = vrshlq_s32(s1, v_bit); |
| u2 = vrshlq_s32(s2, v_bit); |
| u3 = vrshlq_s32(s3, v_bit); |
| |
| TRANSPOSE_4X4(u0, u1, u2, u3, out[0], out[1], out[2], out[3]); |
| } |
| static void idtx4x4_neon(int32x4_t *in, int32x4_t *out, int bit, int col_num) { |
| (void)bit; |
| int32x4_t fact = vdupq_n_s32(NewSqrt2); |
| int32x4_t a_low; |
| |
  for (int i = 0; i < 4; i++) {
| a_low = vmulq_s32(in[i * col_num], fact); |
| out[i] = vrshrq_n_s32(a_low, NewSqrt2Bits); |
| } |
| |
| TRANSPOSE_4X4(out[0], out[1], out[2], out[3], out[0], out[1], out[2], out[3]); |
| } |
| void av1_fwd_txfm2d_4x4_neon(const int16_t *input, int32_t *coeff, |
| int input_stride, TX_TYPE tx_type, int bd) { |
| int32x4_t in[4]; |
| const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X4]; |
| const int txw_idx = get_txw_idx(TX_4X4); |
| const int txh_idx = get_txh_idx(TX_4X4); |
| int32x4_t v_shift0 = vdupq_n_s32(shift[0]); |
| switch (tx_type) { |
| case DCT_DCT: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case ADST_DCT: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case DCT_ADST: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case ADST_ADST: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case FLIPADST_DCT: |
| load_buffer_4x4(input, in, input_stride, 1, 0, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case DCT_FLIPADST: |
| load_buffer_4x4(input, in, input_stride, 0, 1, &v_shift0); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case FLIPADST_FLIPADST: |
| load_buffer_4x4(input, in, input_stride, 1, 1, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case ADST_FLIPADST: |
| load_buffer_4x4(input, in, input_stride, 0, 1, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case FLIPADST_ADST: |
| load_buffer_4x4(input, in, input_stride, 1, 0, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case IDTX: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case V_DCT: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fdct4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case H_DCT: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
      fdct4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
| write_buffer_4x4(in, coeff); |
| break; |
| case V_ADST: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case H_ADST: |
| load_buffer_4x4(input, in, input_stride, 0, 0, &v_shift0); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
      fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
| write_buffer_4x4(in, coeff); |
| break; |
| case V_FLIPADST: |
| load_buffer_4x4(input, in, input_stride, 1, 0, &v_shift0); |
      fadst4x4_neon(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| case H_FLIPADST: |
| load_buffer_4x4(input, in, input_stride, 0, 1, &v_shift0); |
| idtx4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| fadst4x4_neon(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1); |
| write_buffer_4x4(in, coeff); |
| break; |
| default: assert(0); |
| } |
| (void)bd; |
| } |
| |
| static INLINE void load_buffer_8x8(const int16_t *input, int32x4_t *in, |
| int stride, int flipud, int fliplr, |
| const int shift) { |
| if (!flipud) { |
| in[0] = vreinterpretq_s32_s16(vld1q_s16((input + 0 * stride))); |
| in[1] = vreinterpretq_s32_s16(vld1q_s16((input + 1 * stride))); |
| in[2] = vreinterpretq_s32_s16(vld1q_s16((input + 2 * stride))); |
| in[3] = vreinterpretq_s32_s16(vld1q_s16((input + 3 * stride))); |
| in[4] = vreinterpretq_s32_s16(vld1q_s16((input + 4 * stride))); |
| in[5] = vreinterpretq_s32_s16(vld1q_s16((input + 5 * stride))); |
| in[6] = vreinterpretq_s32_s16(vld1q_s16((input + 6 * stride))); |
| in[7] = vreinterpretq_s32_s16(vld1q_s16((input + 7 * stride))); |
| } else { |
| in[0] = vreinterpretq_s32_s16(vld1q_s16((input + 7 * stride))); |
| in[1] = vreinterpretq_s32_s16(vld1q_s16((input + 6 * stride))); |
| in[2] = vreinterpretq_s32_s16(vld1q_s16((input + 5 * stride))); |
| in[3] = vreinterpretq_s32_s16(vld1q_s16((input + 4 * stride))); |
| in[4] = vreinterpretq_s32_s16(vld1q_s16((input + 3 * stride))); |
| in[5] = vreinterpretq_s32_s16(vld1q_s16((input + 2 * stride))); |
| in[6] = vreinterpretq_s32_s16(vld1q_s16((input + 1 * stride))); |
| in[7] = vreinterpretq_s32_s16(vld1q_s16((input + 0 * stride))); |
| } |
| |
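  // Reverse each row of eight int16 values: vrev64q_s16 reverses within each
  // 64-bit half and vextq_s32 then swaps the two halves.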
| if (fliplr) { |
| in[0] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[0]))); |
| in[0] = vextq_s32(in[0], in[0], 2); |
| in[1] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[1]))); |
| in[1] = vextq_s32(in[1], in[1], 2); |
| in[2] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[2]))); |
| in[2] = vextq_s32(in[2], in[2], 2); |
| in[3] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[3]))); |
| in[3] = vextq_s32(in[3], in[3], 2); |
| in[4] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[4]))); |
| in[4] = vextq_s32(in[4], in[4], 2); |
| in[5] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[5]))); |
| in[5] = vextq_s32(in[5], in[5], 2); |
| in[6] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[6]))); |
| in[6] = vextq_s32(in[6], in[6], 2); |
| in[7] = vreinterpretq_s32_s16(vrev64q_s16(vreinterpretq_s16_s32(in[7]))); |
| in[7] = vextq_s32(in[7], in[7], 2); |
| } |
| |
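  // Widen the eight int16 rows into int32 pairs in[0..15]. Rows 4..7 are
  // expanded into in[8..15] first, and rows 3..0 are then widened in
  // descending order so no row is overwritten before it has been read.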
| int16x4_t u = vget_high_s16(vreinterpretq_s16_s32(in[4])); |
| in[8] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[4]))); |
| in[9] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[5])); |
| in[10] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[5]))); |
| in[11] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[6])); |
| in[12] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[6]))); |
| in[13] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[7])); |
| in[14] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[7]))); |
| in[15] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[3])); |
| in[6] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[3]))); |
| in[7] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[2])); |
| in[4] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[2]))); |
| in[5] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[1])); |
| in[2] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[1]))); |
| in[3] = vmovl_s16(u); |
| |
| u = vget_high_s16(vreinterpretq_s16_s32(in[0])); |
| in[0] = vmovl_s16(vget_low_s16(vreinterpretq_s16_s32(in[0]))); |
| in[1] = vmovl_s16(u); |
| |
| const int32x4_t v_shift = vdupq_n_s32(shift); |
| |
| in[0] = vshlq_s32(in[0], v_shift); |
| in[1] = vshlq_s32(in[1], v_shift); |
| in[2] = vshlq_s32(in[2], v_shift); |
| in[3] = vshlq_s32(in[3], v_shift); |
| in[4] = vshlq_s32(in[4], v_shift); |
| in[5] = vshlq_s32(in[5], v_shift); |
| in[6] = vshlq_s32(in[6], v_shift); |
| in[7] = vshlq_s32(in[7], v_shift); |
| |
| in[8] = vshlq_s32(in[8], v_shift); |
| in[9] = vshlq_s32(in[9], v_shift); |
| in[10] = vshlq_s32(in[10], v_shift); |
| in[11] = vshlq_s32(in[11], v_shift); |
| in[12] = vshlq_s32(in[12], v_shift); |
| in[13] = vshlq_s32(in[13], v_shift); |
| in[14] = vshlq_s32(in[14], v_shift); |
| in[15] = vshlq_s32(in[15], v_shift); |
| } |
| |
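// Rounding shift between the column and row passes. The shift value held in
// *v_shift is negative for the forward transforms, so vrshlq_s32 performs a
// rounding right shift.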
| static INLINE void col_txfm_8x8_rounding(int32x4_t *in, |
| const int32x4_t *v_shift) { |
| in[0] = vrshlq_s32(in[0], *v_shift); |
| in[1] = vrshlq_s32(in[1], *v_shift); |
| in[2] = vrshlq_s32(in[2], *v_shift); |
| in[3] = vrshlq_s32(in[3], *v_shift); |
| in[4] = vrshlq_s32(in[4], *v_shift); |
| in[5] = vrshlq_s32(in[5], *v_shift); |
| in[6] = vrshlq_s32(in[6], *v_shift); |
| in[7] = vrshlq_s32(in[7], *v_shift); |
| in[8] = vrshlq_s32(in[8], *v_shift); |
| in[9] = vrshlq_s32(in[9], *v_shift); |
| in[10] = vrshlq_s32(in[10], *v_shift); |
| in[11] = vrshlq_s32(in[11], *v_shift); |
| in[12] = vrshlq_s32(in[12], *v_shift); |
| in[13] = vrshlq_s32(in[13], *v_shift); |
| in[14] = vrshlq_s32(in[14], *v_shift); |
| in[15] = vrshlq_s32(in[15], *v_shift); |
| } |
| |
| static INLINE void col_txfm_4x8_rounding(int32x4_t *in, |
| const int32x4_t *v_shift) { |
| in[0] = vrshlq_s32(in[0], *v_shift); |
| in[1] = vrshlq_s32(in[1], *v_shift); |
| in[2] = vrshlq_s32(in[2], *v_shift); |
| in[3] = vrshlq_s32(in[3], *v_shift); |
| in[4] = vrshlq_s32(in[4], *v_shift); |
| in[5] = vrshlq_s32(in[5], *v_shift); |
| in[6] = vrshlq_s32(in[6], *v_shift); |
| in[7] = vrshlq_s32(in[7], *v_shift); |
| } |
| |
| static INLINE void write_buffer_8x8(const int32x4_t *res, int32_t *output) { |
| vst1q_s32(output + 0 * 4, res[0]); |
| vst1q_s32(output + 1 * 4, res[1]); |
| vst1q_s32(output + 2 * 4, res[2]); |
| vst1q_s32(output + 3 * 4, res[3]); |
| |
| vst1q_s32(output + 4 * 4, res[4]); |
| vst1q_s32(output + 5 * 4, res[5]); |
| vst1q_s32(output + 6 * 4, res[6]); |
| vst1q_s32(output + 7 * 4, res[7]); |
| |
| vst1q_s32(output + 8 * 4, res[8]); |
| vst1q_s32(output + 9 * 4, res[9]); |
| vst1q_s32(output + 10 * 4, res[10]); |
| vst1q_s32(output + 11 * 4, res[11]); |
| |
| vst1q_s32(output + 12 * 4, res[12]); |
| vst1q_s32(output + 13 * 4, res[13]); |
| vst1q_s32(output + 14 * 4, res[14]); |
| vst1q_s32(output + 15 * 4, res[15]); |
| } |
| |
| static INLINE void write_buffer_16x8(const int32x4_t *res, int32_t *output, |
| const int stride) { |
| vst1q_s32(output, res[0]); |
| vst1q_s32(output + 4, res[1]); |
| vst1q_s32(output + stride, res[2]); |
| vst1q_s32(output + stride + 4, res[3]); |
| |
| vst1q_s32(output + (stride * 2), res[4]); |
| vst1q_s32(output + (stride * 2) + 4, res[5]); |
| vst1q_s32(output + (stride * 3), res[6]); |
| vst1q_s32(output + (stride * 3) + 4, res[7]); |
| |
| vst1q_s32(output + (stride * 4), res[8]); |
| vst1q_s32(output + (stride * 4) + 4, res[9]); |
| vst1q_s32(output + (stride * 5), res[10]); |
| vst1q_s32(output + (stride * 5) + 4, res[11]); |
| |
| vst1q_s32(output + (stride * 6), res[12]); |
| vst1q_s32(output + (stride * 6) + 4, res[13]); |
| vst1q_s32(output + (stride * 7), res[14]); |
| vst1q_s32(output + (stride * 7) + 4, res[15]); |
| } |
| |
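// 8-point forward DCT over one 4-lane group; col_num is the stride (in
// vectors) between successive transform rows.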
| static void fdct4x8_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int col_num) { |
| const int32_t *cospi = cospi_arr(bit); |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| int32x4_t u[8], v[8]; |
| |
| int startidx = 0 * col_num; |
| int endidx = 7 * col_num; |
| // stage 0-1 |
| u[0] = vaddq_s32(in[startidx], in[endidx]); |
| v[7] = vsubq_s32(in[startidx], in[endidx]); |
| startidx += col_num; |
| endidx -= col_num; |
| u[1] = vaddq_s32(in[startidx], in[endidx]); |
| u[6] = vsubq_s32(in[startidx], in[endidx]); |
| startidx += col_num; |
| endidx -= col_num; |
| u[2] = vaddq_s32(in[startidx], in[endidx]); |
| u[5] = vsubq_s32(in[startidx], in[endidx]); |
| startidx += col_num; |
| endidx -= col_num; |
| u[3] = vaddq_s32(in[startidx], in[endidx]); |
| v[4] = vsubq_s32(in[startidx], in[endidx]); |
| |
| // stage 2 |
| v[0] = vaddq_s32(u[0], u[3]); |
| v[3] = vsubq_s32(u[0], u[3]); |
| v[1] = vaddq_s32(u[1], u[2]); |
| v[2] = vsubq_s32(u[1], u[2]); |
| |
| v[5] = vmulq_n_s32(u[6], cospi[32]); |
| v[5] = vmlsq_n_s32(v[5], u[5], cospi[32]); |
| v[5] = vrshlq_s32(v[5], v_bit); |
| |
| u[0] = vmulq_n_s32(u[5], cospi[32]); |
| v[6] = vmlaq_n_s32(u[0], u[6], cospi[32]); |
| v[6] = vrshlq_s32(v[6], v_bit); |
| |
| // stage 3 |
| // type 0 |
| v[0] = vmulq_n_s32(v[0], cospi[32]); |
| v[1] = vmulq_n_s32(v[1], cospi[32]); |
| u[0] = vaddq_s32(v[0], v[1]); |
| u[0] = vrshlq_s32(u[0], v_bit); |
| |
| u[1] = vsubq_s32(v[0], v[1]); |
| u[1] = vrshlq_s32(u[1], v_bit); |
| |
| // type 1 |
| v[0] = vmulq_n_s32(v[2], cospi[48]); |
| u[2] = vmlaq_n_s32(v[0], v[3], cospi[16]); |
| u[2] = vrshlq_s32(u[2], v_bit); |
| |
| v[1] = vmulq_n_s32(v[3], cospi[48]); |
| u[3] = vmlsq_n_s32(v[1], v[2], cospi[16]); |
| u[3] = vrshlq_s32(u[3], v_bit); |
| |
| u[4] = vaddq_s32(v[4], v[5]); |
| u[5] = vsubq_s32(v[4], v[5]); |
| u[6] = vsubq_s32(v[7], v[6]); |
| u[7] = vaddq_s32(v[7], v[6]); |
| |
| // stage 4-5 |
| v[0] = vmulq_n_s32(u[4], cospi[56]); |
| v[0] = vmlaq_n_s32(v[0], u[7], cospi[8]); |
| out[1 * col_num] = vrshlq_s32(v[0], v_bit); |
| |
| v[1] = vmulq_n_s32(u[7], cospi[56]); |
| v[0] = vmlsq_n_s32(v[1], u[4], cospi[8]); |
| out[7 * col_num] = vrshlq_s32(v[0], v_bit); |
| |
| v[0] = vmulq_n_s32(u[5], cospi[24]); |
| v[0] = vmlaq_n_s32(v[0], u[6], cospi[40]); |
| out[5 * col_num] = vrshlq_s32(v[0], v_bit); |
| |
| v[1] = vmulq_n_s32(u[6], cospi[24]); |
| v[0] = vmlsq_n_s32(v[1], u[5], cospi[40]); |
| out[3 * col_num] = vrshlq_s32(v[0], v_bit); |
| |
| out[0 * col_num] = u[0]; |
| out[4 * col_num] = u[1]; |
| out[2 * col_num] = u[2]; |
| out[6 * col_num] = u[3]; |
| } |
| |
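// An 8x8 block spans two 4-lane groups per row, so run the 8-point DCT on
// the even- and odd-indexed vectors separately.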
| static void fdct8x8_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int col_num) { |
| fdct4x8_neon(in, out, bit, col_num); |
| fdct4x8_neon(in + 1, out + 1, bit, col_num); |
| } |
| |
| static void fadst8x8_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int col_num) { |
| const int32_t *cospi = cospi_arr(bit); |
| |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| int32x4_t u0, u1, u2, u3, u4, u5, u6, u7; |
| int32x4_t v0, v1, v2, v3, v4, v5, v6, v7; |
| int32x4_t x, y; |

  for (int col = 0; col < col_num; ++col) {
| // stage 0-1 |
| u0 = in[col_num * 0 + col]; |
| u1 = vnegq_s32(in[col_num * 7 + col]); |
| u2 = vnegq_s32(in[col_num * 3 + col]); |
| u3 = in[col_num * 4 + col]; |
| u4 = vnegq_s32(in[col_num * 1 + col]); |
| u5 = in[col_num * 6 + col]; |
| u6 = in[col_num * 2 + col]; |
| u7 = vnegq_s32(in[col_num * 5 + col]); |
| |
| // stage 2 |
| v0 = u0; |
| v1 = u1; |
| |
| x = vmulq_n_s32(u2, cospi[32]); |
| y = vmulq_n_s32(u3, cospi[32]); |
| v2 = vaddq_s32(x, y); |
| v2 = vrshlq_s32(v2, v_bit); |
| |
| v3 = vsubq_s32(x, y); |
| v3 = vrshlq_s32(v3, v_bit); |
| |
| v4 = u4; |
| v5 = u5; |
| |
| x = vmulq_n_s32(u6, cospi[32]); |
| y = vmulq_n_s32(u7, cospi[32]); |
| v6 = vaddq_s32(x, y); |
| v6 = vrshlq_s32(v6, v_bit); |
| |
| v7 = vsubq_s32(x, y); |
| v7 = vrshlq_s32(v7, v_bit); |
| |
| // stage 3 |
| u0 = vaddq_s32(v0, v2); |
| u1 = vaddq_s32(v1, v3); |
| u2 = vsubq_s32(v0, v2); |
| u3 = vsubq_s32(v1, v3); |
| u4 = vaddq_s32(v4, v6); |
| u5 = vaddq_s32(v5, v7); |
| u6 = vsubq_s32(v4, v6); |
| u7 = vsubq_s32(v5, v7); |
| |
| // stage 4 |
| v0 = u0; |
| v1 = u1; |
| v2 = u2; |
| v3 = u3; |
| |
| v4 = vmulq_n_s32(u4, cospi[16]); |
| v4 = vmlaq_n_s32(v4, u5, cospi[48]); |
| v4 = vrshlq_s32(v4, v_bit); |
| |
| v5 = vmulq_n_s32(u4, cospi[48]); |
| v5 = vmlsq_n_s32(v5, u5, cospi[16]); |
| v5 = vrshlq_s32(v5, v_bit); |
| |
| v6 = vmulq_n_s32(u7, cospi[16]); |
| v6 = vmlsq_n_s32(v6, u6, cospi[48]); |
| v6 = vrshlq_s32(v6, v_bit); |
| |
| v7 = vmulq_n_s32(u6, cospi[16]); |
| v7 = vmlaq_n_s32(v7, u7, cospi[48]); |
| v7 = vrshlq_s32(v7, v_bit); |
| |
| // stage 5 |
| u0 = vaddq_s32(v0, v4); |
| u1 = vaddq_s32(v1, v5); |
| u2 = vaddq_s32(v2, v6); |
| u3 = vaddq_s32(v3, v7); |
| u4 = vsubq_s32(v0, v4); |
| u5 = vsubq_s32(v1, v5); |
| u6 = vsubq_s32(v2, v6); |
| u7 = vsubq_s32(v3, v7); |
| |
| // stage 6 |
| v0 = vmulq_n_s32(u0, cospi[4]); |
| v0 = vmlaq_n_s32(v0, u1, cospi[60]); |
| v0 = vrshlq_s32(v0, v_bit); |
| |
| v1 = vmulq_n_s32(u0, cospi[60]); |
| v1 = vmlsq_n_s32(v1, u1, cospi[4]); |
| v1 = vrshlq_s32(v1, v_bit); |
| |
| v2 = vmulq_n_s32(u2, cospi[20]); |
| v2 = vmlaq_n_s32(v2, u3, cospi[44]); |
| v2 = vrshlq_s32(v2, v_bit); |
| |
| v3 = vmulq_n_s32(u2, cospi[44]); |
| v3 = vmlsq_n_s32(v3, u3, cospi[20]); |
| v3 = vrshlq_s32(v3, v_bit); |
| |
| v4 = vmulq_n_s32(u4, cospi[36]); |
| v4 = vmlaq_n_s32(v4, u5, cospi[28]); |
| v4 = vrshlq_s32(v4, v_bit); |
| |
| v5 = vmulq_n_s32(u4, cospi[28]); |
| v5 = vmlsq_n_s32(v5, u5, cospi[36]); |
| v5 = vrshlq_s32(v5, v_bit); |
| |
| x = vmulq_n_s32(u6, cospi[52]); |
| v6 = vmlaq_n_s32(x, u7, cospi[12]); |
| v6 = vrshlq_s32(v6, v_bit); |
| |
| v7 = vmulq_n_s32(u6, cospi[12]); |
| v7 = vmlsq_n_s32(v7, u7, cospi[52]); |
| v7 = vrshlq_s32(v7, v_bit); |
| |
| // stage 7 |
| out[col_num * 0 + col] = v1; |
| out[col_num * 1 + col] = v6; |
| out[col_num * 2 + col] = v3; |
| out[col_num * 3 + col] = v4; |
| out[col_num * 4 + col] = v5; |
| out[col_num * 5 + col] = v2; |
| out[col_num * 6 + col] = v7; |
| out[col_num * 7 + col] = v0; |
| } |
| } |
| static void idtx8x8_neon(int32x4_t *in, int32x4_t *out, int bit, int col_num) { |
| (void)bit; |
| |
| for (int i = 0; i < col_num; i += 1) { |
| out[0 + 8 * i] = vshlq_n_s32(in[0 + 8 * i], 1); |
| out[1 + 8 * i] = vshlq_n_s32(in[1 + 8 * i], 1); |
| out[2 + 8 * i] = vshlq_n_s32(in[2 + 8 * i], 1); |
| out[3 + 8 * i] = vshlq_n_s32(in[3 + 8 * i], 1); |
| out[4 + 8 * i] = vshlq_n_s32(in[4 + 8 * i], 1); |
| out[5 + 8 * i] = vshlq_n_s32(in[5 + 8 * i], 1); |
| out[6 + 8 * i] = vshlq_n_s32(in[6 + 8 * i], 1); |
| out[7 + 8 * i] = vshlq_n_s32(in[7 + 8 * i], 1); |
| } |
| } |
static void idtx32x8_neon(int32x4_t *in, int32x4_t *out, int bit,
                          int col_num) {
| (void)bit; |
| (void)col_num; |
| for (int j = 0; j < 2; j++) { |
| out[j + 8 * 0] = vshlq_n_s32(in[j + 8 * 0], 1); |
| out[j + 8 * 1] = vshlq_n_s32(in[j + 8 * 1], 1); |
| out[j + 8 * 2] = vshlq_n_s32(in[j + 8 * 2], 1); |
| out[j + 8 * 3] = vshlq_n_s32(in[j + 8 * 3], 1); |
| out[j + 8 * 4] = vshlq_n_s32(in[j + 8 * 4], 1); |
| out[j + 8 * 5] = vshlq_n_s32(in[j + 8 * 5], 1); |
| out[j + 8 * 6] = vshlq_n_s32(in[j + 8 * 6], 1); |
| out[j + 8 * 7] = vshlq_n_s32(in[j + 8 * 7], 1); |
| } |
| } |
| void av1_fwd_txfm2d_8x8_neon(const int16_t *input, int32_t *coeff, int stride, |
| TX_TYPE tx_type, int bd) { |
| int32x4_t in[16], out[16]; |
| const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X8]; |
| const int txw_idx = get_txw_idx(TX_8X8); |
| const int txh_idx = get_txh_idx(TX_8X8); |
| const int32x4_t v_shift1 = vdupq_n_s32(shift[1]); |
| switch (tx_type) { |
| case DCT_DCT: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case ADST_DCT: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case DCT_ADST: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case ADST_ADST: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case FLIPADST_DCT: |
| load_buffer_8x8(input, in, stride, 1, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case DCT_FLIPADST: |
| load_buffer_8x8(input, in, stride, 0, 1, shift[0]); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case FLIPADST_FLIPADST: |
| load_buffer_8x8(input, in, stride, 1, 1, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case ADST_FLIPADST: |
| load_buffer_8x8(input, in, stride, 0, 1, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case FLIPADST_ADST: |
| load_buffer_8x8(input, in, stride, 1, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2); |
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case IDTX: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| idtx8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      idtx8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case V_DCT: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fdct8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      idtx8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case H_DCT: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| idtx8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      fdct8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case V_ADST: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      idtx8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case H_ADST: |
| load_buffer_8x8(input, in, stride, 0, 0, shift[0]); |
| idtx8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case V_FLIPADST: |
| load_buffer_8x8(input, in, stride, 1, 0, shift[0]); |
| fadst8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      idtx8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| case H_FLIPADST: |
| load_buffer_8x8(input, in, stride, 0, 1, shift[0]); |
| idtx8x8_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2); |
| col_txfm_8x8_rounding(out, &v_shift1); |
| transpose_8x8(out, in); |
      fadst8x8_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
| transpose_8x8(out, in); |
| write_buffer_8x8(in, coeff); |
| break; |
| default: assert(0); |
| } |
| (void)bd; |
| } |
| |
| // Hybrid Transform 16x16 |
| |
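// Reassemble four 8x8 quadrants (stored as consecutive 16-vector chunks)
// into a single row-major 16x16 layout of int32x4_t vectors.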
| static INLINE void convert_8x8_to_16x16(const int32x4_t *in, int32x4_t *out) { |
| int row_index = 0; |
| int dst_index = 0; |
| int src_index = 0; |
| |
| // row 0, 1, .., 7 |
| do { |
| out[dst_index] = in[src_index]; |
| out[dst_index + 1] = in[src_index + 1]; |
| out[dst_index + 2] = in[src_index + 16]; |
| out[dst_index + 3] = in[src_index + 17]; |
| dst_index += 4; |
| src_index += 2; |
| row_index += 1; |
| } while (row_index < 8); |
| |
| // row 8, 9, ..., 15 |
| src_index += 16; |
| do { |
| out[dst_index] = in[src_index]; |
| out[dst_index + 1] = in[src_index + 1]; |
| out[dst_index + 2] = in[src_index + 16]; |
| out[dst_index + 3] = in[src_index + 17]; |
| dst_index += 4; |
| src_index += 2; |
| row_index += 1; |
| } while (row_index < 16); |
| } |
| |
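// Load a 16x16 block as four 8x8 quadrants. Flips are handled by swapping
// the quadrant pointers and letting load_buffer_8x8 flip within each 8x8.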
| static INLINE void load_buffer_16x16(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| int shift) { |
| int32x4_t in[64]; |
| // Load 4 8x8 blocks |
| const int16_t *topL = input; |
| const int16_t *topR = input + 8; |
| const int16_t *botL = input + 8 * stride; |
| const int16_t *botR = input + 8 * stride + 8; |
| |
| const int16_t *tmp; |
| |
| if (flipud) { |
    // Swap the two 8x8 blocks in the left column
| tmp = topL; |
| topL = botL; |
| botL = tmp; |
    // Swap the two 8x8 blocks in the right column
| tmp = topR; |
| topR = botR; |
| botR = tmp; |
| } |
| |
| if (fliplr) { |
    // Swap the two 8x8 blocks in the top row
| tmp = topL; |
| topL = topR; |
| topR = tmp; |
    // Swap the two 8x8 blocks in the bottom row
| tmp = botL; |
| botL = botR; |
| botR = tmp; |
| } |
| |
| // load first 8 columns |
| load_buffer_8x8(topL, &in[0], stride, flipud, fliplr, shift); |
| load_buffer_8x8(botL, &in[32], stride, flipud, fliplr, shift); |
| |
| // load second 8 columns |
| load_buffer_8x8(topR, &in[16], stride, flipud, fliplr, shift); |
| load_buffer_8x8(botR, &in[48], stride, flipud, fliplr, shift); |
| |
| convert_8x8_to_16x16(in, out); |
| } |
| |
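// The remaining rectangular loaders follow the same pattern: split the block
// into 4x4 or 8x8 sub-blocks, swap sub-block pointers for flips, and
// delegate to the smaller loaders.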
| static INLINE void load_buffer_8x16(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| int shift) { |
| const int16_t *topL = input; |
| const int16_t *botL = input + 8 * stride; |
| |
| const int16_t *tmp; |
| |
| if (flipud) { |
| tmp = topL; |
| topL = botL; |
| botL = tmp; |
| } |
| |
| load_buffer_8x8(topL, out, stride, flipud, fliplr, shift); |
| load_buffer_8x8(botL, out + 16, stride, flipud, fliplr, shift); |
| } |
| |
| static INLINE void load_buffer_8x4(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| const int32x4_t *v_shift) { |
| const int16_t *topL = input; |
| const int16_t *topR = input + 4; |
| |
| const int16_t *tmp; |
| |
| if (fliplr) { |
| tmp = topL; |
| topL = topR; |
| topR = tmp; |
| } |
| load_buffer_4x4(topL, out, stride, flipud, fliplr, v_shift); |
| load_buffer_4x4(topR, out + 4, stride, flipud, fliplr, v_shift); |
| } |
| |
| static INLINE void load_buffer_16x4(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| const int32x4_t *v_shift) { |
| const int16_t *topL = input; |
| const int16_t *topR = input + 8; |
| |
| const int16_t *tmp; |
| |
| if (fliplr) { |
| tmp = topL; |
| topL = topR; |
| topR = tmp; |
| } |
| |
| load_buffer_8x4(topL, out, stride, flipud, fliplr, v_shift); |
| load_buffer_8x4(topR, out + 8, stride, flipud, fliplr, v_shift); |
| } |
| |
| static INLINE void load_buffer_4x8(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| const int32x4_t *v_shift) { |
| const int16_t *topL = input; |
| const int16_t *botL = input + 4 * stride; |
| |
| const int16_t *tmp; |
| |
| if (flipud) { |
| tmp = topL; |
| topL = botL; |
| botL = tmp; |
| } |
| |
| load_buffer_4x4(topL, out, stride, flipud, fliplr, v_shift); |
| load_buffer_4x4(botL, out + 4, stride, flipud, fliplr, v_shift); |
| } |
| |
| static INLINE void load_buffer_4x16(const int16_t *input, int32x4_t *out, |
| const int stride, const int flipud, |
| const int fliplr, |
| const int32x4_t *v_shift) { |
| const int16_t *topL = input; |
| const int16_t *botL = input + 8 * stride; |
| |
| const int16_t *tmp; |
| |
| if (flipud) { |
| tmp = topL; |
| topL = botL; |
| botL = tmp; |
| } |
| load_buffer_4x8(topL, out, stride, flipud, fliplr, v_shift); |
| load_buffer_4x8(botL, out + 8, stride, flipud, fliplr, v_shift); |
| } |
| |
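// Load `height` rows of 32 int16 values; each row occupies eight int32x4_t
// vectors. Each stride-4 4x4 load reads 16 contiguous values of the row.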
| static INLINE void load_buffer_32x8n(const int16_t *input, int32x4_t *out, |
| int stride, int flipud, int fliplr, |
| int shift, const int height) { |
| const int16_t *in = input; |
| int32x4_t *output = out; |
  const int32x4_t v_shift = vdupq_n_s32(shift);
  for (int row = 0; row < height; row++) {
    in = input + row * stride;
    output = out + row * 8;
    load_buffer_4x4(in, output, 4, flipud, fliplr, &v_shift);
    load_buffer_4x4(in + 16, output + 4, 4, flipud, fliplr, &v_shift);
  }
| } |
| |
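// 16-point forward DCT, processing col_num 4-lane column groups per call.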
| static void fdct16x16_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int col_num) { |
| const int32_t *cospi = cospi_arr(bit); |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| int32x4_t u[16], v[16]; |

  // Calculate column groups 0, 1, ..., col_num - 1 (four lanes per group).
  for (int col = 0; col < col_num; ++col) {
| // stage 0 |
| // stage 1 |
| u[0] = vaddq_s32(in[0 * col_num + col], in[15 * col_num + col]); |
| u[15] = vsubq_s32(in[0 * col_num + col], in[15 * col_num + col]); |
| u[1] = vaddq_s32(in[1 * col_num + col], in[14 * col_num + col]); |
| u[14] = vsubq_s32(in[1 * col_num + col], in[14 * col_num + col]); |
| u[2] = vaddq_s32(in[2 * col_num + col], in[13 * col_num + col]); |
| u[13] = vsubq_s32(in[2 * col_num + col], in[13 * col_num + col]); |
| u[3] = vaddq_s32(in[3 * col_num + col], in[12 * col_num + col]); |
| u[12] = vsubq_s32(in[3 * col_num + col], in[12 * col_num + col]); |
| u[4] = vaddq_s32(in[4 * col_num + col], in[11 * col_num + col]); |
| u[11] = vsubq_s32(in[4 * col_num + col], in[11 * col_num + col]); |
| u[5] = vaddq_s32(in[5 * col_num + col], in[10 * col_num + col]); |
| u[10] = vsubq_s32(in[5 * col_num + col], in[10 * col_num + col]); |
| u[6] = vaddq_s32(in[6 * col_num + col], in[9 * col_num + col]); |
| u[9] = vsubq_s32(in[6 * col_num + col], in[9 * col_num + col]); |
| u[7] = vaddq_s32(in[7 * col_num + col], in[8 * col_num + col]); |
| u[8] = vsubq_s32(in[7 * col_num + col], in[8 * col_num + col]); |
| |
| // stage 2 |
| v[0] = vaddq_s32(u[0], u[7]); |
| v[7] = vsubq_s32(u[0], u[7]); |
| v[1] = vaddq_s32(u[1], u[6]); |
| v[6] = vsubq_s32(u[1], u[6]); |
| v[2] = vaddq_s32(u[2], u[5]); |
| v[5] = vsubq_s32(u[2], u[5]); |
| v[3] = vaddq_s32(u[3], u[4]); |
| v[4] = vsubq_s32(u[3], u[4]); |
| v[8] = u[8]; |
| v[9] = u[9]; |
| |
| v[10] = vmulq_n_s32(u[13], cospi[32]); |
| v[10] = vmlsq_n_s32(v[10], u[10], cospi[32]); |
| v[10] = vrshlq_s32(v[10], v_bit); |
| |
| v[13] = vmulq_n_s32(u[10], cospi[32]); |
| v[13] = vmlaq_n_s32(v[13], u[13], cospi[32]); |
| v[13] = vrshlq_s32(v[13], v_bit); |
| |
| v[11] = vmulq_n_s32(u[12], cospi[32]); |
| v[11] = vmlsq_n_s32(v[11], u[11], cospi[32]); |
| v[11] = vrshlq_s32(v[11], v_bit); |
| |
| v[12] = vmulq_n_s32(u[11], cospi[32]); |
| v[12] = vmlaq_n_s32(v[12], u[12], cospi[32]); |
| v[12] = vrshlq_s32(v[12], v_bit); |
| v[14] = u[14]; |
| v[15] = u[15]; |
| |
| // stage 3 |
| u[0] = vaddq_s32(v[0], v[3]); |
| u[3] = vsubq_s32(v[0], v[3]); |
| u[1] = vaddq_s32(v[1], v[2]); |
| u[2] = vsubq_s32(v[1], v[2]); |
| u[4] = v[4]; |
| |
| u[5] = vmulq_n_s32(v[6], cospi[32]); |
| u[5] = vmlsq_n_s32(u[5], v[5], cospi[32]); |
| u[5] = vrshlq_s32(u[5], v_bit); |
| |
| u[6] = vmulq_n_s32(v[5], cospi[32]); |
| u[6] = vmlaq_n_s32(u[6], v[6], cospi[32]); |
| u[6] = vrshlq_s32(u[6], v_bit); |
| |
| u[7] = v[7]; |
| u[8] = vaddq_s32(v[8], v[11]); |
| u[11] = vsubq_s32(v[8], v[11]); |
| u[9] = vaddq_s32(v[9], v[10]); |
| u[10] = vsubq_s32(v[9], v[10]); |
| u[12] = vsubq_s32(v[15], v[12]); |
| u[15] = vaddq_s32(v[15], v[12]); |
| u[13] = vsubq_s32(v[14], v[13]); |
| u[14] = vaddq_s32(v[14], v[13]); |
| |
| // stage 4 |
| u[0] = vmulq_n_s32(u[0], cospi[32]); |
| u[1] = vmulq_n_s32(u[1], cospi[32]); |
| v[0] = vaddq_s32(u[0], u[1]); |
| v[0] = vrshlq_s32(v[0], v_bit); |
| |
| v[1] = vsubq_s32(u[0], u[1]); |
| v[1] = vrshlq_s32(v[1], v_bit); |
| |
| v[2] = vmulq_n_s32(u[2], cospi[48]); |
| v[2] = vmlaq_n_s32(v[2], u[3], cospi[16]); |
| v[2] = vrshlq_s32(v[2], v_bit); |
| |
| v[3] = vmulq_n_s32(u[3], cospi[48]); |
| v[3] = vmlsq_n_s32(v[3], u[2], cospi[16]); |
| v[3] = vrshlq_s32(v[3], v_bit); |
| |
| v[4] = vaddq_s32(u[4], u[5]); |
| v[5] = vsubq_s32(u[4], u[5]); |
| v[6] = vsubq_s32(u[7], u[6]); |
| v[7] = vaddq_s32(u[7], u[6]); |
| v[8] = u[8]; |
| |
| v[9] = vmulq_n_s32(u[14], cospi[48]); |
| v[9] = vmlsq_n_s32(v[9], u[9], cospi[16]); |
| v[9] = vrshlq_s32(v[9], v_bit); |
| |
| v[14] = vmulq_n_s32(u[9], cospi[48]); |
| v[14] = vmlaq_n_s32(v[14], u[14], cospi[16]); |
| v[14] = vrshlq_s32(v[14], v_bit); |
| |
| v[10] = vmulq_n_s32(u[13], -cospi[16]); |
| v[10] = vmlsq_n_s32(v[10], u[10], cospi[48]); |
| v[10] = vrshlq_s32(v[10], v_bit); |
| |
| v[13] = vmulq_n_s32(u[10], -cospi[16]); |
| v[13] = vmlaq_n_s32(v[13], u[13], cospi[48]); |
| v[13] = vrshlq_s32(v[13], v_bit); |
| |
| v[11] = u[11]; |
| v[12] = u[12]; |
| v[15] = u[15]; |
| |
| // stage 5 |
| u[0] = v[0]; |
| u[1] = v[1]; |
| u[2] = v[2]; |
| u[3] = v[3]; |
| |
| u[4] = vmulq_n_s32(v[4], cospi[56]); |
| u[4] = vmlaq_n_s32(u[4], v[7], cospi[8]); |
| u[4] = vrshlq_s32(u[4], v_bit); |
| |
| u[7] = vmulq_n_s32(v[7], cospi[56]); |
| u[7] = vmlsq_n_s32(u[7], v[4], cospi[8]); |
| u[7] = vrshlq_s32(u[7], v_bit); |
| |
| u[5] = vmulq_n_s32(v[5], cospi[24]); |
| u[5] = vmlaq_n_s32(u[5], v[6], cospi[40]); |
| u[5] = vrshlq_s32(u[5], v_bit); |
| |
| u[6] = vmulq_n_s32(v[6], cospi[24]); |
| u[6] = vmlsq_n_s32(u[6], v[5], cospi[40]); |
| u[6] = vrshlq_s32(u[6], v_bit); |
| |
| u[8] = vaddq_s32(v[8], v[9]); |
| u[9] = vsubq_s32(v[8], v[9]); |
| u[10] = vsubq_s32(v[11], v[10]); |
| u[11] = vaddq_s32(v[11], v[10]); |
| u[12] = vaddq_s32(v[12], v[13]); |
| u[13] = vsubq_s32(v[12], v[13]); |
| u[14] = vsubq_s32(v[15], v[14]); |
| u[15] = vaddq_s32(v[15], v[14]); |
| |
| // stage 6 |
| v[0] = u[0]; |
| v[1] = u[1]; |
| v[2] = u[2]; |
| v[3] = u[3]; |
| v[4] = u[4]; |
| v[5] = u[5]; |
| v[6] = u[6]; |
| v[7] = u[7]; |
| |
| v[8] = vmulq_n_s32(u[8], cospi[60]); |
| v[8] = vmlaq_n_s32(v[8], u[15], cospi[4]); |
| v[8] = vrshlq_s32(v[8], v_bit); |
| |
| v[15] = vmulq_n_s32(u[15], cospi[60]); |
| v[15] = vmlsq_n_s32(v[15], u[8], cospi[4]); |
| v[15] = vrshlq_s32(v[15], v_bit); |
| |
| v[9] = vmulq_n_s32(u[9], cospi[28]); |
| v[9] = vmlaq_n_s32(v[9], u[14], cospi[36]); |
| v[9] = vrshlq_s32(v[9], v_bit); |
| |
| v[14] = vmulq_n_s32(u[14], cospi[28]); |
| v[14] = vmlsq_n_s32(v[14], u[9], cospi[36]); |
| v[14] = vrshlq_s32(v[14], v_bit); |
| |
| v[10] = vmulq_n_s32(u[10], cospi[44]); |
| v[10] = vmlaq_n_s32(v[10], u[13], cospi[20]); |
| v[10] = vrshlq_s32(v[10], v_bit); |
| |
| v[13] = vmulq_n_s32(u[13], cospi[44]); |
| v[13] = vmlsq_n_s32(v[13], u[10], cospi[20]); |
| v[13] = vrshlq_s32(v[13], v_bit); |
| |
| v[11] = vmulq_n_s32(u[11], cospi[12]); |
| v[11] = vmlaq_n_s32(v[11], u[12], cospi[52]); |
| v[11] = vrshlq_s32(v[11], v_bit); |
| |
| v[12] = vmulq_n_s32(u[12], cospi[12]); |
| v[12] = vmlsq_n_s32(v[12], u[11], cospi[52]); |
| v[12] = vrshlq_s32(v[12], v_bit); |
| |
| out[0 * col_num + col] = v[0]; |
| out[1 * col_num + col] = v[8]; |
| out[2 * col_num + col] = v[4]; |
| out[3 * col_num + col] = v[12]; |
| out[4 * col_num + col] = v[2]; |
| out[5 * col_num + col] = v[10]; |
| out[6 * col_num + col] = v[6]; |
| out[7 * col_num + col] = v[14]; |
| out[8 * col_num + col] = v[1]; |
| out[9 * col_num + col] = v[9]; |
| out[10 * col_num + col] = v[5]; |
| out[11 * col_num + col] = v[13]; |
| out[12 * col_num + col] = v[3]; |
| out[13 * col_num + col] = v[11]; |
| out[14 * col_num + col] = v[7]; |
| out[15 * col_num + col] = v[15]; |
| } |
| } |
| |
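// 16-point forward ADST; the sign flips of stage 0-1 are folded into the
// initial loads with vnegq_s32.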
| static void fadst16x16_neon(int32x4_t *in, int32x4_t *out, int bit, |
| const int num_cols) { |
| const int32_t *cospi = cospi_arr(bit); |
| |
| const int32x4_t v_bit = vdupq_n_s32(-bit); |
| |
| int32x4_t u[16], v[16], x, y; |

  for (int col = 0; col < num_cols; ++col) {
| // stage 0-1 |
| u[0] = in[0 * num_cols + col]; |
| u[1] = vnegq_s32(in[15 * num_cols + col]); |
| u[2] = vnegq_s32(in[7 * num_cols + col]); |
| u[3] = in[8 * num_cols + col]; |
| u[4] = vnegq_s32(in[3 * num_cols + col]); |
| u[5] = in[12 * num_cols + col]; |
| u[6] = in[4 * num_cols + col]; |
| u[7] = vnegq_s32(in[11 * num_cols + col]); |
| u[8] = vnegq_s32(in[1 * num_cols + col]); |
| u[9] = in[14 * num_cols + col]; |
| u[10] = in[6 * num_cols + col]; |
| u[11] = vnegq_s32(in[9 * num_cols + col]); |
| u[12] = in[2 * num_cols + col]; |
| u[13] = vnegq_s32(in[13 * num_cols + col]); |
| u[14] = vnegq_s32(in[5 * num_cols + col]); |
| u[15] = in[10 * num_cols + col]; |
| |
| // stage 2 |
| v[0] = u[0]; |
| v[1] = u[1]; |
| |
| x = vmulq_n_s32(u[2], cospi[32]); |
| y = vmulq_n_s32(u[3], cospi[32]); |
| v[2] = vaddq_s32(x, y); |
| v[2] = vrshlq_s32(v[2], v_bit); |
| |
| v[3] = vsubq_s32(x, y); |
| v[3] = vrshlq_s32(v[3], v_bit); |
| |
| v[4] = u[4]; |
| v[5] = u[5]; |
| |
| x = vmulq_n_s32(u[6], cospi[32]); |
| y = vmulq_n_s32(u[7], cospi[32]); |
| v[6] = vaddq_s32(x, y); |
| v[6] = vrshlq_s32(v[6], v_bit); |
| |
| v[7] = vsubq_s32(x, y); |
| v[7] = vrshlq_s32(v[7], v_bit); |
| |
| v[8] = u[8]; |
| v[9] = u[9]; |
| |
| x = vmulq_n_s32(u[10], cospi[32]); |
| y = vmulq_n_s32(u[11], cospi[32]); |
| v[10] = vaddq_s32(x, y); |
| v[10] = vrshlq_s32(v[10], v_bit); |
| |
| v[11] = vsubq_s32(x, y); |
| v[11] = vrshlq_s32(v[11], v_bit); |
| |
| v[12] = u[12]; |
| v[13] = u[13]; |
| |
| x = vmulq_n_s32(u[14], cospi[32]); |
| y = vmulq_n_s32(u[15], cospi[32]); |
| v[14] = vaddq_s32(x, y); |
| v[14] = vrshlq_s32(v[14], v_bit); |
| |
| v[15] = vsubq_s32(x, y); |
| v[15] = vrshlq_s32(v[15], v_bit); |
| |
| // stage 3 |
| u[0] = vaddq_s32(v[0], v[2]); |
| u[1] = vaddq_s32(v[1], v[3]); |
| u[2] = vsubq_s32(v[0], v[2]); |
| u[3] = vsubq_s32(v[1], v[3]); |
| u[4] = vaddq_s32(v[4], v[6]); |
| u[5] = vaddq_s32(v[5], v[7]); |
| u[6] = vsubq_s32(v[4], v[6]); |
| u[7] = vsubq_s32(v[5], v[7]); |
| u[8] = vaddq_s32(v[8], v[10]); |
| u[9] = vaddq_s32(v[9], v[11]); |
| u[10] = vsubq_s32(v[8], v[10]); |
| u[11] = vsubq_s32(v[9], v[11]); |
| u[12] = vaddq_s32(v[12], v[14]); |
| u[13] = vaddq_s32(v[13], v[15]); |
| u[14] = vsubq_s32(v[12], v[14]); |
| u[15] = vsubq_s32(v[13], v[15]); |
| |
| // stage 4 |
| v[0] = u[0]; |
| v[1] = u[1]; |
| v[2] = u[2]; |
| v[3] = u[3]; |
| v[4] = half_btf_neon(&cospi[16], &u[4], &cospi[48], &u[5], v_bit); |
| v[7] = half_btf_neon(&cospi[16], &u[6], &cospi[48], &u[7], v_bit); |
| v[5] = half_btf_neon_m(&cospi[48], &u[4], &cospi[16], &u[5], v_bit); |
| v[6] = half_btf_neon_m(&cospi[16], &u[7], &cospi[48], &u[6], v_bit); |
| |
| v[8] = u[8]; |
| v[9] = u[9]; |
| v[10] = u[10]; |
| v[11] = u[11]; |
| |
| v[12] = half_btf_neon(&cospi[16], &u[12], &cospi[48], &u[13], v_bit); |
| v[15] = half_btf_neon(&cospi[16], &u[14], &cospi[48], &u[15], v_bit); |
| v[13] = half_btf_neon_m(&cospi[48], &u[12], &cospi[16], &u[13], v_bit); |
| v[14] = half_btf_neon_m(&cospi[16], &u[15], &cospi[48], &u[14], v_bit); |
| |
| // stage 5 |
| u[0] = vaddq_s32(v[0], v[4]); |
| u[1] = vaddq_s32(v[1], v[5]); |
| u[2] = vaddq_s32(v[2], v[6]); |
| u[3] = vaddq_s32(v[3], v[7]); |
| u[4] = vsubq_s32(v[0], v[4]); |
| u[5] = vsubq_s32(v[1], v[5]); |
| u[6] = vsubq_s32(v[2], v[6]); |
| u[7] = vsubq_s32(v[3], v[7]); |
| u[8] = vaddq_s32(v[8], v[12]); |
| u[9] = vaddq_s32(v[9], v[13]); |
| u[10] = vaddq_s32(v[10], v[14]); |
| u[11] = vaddq_s32(v[11], v[15]); |
| u[12] = vsubq_s32(v[8], v[12]); |
| u[13] = vsubq_s32(v[9], v[13]); |
| u[14] = vsubq_s32(v[10], v[14]); |
| u[15] = vsubq_s32(v[11], v[15]); |
| |
| // stage 6 |
| v[0] = u[0]; |
| v[1] = u[1]; |
| v[2] = u[2]; |
| v[3] = u[3]; |
| v[4] = u[4]; |
| v[5] = u[5]; |
| v[6] = u[6]; |
| v[7] = u[7]; |
| |
| v[8] = half_btf_neon(&cospi[8], &u[8], &cospi[56], &u[9], v_bit); |
| v[13] = half_btf_neon(&cospi[8], &u[12], &cospi[56], &u[13], v_bit); |
| v[9] = half_btf_neon_m(&cospi[56], &u[8], &cospi[8], &u[9], v_bit); |
| v[12] = half_btf_neon_m(&cospi[8], &u[13], &cospi[56], &u[12], v_bit); |
| |
| v[10] = half_btf_neon(&cospi[40], &u[10], &cospi[24], &u[11], v_bit); |
| v[15] = half_btf_neon(&cospi[40], &u[14], &cospi[24], &u[15], v_bit); |
| v[11] = half_btf_neon_m(&cospi[24], &u[10], &cospi[40], &u[11], v_bit); |
| v[14] = half_btf_neon_m(&cospi[40], &u[15], &cospi[24], &u[14], v_bit); |
| |
| // stage 7 |
| u[0] = vaddq_s32(v[0], v[8]); |
| u[1] = vaddq_s32(v[1], v[9]); |
| u[2] = vaddq_s32(v[2], v[10]); |
| u[3] = vaddq_s32(v[3], v[11]); |
| u[4] = vaddq_s32(v[4], v[12]); |
| u[5] = vaddq_s32(v[5], v[13]); |
| u[6] = vaddq_s32(v[6], v[14]); |
| u[7] = vaddq_s32(v[7], v[15]); |
| u[8] = vsubq_s32(v[0], v[8]); |
| u[9] = vsubq_s32(v[1], v[9]); |
| u[10] = vsubq_s32(v[2], v[10]); |
| u[11] = vsubq_s32(v[3], v[11]); |
| u[12] = vsubq_s32(v[4], v[12]); |
| u[13] = vsubq_s32(v[5], v[13]); |
| u[14] = vsubq_s32(v[6], v[14]); |
| u[15] = vsubq_s32(v[7], v[15]); |
| |
| // stage 8 |
| v[0] = half_btf_neon(&cospi[2], &u[0], &cospi[62], &u[1], v_bit); |
| v[1] = half_btf_neon_m(&cospi[62], &u[0], &cospi[2], &u[1], v_bit); |
| v[2] = half_btf_neon(&cospi[10], &u[2], &cospi[54], &u[3], v_bit); |
| v[3] = half_btf_neon_m(&cospi[54], &u[2], &cospi[10], &u[3], v_bit); |
| v[4] = half_btf_neon(&cospi[18], &u[4], &cospi[46], &u[5], v_bit); |
| v[5] = half_btf_neon_m(&cospi[46], &u[4], &cospi[18], &u[5], v_bit); |
| v[6] = half_btf_neon(&cospi[26], &u[6], &cospi[38], &u[7], v_bit); |
| v[7] = half_btf_neon_m(&cospi[38], &u[6], &cospi[26], &u[7], v_bit); |
| v[8] = half_btf_neon(&cospi[34], &u[8], &cospi[30], &u[9], v_bit); |
| v[9] = half_btf_neon_m(&cospi[30], &u[8], &cospi[34], &u[9], v_bit); |
| v[10] = half_btf_neon(&cospi[42], &u[10], &cospi[22], &u[11], v_bit); |
| v[11] = half_btf_neon_m(&cospi[22], &u[10], &cospi[42], &u[11], v_bit); |
| v[12] = half_btf_neon(&cospi[50], &u[12], &cospi[14], &u[13], v_bit); |
| v[13] = half_btf_neon_m(&cospi[14], &u[12], &cospi[50], &u[13], v_bit); |
| v[14] = half_btf_neon(&cospi[58], &u[14], &cospi[6], &u[15], v_bit); |
| v[15] = half_btf_neon_m(&cospi[6], &u[14], &cospi[58], &u[15], v_bit); |
| |
| // stage 9 |
| out[0 * num_cols + col] = v[1]; |
| out[1 * num_cols + col] = v[14]; |
| out[2 * num_cols + col] = v[3]; |
| out[3 * num_cols + col] = v[12]; |
| out[4 * num_cols + col] = v[5]; |
| out[5 * num_cols + col] = v[10]; |
| out[6 * num_cols + col] = v[7]; |
| out[7 * num_cols + col] = v[8]; |
| out[8 * num_cols + col] = v[9]; |
| out[9 * num_cols + col] = v[6]; |
| out[10 * num_cols + col] = v[11]; |
| out[11 * num_cols + col] = v[4]; |
| out[12 * num_cols + col] = v[13]; |
| out[13 * num_cols + col] = v[2]; |
| out[14 * num_cols + col] = v[15]; |
| out[15 * num_cols + col] = v[0]; |
| } |
| } |
| |
| static void col_txfm_16x16_rounding(int32x4_t *in, const int32x4_t *v_shift) { |
  // Note: the 16x16 rounding is split into four 8x8 sections rather than
  // four column groups.
| col_txfm_8x8_rounding(&in[0], v_shift); |
| col_txfm_8x8_rounding(&in[16], v_shift); |
| col_txfm_8x8_rounding(&in[32], v_shift); |
| col_txfm_8x8_rounding(&in[48], v_shift); |
| } |
| |
| static void col_txfm_8x16_rounding(int32x4_t *in, const int32x4_t *v_shift) { |
| col_txfm_8x8_rounding(&in[0], v_shift); |
| col_txfm_8x8_rounding(&in[16], v_shift); |
| } |
| |
| static void write_buffer_16x16(const int32x4_t *in, int32_t *output) { |
| const int size_8x8 = 16 * 4; |
| write_buffer_8x8(&in[0], output); |
| output += size_8x8; |
| write_buffer_8x8(&in[16], output); |
| output += size_8x8; |
| write_buffer_8x8(&in[32], output); |
| output += size_8x8; |
| write_buffer_8x8(&in[48], output); |
| } |
| static void idtx16x16_neon(int32x4_t *in, int32x4_t *out, int bit, |
| int col_num) { |
| (void)bit; |
| int32x4_t fact = vdupq_n_s32(2 * NewSqrt2); |
| int32x4_t offset = vdupq_n_s32(1 << (NewSqrt2Bits - 1)); |
| int32x4_t a_low; |
| |
| int num_iters = 16 * col_num; |
| for (int i = 0; i < num_iters; i++) { |
| a_low = vmulq_s32(in[i], fact); |
| a_low = vaddq_s32(a_low, offset); |
| out[i] = vshrq_n_s32(a_low, NewSqrt2Bits); |
| } |
| } |
void av1_fwd_txfm2d_16x16_neon(const int16_t *input, int32_t *coeff,
                               int stride, TX_TYPE tx_type, int bd) {
| int32x4_t in[64], out[64]; |
| const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X16]; |
| const int txw_idx = get_txw_idx(TX_16X16); |
| const int txh_idx = get_txh_idx(TX_16X16); |
| const int col_num = 4; |
| const int32x4_t v_shift = vdupq_n_s32(shift[1]); |
| switch (tx_type) { |
| case DCT_DCT: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case ADST_DCT: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case DCT_ADST: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case ADST_ADST: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case FLIPADST_DCT: |
| load_buffer_16x16(input, in, stride, 1, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case DCT_FLIPADST: |
| load_buffer_16x16(input, in, stride, 0, 1, shift[0]); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case FLIPADST_FLIPADST: |
| load_buffer_16x16(input, in, stride, 1, 1, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case ADST_FLIPADST: |
| load_buffer_16x16(input, in, stride, 0, 1, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case FLIPADST_ADST: |
| load_buffer_16x16(input, in, stride, 1, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case IDTX: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case V_DCT: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case H_DCT: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fdct16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case V_ADST: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case H_ADST: |
| load_buffer_16x16(input, in, stride, 0, 0, shift[0]); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case V_FLIPADST: |
| load_buffer_16x16(input, in, stride, 1, 0, shift[0]); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| case H_FLIPADST: |
| load_buffer_16x16(input, in, stride, 0, 1, shift[0]); |
| idtx16x16_neon(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num); |
| col_txfm_16x16_rounding(out, &v_shift); |
| transpose_16x16(out, in); |
| fadst16x16_neon(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num); |
| transpose_16x16(out, in); |
| write_buffer_16x16(in, coeff); |
| break; |
| default: assert(0); |
| } |
| (void)bd; |
| } |
| |
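| // Reverse the row order of a transform buffer (used on the FLIPADST |
| // paths), copying from out[] back into in[]. Each row occupies two |
| // adjacent vectors, so even-indexed vectors mirror about index 30 and |
| // odd-indexed ones about `size`; with size == 32 this flips all 16 rows. |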
| static INLINE void flip_buf_neon(int32x4_t *in, int32x4_t *out, int size) { |
| for (int i = 0; i < size; i += 2) in[30 - i] = out[i]; |
| for (int i = 1; i < size; i += 2) in[size - i] = out[i]; |
| } |
| |
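| // Per-TX_TYPE dispatch tables selecting the 1D column and row kernels. |
| // The FLIPADST variants reuse the plain ADST kernels; the flipping |
| // itself is applied when the buffer is loaded (see the flip arguments |
| // passed to the load_buffer_* helpers). |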
| typedef void (*fwd_transform_1d_neon)(int32x4_t *in, int32x4_t *out, int bit, |
| const int num_cols); |
| |
| static const fwd_transform_1d_neon col_highbd_txfm8x8_arr[TX_TYPES] = { |
| fdct8x8_neon, // DCT_DCT |
| fadst8x8_neon, // ADST_DCT |
| fdct8x8_neon, // DCT_ADST |
| fadst8x8_neon, // ADST_ADST |
| fadst8x8_neon, // FLIPADST_DCT |
| fdct8x8_neon, // DCT_FLIPADST |
| fadst8x8_neon, // FLIPADST_FLIPADST |
| fadst8x8_neon, // ADST_FLIPADST |
| fadst8x8_neon, // FLIPADST_ADST |
| idtx8x8_neon, // IDTX |
| fdct8x8_neon, // V_DCT |
| idtx8x8_neon, // H_DCT |
| fadst8x8_neon, // V_ADST |
| idtx8x8_neon, // H_ADST |
| fadst8x8_neon, // V_FLIPADST |
| idtx8x8_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon row_highbd_txfm32x8_arr[TX_TYPES] = { |
| fdct8x8_neon, // DCT_DCT |
| NULL, // ADST_DCT |
| NULL, // DCT_ADST |
| NULL, // ADST_ADST |
| NULL, // FLIPADST_DCT |
| NULL, // DCT_FLIPADST |
| NULL, // FLIPADST_FLIPADST |
| NULL, // ADST_FLIPADST |
| NULL, // FLIPADST_ADST |
| idtx32x8_neon, // IDTX |
| NULL, // V_DCT |
| NULL, // H_DCT |
| NULL, // V_ADST |
| NULL, // H_ADST |
| NULL, // V_FLIPADST |
| NULL, // H_FLIPADST |
| }; |
| static const fwd_transform_1d_neon col_highbd_txfm4x8_arr[TX_TYPES] = { |
| fdct4x8_neon, // DCT_DCT |
| fadst8x8_neon, // ADST_DCT |
| fdct4x8_neon, // DCT_ADST |
| fadst8x8_neon, // ADST_ADST |
| fadst8x8_neon, // FLIPADST_DCT |
| fdct4x8_neon, // DCT_FLIPADST |
| fadst8x8_neon, // FLIPADST_FLIPADST |
| fadst8x8_neon, // ADST_FLIPADST |
| fadst8x8_neon, // FLIPADST_ADST |
| idtx8x8_neon, // IDTX |
| fdct4x8_neon, // V_DCT |
| idtx8x8_neon, // H_DCT |
| fadst8x8_neon, // V_ADST |
| idtx8x8_neon, // H_ADST |
| fadst8x8_neon, // V_FLIPADST |
| idtx8x8_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon row_highbd_txfm8x16_arr[TX_TYPES] = { |
| fdct16x16_neon, // DCT_DCT |
| fdct16x16_neon, // ADST_DCT |
| fadst16x16_neon, // DCT_ADST |
| fadst16x16_neon, // ADST_ADST |
| fdct16x16_neon, // FLIPADST_DCT |
| fadst16x16_neon, // DCT_FLIPADST |
| fadst16x16_neon, // FLIPADST_FLIPADST |
| fadst16x16_neon, // ADST_FLIPADST |
| fadst16x16_neon, // FLIPADST_ADST |
| idtx16x16_neon, // IDTX |
| idtx16x16_neon, // V_DCT |
| fdct16x16_neon, // H_DCT |
| idtx16x16_neon, // V_ADST |
| fadst16x16_neon, // H_ADST |
| idtx16x16_neon, // V_FLIPADST |
| fadst16x16_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon col_highbd_txfm8x16_arr[TX_TYPES] = { |
| fdct16x16_neon, // DCT_DCT |
| fadst16x16_neon, // ADST_DCT |
| fdct16x16_neon, // DCT_ADST |
| fadst16x16_neon, // ADST_ADST |
| fadst16x16_neon, // FLIPADST_DCT |
| fdct16x16_neon, // DCT_FLIPADST |
| fadst16x16_neon, // FLIPADST_FLIPADST |
| fadst16x16_neon, // ADST_FLIPADST |
| fadst16x16_neon, // FLIPADST_ADST |
| idtx16x16_neon, // IDTX |
| fdct16x16_neon, // V_DCT |
| idtx16x16_neon, // H_DCT |
| fadst16x16_neon, // V_ADST |
| idtx16x16_neon, // H_ADST |
| fadst16x16_neon, // V_FLIPADST |
| idtx16x16_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon row_highbd_txfm8x8_arr[TX_TYPES] = { |
| fdct8x8_neon, // DCT_DCT |
| fdct8x8_neon, // ADST_DCT |
| fadst8x8_neon, // DCT_ADST |
| fadst8x8_neon, // ADST_ADST |
| fdct8x8_neon, // FLIPADST_DCT |
| fadst8x8_neon, // DCT_FLIPADST |
| fadst8x8_neon, // FLIPADST_FLIPADST |
| fadst8x8_neon, // ADST_FLIPADST |
| fadst8x8_neon, // FLIPADST_ADST |
| idtx8x8_neon, // IDTX |
| idtx8x8_neon, // V_DCT |
| fdct8x8_neon, // H_DCT |
| idtx8x8_neon, // V_ADST |
| fadst8x8_neon, // H_ADST |
| idtx8x8_neon, // V_FLIPADST |
| fadst8x8_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon row_highbd_txfm4x8_arr[TX_TYPES] = { |
| fdct4x8_neon, // DCT_DCT |
| fdct4x8_neon, // ADST_DCT |
| fadst8x8_neon, // DCT_ADST |
| fadst8x8_neon, // ADST_ADST |
| fdct4x8_neon, // FLIPADST_DCT |
| fadst8x8_neon, // DCT_FLIPADST |
| fadst8x8_neon, // FLIPADST_FLIPADST |
| fadst8x8_neon, // ADST_FLIPADST |
| fadst8x8_neon, // FLIPADST_ADST |
| idtx8x8_neon, // IDTX |
| idtx8x8_neon, // V_DCT |
| fdct4x8_neon, // H_DCT |
| idtx8x8_neon, // V_ADST |
| fadst8x8_neon, // H_ADST |
| idtx8x8_neon, // V_FLIPADST |
| fadst8x8_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon row_highbd_txfm4x4_arr[TX_TYPES] = { |
| fdct4x4_neon, // DCT_DCT |
| fdct4x4_neon, // ADST_DCT |
| fadst4x4_neon, // DCT_ADST |
| fadst4x4_neon, // ADST_ADST |
| fdct4x4_neon, // FLIPADST_DCT |
| fadst4x4_neon, // DCT_FLIPADST |
| fadst4x4_neon, // FLIPADST_FLIPADST |
| fadst4x4_neon, // ADST_FLIPADST |
| fadst4x4_neon, // FLIPADST_ADST |
| idtx4x4_neon, // IDTX |
| idtx4x4_neon, // V_DCT |
| fdct4x4_neon, // H_DCT |
| idtx4x4_neon, // V_ADST |
| fadst4x4_neon, // H_ADST |
| idtx4x4_neon, // V_FLIPADST |
| fadst4x4_neon // H_FLIPADST |
| }; |
| |
| static const fwd_transform_1d_neon col_highbd_txfm4x4_arr[TX_TYPES] = { |
| fdct4x4_neon, // DCT_DCT |
| fadst4x4_neon, // ADST_DCT |
| fdct4x4_neon, // DCT_ADST |
| fadst4x4_neon, // ADST_ADST |
| fadst4x4_neon, // FLIPADST_DCT |
| fdct4x4_neon, // DCT_FLIPADST |
| fadst4x4_neon, // FLIPADST_FLIPADST |
| fadst4x4_neon, // ADST_FLIPADST |
| fadst4x4_neon, // FLIPADST_ADST |
| idtx4x4_neon, // IDTX |
| fdct4x4_neon, // V_DCT |
| idtx4x4_neon, // H_DCT |
| fadst4x4_neon, // V_ADST |
| idtx4x4_neon, // H_ADST |
| fadst4x4_neon, // V_FLIPADST |
| idtx4x4_neon // H_FLIPADST |
| }; |
| |
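| // 32-point forward DCT butterfly network. Each int32x4_t carries four |
| // independent columns, and `stride` is the spacing between successive |
| // elements so the same routine can traverse different buffer layouts. |
| // cospi[i] holds cos(i * PI / 128) scaled by 2^cos_bit; v_cos_bit holds |
| // -cos_bit for the rounding right-shifts inside the butterflies. |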
| void av1_fdct32_new_neon(int32x4_t *input, int32x4_t *output, int cos_bit, |
| const int stride) { |
| int32x4_t buf0[32]; |
| int32x4_t buf1[32]; |
| const int32_t *cospi; |
| const int32x4_t v_cos_bit = vdupq_n_s32(-cos_bit); |
| |
| int startidx = 0 * stride; |
| int endidx = 31 * stride; |
| // stage 0 |
| // stage 1 |
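| // 16 add/sub butterflies pairing input[i * stride] with |
| // input[(31 - i) * stride], walking inward from both ends. |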
| buf1[0] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[31] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[1] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[30] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[2] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[29] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[3] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[28] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[4] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[27] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[5] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[26] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[6] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[25] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[7] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[24] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[8] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[23] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[9] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[22] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[10] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[21] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[11] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[20] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[12] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[19] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[13] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[18] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[14] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[17] = vsubq_s32(input[startidx], input[endidx]); |
| startidx += stride; |
| endidx -= stride; |
| buf1[15] = vaddq_s32(input[startidx], input[endidx]); |
| buf1[16] = vsubq_s32(input[startidx], input[endidx]); |
| |
| // stage 2 |
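| // Butterfly the low half (0..15); pass 16..19 and 28..31 through and |
| // rotate the pairs (20,27)..(23,24) by +/-cospi[32], i.e. cos(PI / 4). |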
| cospi = cospi_arr(cos_bit); |
| buf0[0] = vaddq_s32(buf1[0], buf1[15]); |
| buf0[15] = vsubq_s32(buf1[0], buf1[15]); |
| buf0[1] = vaddq_s32(buf1[1], buf1[14]); |
| buf0[14] = vsubq_s32(buf1[1], buf1[14]); |
| buf0[2] = vaddq_s32(buf1[2], buf1[13]); |
| buf0[13] = vsubq_s32(buf1[2], buf1[13]); |
| buf0[3] = vaddq_s32(buf1[3], buf1[12]); |
| buf0[12] = vsubq_s32(buf1[3], buf1[12]); |
| buf0[4] = vaddq_s32(buf1[4], buf1[11]); |
| buf0[11] = vsubq_s32(buf1[4], buf1[11]); |
| buf0[5] = vaddq_s32(buf1[5], buf1[10]); |
| buf0[10] = vsubq_s32(buf1[5], buf1[10]); |
| buf0[6] = vaddq_s32(buf1[6], buf1[9]); |
| buf0[9] = vsubq_s32(buf1[6], buf1[9]); |
| buf0[7] = vaddq_s32(buf1[7], buf1[8]); |
| buf0[8] = vsubq_s32(buf1[7], buf1[8]); |
| buf0[16] = buf1[16]; |
| buf0[17] = buf1[17]; |
| buf0[18] = buf1[18]; |
| buf0[19] = buf1[19]; |
| btf_32_neon_type0(-cospi[32], cospi[32], buf1[20], buf1[27], buf0[20], |
| buf0[27], v_cos_bit); |
| btf_32_neon_type0(-cospi[32], cospi[32], buf1[21], buf1[26], buf0[21], |
| buf0[26], v_cos_bit); |
| btf_32_neon_type0(-cospi[32], cospi[32], buf1[22], buf1[25], buf0[22], |
| buf0[25], v_cos_bit); |
| btf_32_neon_type0(-cospi[32], cospi[32], buf1[23], buf1[24], buf0[23], |
| buf0[24], v_cos_bit); |
| buf0[28] = buf1[28]; |
| buf0[29] = buf1[29]; |
| buf0[30] = buf1[30]; |
| buf0[31] = buf1[31]; |
| |
| // stage 3 |
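| // Butterfly 0..7; rotate (10,13) and (11,12) by +/-cospi[32]; add/sub |
| // the 16..31 half in groups of eight. |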
| buf1[0] = vaddq_s32(buf0[0], buf0[7]); |
| buf1[7] = vsubq_s32(buf0[0], buf0[7]); |
| buf1[1] = vaddq_s32(buf0[1], buf0[6]); |
| buf1[6] = vsubq_s32(buf0[1], buf0[6]); |
| buf1[2] = vaddq_s32(buf0[2], buf0[5]); |
| buf1[5] = vsubq_s32(buf0[2], buf0[5]); |
| buf1[3] = vaddq_s32(buf0[3], buf0[4]); |
| buf1[4] = vsubq_s32(buf0[3], buf0[4]); |
| buf1[8] = buf0[8]; |
| buf1[9] = buf0[9]; |
| btf_32_neon_type0(-cospi[32], cospi[32], buf0[10], buf0[13], buf1[10], |
| buf1[13], v_cos_bit); |
| btf_32_neon_type0(-cospi[32], cospi[32], buf0[11], buf0[12], buf1[11], |
| buf1[12], v_cos_bit); |
| buf1[14] = buf0[14]; |
| buf1[15] = buf0[15]; |
| buf1[16] = vaddq_s32(buf0[16], buf0[23]); |
| buf1[23] = vsubq_s32(buf0[16], buf0[23]); |
| buf1[17] = vaddq_s32(buf0[17], buf0[22]); |
| buf1[22] = vsubq_s32(buf0[17], buf0[22]); |
| buf1[18] = vaddq_s32(buf0[18], buf0[21]); |
| buf1[21] = vsubq_s32(buf0[18], buf0[21]); |
| buf1[19] = vaddq_s32(buf0[19], buf0[20]); |
| buf1[20] = vsubq_s32(buf0[19], buf0[20]); |
| buf1[24] = vsubq_s32(buf0[31], buf0[24]); |
| buf1[31] = vaddq_s32(buf0[31], buf0[24]); |
| buf1[25] = vsubq_s32(buf0[30], buf0[25]); |
| buf1[30] = vaddq_s32(buf0[30], buf0[25]); |
| buf1[26] = vsubq_s32(buf0[29], buf0[26]); |
| buf1[29] = vaddq_s32(buf0[29], buf0[26]); |
| buf1[27] = vsubq_s32(buf0[28], buf0[27]); |
| buf1[28] = vaddq_s32(buf0[28], buf0[27]); |
| |
| // stage 4 |
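| // Butterfly 0..3 and 8..15; rotate (5,6) by cospi[32], (18,29) and |
| // (19,28) by cospi[16]/cospi[48], and (20,27) and (21,26) by the |
| // negated pair. |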
| buf0[0] = vaddq_s32(buf1[0], buf1[3]); |
| buf0[3] = vsubq_s32(buf1[0], buf1[3]); |
| buf0[1] = vaddq_s32(buf1[1], buf1[2]); |
| buf0[2] = vsubq_s32(buf1[1], buf1[2]); |
| buf0[4] = buf1[4]; |
| btf_32_neon_type0(-cospi[32], cospi[32], buf1[5], buf1[6], buf0[5], buf0[6], |
| v_cos_bit); |
| buf0[7] = buf1[7]; |
| buf0[8] = vaddq_s32(buf1[8], buf1[11]); |
| buf0[11] = vsubq_s32(buf1[8], buf1[11]); |
| buf0[9] = vaddq_s32(buf1[9], buf1[10]); |
| buf0[10] = vsubq_s32(buf1[9], buf1[10]); |
| buf0[12] = vsubq_s32(buf1[15], buf1[12]); |
| buf0[15] = vaddq_s32(buf1[15], buf1[12]); |
| buf0[13] = vsubq_s32(buf1[14], buf1[13]); |
| buf0[14] = vaddq_s32(buf1[14], buf1[13]); |
| buf0[16] = buf1[16]; |
| buf0[17] = buf1[17]; |
| |
| btf_32_neon_type0(-cospi[16], cospi[48], buf1[18], buf1[29], buf0[18], |
| buf0[29], v_cos_bit); |
| btf_32_neon_type0(-cospi[16], cospi[48], buf1[19], buf1[28], buf0[19], |
| buf0[28], v_cos_bit); |
| |
| btf_32_neon_type0(-cospi[48], -cospi[16], buf1[20], buf1[27], buf0[20], |
| buf0[27], v_cos_bit); |
| btf_32_neon_type0(-cospi[48], -cospi[16], buf1[21], buf1[26], buf0[21], |
| buf0[26], v_cos_bit); |
| |
| buf0[22] = buf1[22]; |
| buf0[23] = buf1[23]; |
| buf0[24] = buf1[24]; |
| buf0[25] = buf1[25]; |
| buf0[30] = buf1[30]; |
| buf0[31] = buf1[31]; |
| |
| // stage 5 |
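| // Rotate (0,1) by cospi[32] and (2,3) by cospi[48]/cospi[16]; butterfly |
| // 4..7; rotate (9,14) and (10,13); add/sub the 16..31 half in groups of |
| // four. |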
| btf_32_neon_type0(cospi[32], cospi[32], buf0[0], buf0[1], buf1[0], buf1[1], |
| v_cos_bit); |
| |
| btf_32_neon_type1(cospi[48], cospi[16], buf0[2], buf0[3], buf1[2], buf1[3], |
| v_cos_bit); |
| buf1[4] = vaddq_s32(buf0[4], buf0[5]); |
| buf1[5] = vsubq_s32(buf0[4], buf0[5]); |
| buf1[6] = vsubq_s32(buf0[7], buf0[6]); |
| buf1[7] = vaddq_s32(buf0[7], buf0[6]); |
| buf1[8] = buf0[8]; |
| btf_32_neon_type0(-cospi[16], cospi[48], buf0[9], buf0[14], buf1[9], buf1[14], |
| v_cos_bit); |
| btf_32_neon_type0(-cospi[48], -cospi[16], buf0[10], buf0[13], buf1[10], |
| buf1[13], v_cos_bit); |
| buf1[11] = buf0[11]; |
| buf1[12] = buf0[12]; |
| buf1[15] = buf0[15]; |
| buf1[16] = vaddq_s32(buf0[16], buf0[19]); |
| buf1[19] = vsubq_s32(buf0[16], buf0[19]); |
| buf1[17] = vaddq_s32(buf0[17], buf0[18]); |
| buf1[18] = vsubq_s32(buf0[17], buf0[18]); |
| buf1[20] = vsubq_s32(buf0[23], buf0[20]); |
| buf1[23] = vaddq_s32(buf0[23], buf0[20]); |
| buf1[21] = vsubq_s32(buf0[22], buf0[21]); |
| buf1[22] = vaddq_s32(buf0[22], buf0[21]); |
| buf1[24] = vaddq_s32(buf0[24], buf0[27]); |
| buf1[27] = vsubq_s32(buf0[24], buf0[27]); |
| buf1[25] = vaddq_s32(buf0[25], buf0[26]); |
| buf1[26] = vsubq_s32(buf0[25], buf0[26]); |
| buf1[28] = vsubq_s32(buf0[31], buf0[28]); |
| buf1[31] = vaddq_s32(buf0[31], buf0[28]); |
| buf1[29] = vsubq_s32(buf0[30], buf0[29]); |
| buf1[30] = vaddq_s32(buf0[30], buf0[29]); |
| |
| // stage 6 |
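| // Rotate (4,7) by cospi[56]/cospi[8] and (5,6) by cospi[24]/cospi[40]; |
| // butterfly 8..15 in adjacent pairs; rotate (17,30), (18,29), (21,26) |
| // and (22,25). |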
| buf0[0] = buf1[0]; |
| buf0[1] = buf1[1]; |
| buf0[2] = buf1[2]; |
| buf0[3] = buf1[3]; |
| |
| btf_32_neon_type1(cospi[56], cospi[8], buf1[4], buf1[7], buf0[4], buf0[7], |
| v_cos_bit); |
| btf_32_neon_type0(-cospi[8], cospi[56], buf1[17], buf1[30], buf0[17], |
| buf0[30], v_cos_bit); |
| btf_32_neon_type0(-cospi[56], -cospi[8], buf1[18], buf1[29], buf0[18], |
| buf0[29], v_cos_bit); |
| |
| buf0[8] = vaddq_s32(buf1[8], buf1[9]); |
| buf0[9] = vsubq_s32(buf1[8], buf1[9]); |
| buf0[10] = vsubq_s32(buf1[11], buf1[10]); |
| buf0[11] = vaddq_s32(buf1[11], buf1[10]); |
| buf0[12] = vaddq_s32(buf1[12], buf1[13]); |
| buf0[13] = vsubq_s32(buf1[12], buf1[13]); |
| buf0[14] = vsubq_s32(buf1[15], buf1[14]); |
| buf0[15] = vaddq_s32(buf1[15], buf1[14]); |
| buf0[16] = buf1[16]; |
| buf0[19] = buf1[19]; |
| buf0[20] = buf1[20]; |
| |
| btf_32_neon_type1(cospi[24], cospi[40], buf1[5], buf1[6], buf0[5], buf0[6], |
| v_cos_bit); |
| btf_32_neon_type0(-cospi[40], cospi[24], buf1[21], buf1[26], buf0[21], |
| buf0[26], v_cos_bit); |
| btf_32_neon_type0(-cospi[24], -cospi[40], buf1[22], buf1[25], buf0[22], |
| buf0[25], v_cos_bit); |
| |