| /* |
| * Copyright (c) 2018, Alliance for Open Media. All rights reserved |
| * |
| * This source code is subject to the terms of the BSD 2 Clause License and |
| * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
| * was not distributed with this source code in the LICENSE file, you can |
| * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
| * Media Patent License 1.0 was not distributed with this source code in the |
| * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
| */ |
| |
| #include <arm_neon.h> |
| #include <assert.h> |
| |
| #include "config/aom_config.h" |
| #include "config/aom_dsp_rtcd.h" |
| #include "config/av1_rtcd.h" |
| |
| #include "av1/common/av1_inv_txfm1d.h" |
| #include "av1/common/av1_inv_txfm1d_cfg.h" |
| #include "av1/common/av1_txfm.h" |
| #include "av1/common/enums.h" |
| #include "av1/common/idct.h" |
| #include "av1/common/arm/av1_inv_txfm_neon.h" |
| #include "av1/common/arm/transpose_neon.h" |
| |
| // 1D itx types |
| typedef enum ATTRIBUTE_PACKED { |
| IDCT_1D, |
| IADST_1D, |
| IFLIPADST_1D = IADST_1D, |
| IIDENTITY_1D, |
| ITX_TYPES_1D, |
| } ITX_TYPE_1D; |
| |
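| // 1D transform used for the vertical (column) pass of each 2D TX_TYPE. |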
| static const ITX_TYPE_1D vitx_1d_tab[TX_TYPES] = { |
| IDCT_1D, IADST_1D, IDCT_1D, IADST_1D, |
| IFLIPADST_1D, IDCT_1D, IFLIPADST_1D, IADST_1D, |
| IFLIPADST_1D, IIDENTITY_1D, IDCT_1D, IIDENTITY_1D, |
| IADST_1D, IIDENTITY_1D, IFLIPADST_1D, IIDENTITY_1D, |
| }; |
| |
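| // 1D transform used for the horizontal (row) pass of each 2D TX_TYPE. |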
| static const ITX_TYPE_1D hitx_1d_tab[TX_TYPES] = { |
| IDCT_1D, IDCT_1D, IADST_1D, IADST_1D, |
| IDCT_1D, IFLIPADST_1D, IFLIPADST_1D, IFLIPADST_1D, |
| IADST_1D, IIDENTITY_1D, IIDENTITY_1D, IDCT_1D, |
| IIDENTITY_1D, IADST_1D, IIDENTITY_1D, IFLIPADST_1D, |
| }; |
| |
| // 1D functions |
| static const transform_1d_neon lowbd_txfm_all_1d_arr[TX_SIZES][ITX_TYPES_1D] = { |
| { av1_idct4, av1_iadst4, av1_iidentity4_c }, |
| { av1_idct8, av1_iadst8, av1_iidentity8_c }, |
| { av1_idct16, av1_iadst16, av1_iidentity16_c }, |
| { av1_idct32, NULL, NULL }, |
| { av1_idct64, NULL, NULL }, |
| }; |
| |
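| // Add height rows of 8 residuals from in[] to the prediction in output, |
| // saturating to 8 bits; when flipud is set the residual rows are read |
| // bottom-up. |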
| static INLINE void lowbd_add_flip_buffer_8xn_neon(int16x8_t *in, |
| uint8_t *output, int stride, |
| int flipud, |
| const int height) { |
| int j = flipud ? (height - 1) : 0; |
| const int step = flipud ? -1 : 1; |
| int16x8_t temp_output; |
| for (int i = 0; i < height; ++i, j += step) { |
| temp_output = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(output))); |
| temp_output = vaddq_s16(temp_output, in[j]); |
| vst1_u8(output, vqmovun_s16(temp_output)); |
| output += stride; |
| } |
| } |
| |
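| // Reconstruct 16 pixels: widen the prediction to 16 bits, add the two |
| // residual vectors, and narrow back with unsigned saturation. |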
| static INLINE uint8x16_t lowbd_get_recon_16x16_neon(const uint8x16_t pred, |
| int16x8_t res0, |
| int16x8_t res1) { |
| int16x8_t temp_output[2]; |
| uint8x16_t temp_output_8q; |
| temp_output[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pred))); |
| temp_output[0] = vaddq_s16(temp_output[0], res0); |
| temp_output[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pred))); |
| temp_output[1] = vaddq_s16(temp_output[1], res1); |
| temp_output_8q = |
| vcombine_u8(vqmovun_s16(temp_output[0]), vqmovun_s16(temp_output[1])); |
| return temp_output_8q; |
| } |
| |
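| // 16-wide counterpart of lowbd_add_flip_buffer_8xn_neon; the residual for |
| // row j is split across in[j] (low 8 lanes) and in[j + height] (high 8). |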
| static INLINE void lowbd_add_flip_buffer_16xn_neon(int16x8_t *in, |
| uint8_t *output, int stride, |
| int flipud, int height) { |
| uint8x16_t temp_output_8q; |
| int j = flipud ? (height - 1) : 0; |
| const int step = flipud ? -1 : 1; |
| for (int i = 0; i < height; ++i, j += step) { |
| temp_output_8q = vld1q_u8(output + i * stride); |
| temp_output_8q = |
| lowbd_get_recon_16x16_neon(temp_output_8q, in[j], in[j + height]); |
| vst1q_u8((output + i * stride), temp_output_8q); |
| } |
| } |
| |
| static INLINE void lowbd_inv_txfm2d_memset_neon(int16x8_t *a, int size, |
| int value) { |
| for (int i = 0; i < size; i++) { |
| a[i] = vdupq_n_s16((int16_t)value); |
| } |
| } |
| |
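| // Butterfly with coefficients in lanes 0 and 1 of c: |
| //   *t0 = round_shift(in0 * c[0] + in1 * c[1], INV_COS_BIT) |
| //   *t1 = round_shift(in0 * c[1] - in1 * c[0], INV_COS_BIT) |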
| static INLINE void btf_16_lane_0_1_neon(const int16x8_t in0, |
| const int16x8_t in1, const int16x4_t c, |
| int16x8_t *t0, int16x8_t *t1) { |
| int32x4_t s0[2], s1[2]; |
| int16x4_t v0[2], v1[2]; |
| |
| s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0); |
| s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0); |
| s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1); |
| s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1); |
| |
| s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1); |
| s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1); |
| s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0); |
| s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0); |
| |
| v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); |
| |
| *t0 = vcombine_s16(v0[0], v0[1]); |
| *t1 = vcombine_s16(v1[0], v1[1]); |
| } |
| |
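| // Same butterfly with the coefficient lanes swapped: |
| //   *t0 = round_shift(in0 * c[1] + in1 * c[0], INV_COS_BIT) |
| //   *t1 = round_shift(in0 * c[0] - in1 * c[1], INV_COS_BIT) |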
| static INLINE void btf_16_lane_1_0_neon(const int16x8_t in0, |
| const int16x8_t in1, const int16x4_t c, |
| int16x8_t *t0, int16x8_t *t1) { |
| int32x4_t s0[2], s1[2]; |
| int16x4_t v0[2], v1[2]; |
| |
| s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 1); |
| s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 1); |
| s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 0); |
| s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 0); |
| |
| s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 0); |
| s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 0); |
| s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 1); |
| s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 1); |
| |
| v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); |
| |
| *t0 = vcombine_s16(v0[0], v0[1]); |
| *t1 = vcombine_s16(v1[0], v1[1]); |
| } |
| |
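| // Butterfly with coefficients in lanes 2 and 3 of c: |
| //   *t0 = round_shift(in0 * c[2] + in1 * c[3], INV_COS_BIT) |
| //   *t1 = round_shift(in0 * c[3] - in1 * c[2], INV_COS_BIT) |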
| static INLINE void btf_16_lane_2_3_neon(const int16x8_t in0, |
| const int16x8_t in1, const int16x4_t c, |
| int16x8_t *t0, int16x8_t *t1) { |
| int32x4_t s0[2], s1[2]; |
| int16x4_t v0[2], v1[2]; |
| |
| s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2); |
| s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2); |
| s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3); |
| s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3); |
| |
| s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3); |
| s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3); |
| s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2); |
| s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2); |
| |
| v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); |
| |
| *t0 = vcombine_s16(v0[0], v0[1]); |
| *t1 = vcombine_s16(v1[0], v1[1]); |
| } |
| |
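| // Single-input butterfly with scalar coefficients: |
| //   *t0 = round_shift(in0 * coef1, INV_COS_BIT) |
| //   *t1 = round_shift(in0 * coef2, INV_COS_BIT) |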
| static INLINE void btf_16_neon(const int16x8_t in0, int16_t coef1, |
| int16_t coef2, int16x8_t *t0, int16x8_t *t1) { |
| int32x4_t s0_l, s0_h, s1_l, s1_h; |
| int16x4_t v0[2], v1[2]; |
| |
| s0_l = vmull_n_s16(vget_low_s16(in0), coef1); |
| s0_h = vmull_n_s16(vget_high_s16(in0), coef1); |
| s1_l = vmull_n_s16(vget_low_s16(in0), coef2); |
| s1_h = vmull_n_s16(vget_high_s16(in0), coef2); |
| |
| v0[0] = vrshrn_n_s32(s0_l, INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(s0_h, INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(s1_l, INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(s1_h, INV_COS_BIT); |
| |
| *t0 = vcombine_s16(v0[0], v0[1]); |
| *t1 = vcombine_s16(v1[0], v1[1]); |
| } |
| |
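| // Butterfly with coefficients in lanes 3 and 2 of c: |
| //   *t0 = round_shift(in0 * c[3] + in1 * c[2], INV_COS_BIT) |
| //   *t1 = round_shift(in0 * c[2] - in1 * c[3], INV_COS_BIT) |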
| static INLINE void btf_16_lane_3_2_neon(const int16x8_t in0, |
| const int16x8_t in1, const int16x4_t c, |
| int16x8_t *t0, int16x8_t *t1) { |
| int32x4_t s0[2], s1[2]; |
| int16x4_t v0[2], v1[2]; |
| |
| s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3); |
| s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3); |
| s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2); |
| s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2); |
| |
| s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2); |
| s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2); |
| s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3); |
| s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3); |
| |
| v0[0] = vrshrn_n_s32(s0[0], INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(s0[1], INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(s1[0], INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(s1[1], INV_COS_BIT); |
| |
| *t0 = vcombine_s16(v0[0], v0[1]); |
| *t1 = vcombine_s16(v1[0], v1[1]); |
| } |
| |
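| // Sum/difference butterfly scaled by lane 0 of c: |
| //   x[0] = round_shift((x[0] + x[1]) * c[0], INV_COS_BIT) |
| //   x[1] = round_shift((x[0] - x[1]) * c[0], INV_COS_BIT) |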
| static INLINE void btf_16_half_neon(int16x8_t *const x, const int16x4_t c) { |
| int32x4_t t0[2], t1[2]; |
| int16x4_t v0[2], v1[2]; |
| |
| // Don't add/sub before multiply, which will overflow in iadst8. |
| const int32x4_t x0_lo = vmull_lane_s16(vget_low_s16(x[0]), c, 0); |
| const int32x4_t x0_hi = vmull_lane_s16(vget_high_s16(x[0]), c, 0); |
| const int32x4_t x1_lo = vmull_lane_s16(vget_low_s16(x[1]), c, 0); |
| const int32x4_t x1_hi = vmull_lane_s16(vget_high_s16(x[1]), c, 0); |
| |
| t0[0] = vaddq_s32(x0_lo, x1_lo); |
| t0[1] = vaddq_s32(x0_hi, x1_hi); |
| t1[0] = vsubq_s32(x0_lo, x1_lo); |
| t1[1] = vsubq_s32(x0_hi, x1_hi); |
| |
| v0[0] = vrshrn_n_s32(t0[0], INV_COS_BIT); |
| v0[1] = vrshrn_n_s32(t0[1], INV_COS_BIT); |
| v1[0] = vrshrn_n_s32(t1[0], INV_COS_BIT); |
| v1[1] = vrshrn_n_s32(t1[1], INV_COS_BIT); |
| |
| x[0] = vcombine_s16(v0[0], v0[1]); |
| x[1] = vcombine_s16(v1[0], v1[1]); |
| } |
| |
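| // Assemble four 16-bit constants into one int16x4_t. |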
| static INLINE int16x4_t set_s16x4_neon(const int16_t c0, const int16_t c1, |
| const int16_t c2, const int16_t c3) { |
| int16x4_t val = vdup_n_s16((int16_t)0); |
| val = vset_lane_s16(c0, val, 0); |
| val = vset_lane_s16(c1, val, 1); |
| val = vset_lane_s16(c2, val, 2); |
| val = vset_lane_s16(c3, val, 3); |
| return val; |
| } |
| |
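| // 8-point inverse ADST, processing eight columns at a time. |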
| static INLINE void iadst8_neon(int16x8_t *const in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], |
| (int16_t)cospi[20], (int16_t)cospi[44]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[36], (int16_t)cospi[28], |
| (int16_t)cospi[52], (int16_t)cospi[12]); |
| const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| int16x8_t x[8]; |
| int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; |
| |
| // Stage 1 |
| x[0] = in[7]; |
| x[1] = in[0]; |
| x[2] = in[5]; |
| x[3] = in[2]; |
| x[4] = in[3]; |
| x[5] = in[4]; |
| x[6] = in[1]; |
| x[7] = in[6]; |
| |
| // Stage 2 |
| btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1); |
| btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3); |
| btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5); |
| btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7); |
| |
| // Stage 3 |
| x[0] = vqaddq_s16(s0, s4); |
| x[1] = vqaddq_s16(s1, s5); |
| x[2] = vqaddq_s16(s2, s6); |
| x[3] = vqaddq_s16(s3, s7); |
| x[4] = vqsubq_s16(s0, s4); |
| x[5] = vqsubq_s16(s1, s5); |
| x[6] = vqsubq_s16(s2, s6); |
| x[7] = vqsubq_s16(s3, s7); |
| |
| // Stage 4 |
| s0 = x[0]; |
| s1 = x[1]; |
| s2 = x[2]; |
| s3 = x[3]; |
| btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5); |
| btf_16_lane_3_2_neon(x[7], x[6], c2, &s7, &s6); |
| |
| // Stage 5 |
| x[0] = vqaddq_s16(s0, s2); |
| x[1] = vqaddq_s16(s1, s3); |
| x[2] = vqsubq_s16(s0, s2); |
| x[3] = vqsubq_s16(s1, s3); |
| x[4] = vqaddq_s16(s4, s6); |
| x[5] = vqaddq_s16(s5, s7); |
| x[6] = vqsubq_s16(s4, s6); |
| x[7] = vqsubq_s16(s5, s7); |
| |
| // Stage 6 |
| btf_16_half_neon(x + 2, c2); |
| btf_16_half_neon(x + 6, c2); |
| |
| // Stage 7 |
| out[0] = x[0]; |
| out[1] = vqnegq_s16(x[4]); |
| out[2] = x[6]; |
| out[3] = vqnegq_s16(x[2]); |
| out[4] = x[3]; |
| out[5] = vqnegq_s16(x[7]); |
| out[6] = x[5]; |
| out[7] = vqnegq_s16(x[1]); |
| } |
| |
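| // 8-point inverse ADST when only in[0] is nonzero. |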
| static INLINE void iadst8_low1_neon(int16x8_t *const in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| int16x8_t x[8]; |
| int16x8_t s0, s1, s4, s5; |
| |
| // Stage 1 |
| x[1] = in[0]; |
| |
| // Stage 2 |
| |
| btf_16_neon(x[1], cospi[60], -cospi[4], &s0, &s1); |
| |
| // Stage 3 |
| x[0] = s0; |
| x[1] = s1; |
| x[4] = s0; |
| x[5] = s1; |
| |
| // Stage 4 |
| s0 = x[0]; |
| s1 = x[1]; |
| btf_16_lane_2_3_neon(x[4], x[5], c2, &s4, &s5); |
| |
| // Stage 5 |
| x[0] = s0; |
| x[1] = s1; |
| x[2] = s0; |
| x[3] = s1; |
| x[4] = s4; |
| x[5] = s5; |
| x[6] = s4; |
| x[7] = s5; |
| |
| // Stage 6 |
| btf_16_half_neon(x + 2, c2); |
| btf_16_half_neon(x + 6, c2); |
| |
| // Stage 7 |
| out[0] = x[0]; |
| out[1] = vqnegq_s16(x[4]); |
| out[2] = x[6]; |
| out[3] = vqnegq_s16(x[2]); |
| out[4] = x[3]; |
| out[5] = vqnegq_s16(x[7]); |
| out[6] = x[5]; |
| out[7] = vqnegq_s16(x[1]); |
| } |
| |
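| // 8-point inverse DCT, processing eight columns at a time. |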
| static INLINE void idct8_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit, |
| int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[8], step2[8]; |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| // stage 2 |
| btf_16_lane_0_1_neon(in[1], in[7], c0, &step1[7], &step1[4]); |
| btf_16_lane_2_3_neon(in[5], in[3], c0, &step1[6], &step1[5]); |
| |
| // stage 3 |
| btf_16_lane_0_1_neon(in[0], in[4], c1, &step2[0], &step2[1]); |
| btf_16_lane_2_3_neon(in[2], in[6], c1, &step2[3], &step2[2]); |
| step2[4] = vqaddq_s16(step1[4], step1[5]); |
| step2[5] = vqsubq_s16(step1[4], step1[5]); |
| step2[6] = vqsubq_s16(step1[7], step1[6]); |
| step2[7] = vqaddq_s16(step1[7], step1[6]); |
| |
| // stage 4 |
| step1[0] = vqaddq_s16(step2[0], step2[3]); |
| step1[1] = vqaddq_s16(step2[1], step2[2]); |
| step1[2] = vqsubq_s16(step2[1], step2[2]); |
| step1[3] = vqsubq_s16(step2[0], step2[3]); |
| btf_16_lane_0_1_neon(step2[6], step2[5], c1, &step1[6], &step1[5]); |
| |
| // stage 5 |
| out[0] = vqaddq_s16(step1[0], step2[7]); |
| out[1] = vqaddq_s16(step1[1], step1[6]); |
| out[2] = vqaddq_s16(step1[2], step1[5]); |
| out[3] = vqaddq_s16(step1[3], step2[4]); |
| out[4] = vqsubq_s16(step1[3], step2[4]); |
| out[5] = vqsubq_s16(step1[2], step1[5]); |
| out[6] = vqsubq_s16(step1[1], step1[6]); |
| out[7] = vqsubq_s16(step1[0], step2[7]); |
| } |
| |
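| // 8-point inverse DCT for a DC-only block: every output equals |
| // round_shift(in[0] * cospi[32], INV_COS_BIT). |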
| static INLINE void idct8_low1_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1; |
| int32x4_t t32[2]; |
| |
| // stage 1 |
| // stage 2 |
| // stage 3 |
| t32[0] = vmull_n_s16(vget_low_s16(in[0]), (int16_t)cospi[32]); |
| t32[1] = vmull_n_s16(vget_high_s16(in[0]), (int16_t)cospi[32]); |
| |
| step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), |
| vrshrn_n_s32(t32[1], INV_COS_BIT)); |
| |
| // stage 4 |
| // stage 5 |
| out[0] = step1; |
| out[1] = step1; |
| out[2] = step1; |
| out[3] = step1; |
| out[4] = step1; |
| out[5] = step1; |
| out[6] = step1; |
| out[7] = step1; |
| } |
| |
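| // Rounding right-shift of each vector in arr by bit; a no-op when bit is 0. |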
| void av1_round_shift_array_16_neon(int16x8_t *arr, int size, int bit) { |
| assert(!(size % 4)); |
| if (!bit) return; |
| const int16x8_t dup_bits_n_16x8 = vdupq_n_s16((int16_t)(-bit)); |
| for (int i = 0; i < size; i++) { |
| arr[i] = vrshlq_s16(arr[i], dup_bits_n_16x8); |
| } |
| } |
| |
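| // Reverse the order of the first size (<= 8) vectors in input. |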
| static INLINE void flip_buf_ud_neon(int16x8_t *input, int size) { |
| int16x8_t temp[8]; |
| for (int i = 0; i < size; ++i) { |
| temp[i] = input[size - 1 - i]; |
| } |
| for (int i = 0; i < size; ++i) { |
| input[i] = temp[i]; |
| } |
| } |
| |
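| // Load eight rows of 32-bit coefficients, narrowing to 16 bits; out_size |
| // is the row stride of the 32-bit input buffer. |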
| static INLINE void load_buffer_32bit_to_16bit_neon(const int32_t *input, |
| int16x8_t *const a, |
| int out_size) { |
| for (int i = 0; i < 8; ++i) { |
| a[i] = vcombine_s16(vmovn_s32(vld1q_s32(input)), |
| vmovn_s32(vld1q_s32(input + 4))); |
| input += out_size; |
| } |
| } |
| |
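| // Identity-transform scale factors in Q12 format, one per TX_SIZE: |
| // sqrt(2), 2, 2 * sqrt(2), 4 and 4 * sqrt(2) (5793 ~= 4096 * sqrt(2)). |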
| static const int16_t sqrt_2_list[TX_SIZES] = { 5793, 2 * 4096, 2 * 5793, |
| 4 * 4096, 4 * 5793 }; |
| |
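| // Identity "transform": scale by sqrt_2_list[txw_idx] (Q12), then apply |
| // the final round shift by bit. |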
| static INLINE void identity_txfm_round_neon(int16x8_t *input, int16x8_t *output, |
| int txw_idx, int8_t size, int bit) { |
| const int32x4_t dup_bits_n_32x4 = vdupq_n_s32((int32_t)(-bit)); |
| int16x4_t scale = vdup_n_s16(sqrt_2_list[txw_idx]); |
| int16x4_t low_i16, high_i16; |
| int32x4_t low_i32, high_i32; |
| for (int i = 0; i < size; i++) { |
| int32x4_t temp_out_low = vmull_s16(vget_low_s16(input[i]), scale); |
| int32x4_t temp_out_high = vmull_s16(vget_high_s16(input[i]), scale); |
| low_i32 = vrshlq_s32(vrshrq_n_s32(temp_out_low, 12), dup_bits_n_32x4); |
| high_i32 = vrshlq_s32(vrshrq_n_s32(temp_out_high, 12), dup_bits_n_32x4); |
| low_i16 = vqmovn_s32(low_i32); |
| high_i16 = vqmovn_s32(high_i32); |
| output[i] = vcombine_s16(low_i16, high_i16); |
| } |
| } |
| |
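| // Rectangular transforms are scaled by 1/sqrt(2): multiply by NewInvSqrt2 |
| // and round-shift by NewSqrt2Bits. |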
| static INLINE void round_shift_for_rect(int16x8_t *input, int16x8_t *output, |
| int size) { |
| int32x4_t out_low, out_high; |
| int16x4_t low, high; |
| |
| for (int z = 0; z < size; ++z) { |
| out_low = vmull_n_s16(vget_low_s16(input[z]), (int16_t)NewInvSqrt2); |
| out_high = vmull_n_s16(vget_high_s16(input[z]), (int16_t)NewInvSqrt2); |
| |
| low = vqrshrn_n_s32(out_low, (int32_t)NewSqrt2Bits); |
| high = vqrshrn_n_s32(out_high, (int32_t)NewSqrt2Bits); |
| |
| output[z] = vcombine_s16(low, high); |
| } |
| } |
| |
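| // 16-point inverse DCT for a DC-only block. |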
| static INLINE void idct16_low1_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1; |
| int32x4_t t32[2]; |
| |
| // stage 4 |
| |
| t32[0] = vmull_n_s16(vget_low_s16(in[0]), (int16_t)cospi[32]); |
| t32[1] = vmull_n_s16(vget_high_s16(in[0]), (int16_t)cospi[32]); |
| step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), |
| vrshrn_n_s32(t32[1], INV_COS_BIT)); |
| |
| // stage 6 |
| // stage 7 |
| out[0] = step1; |
| out[1] = step1; |
| out[2] = step1; |
| out[3] = step1; |
| out[4] = step1; |
| out[5] = step1; |
| out[6] = step1; |
| out[7] = step1; |
| out[8] = step1; |
| out[9] = step1; |
| out[10] = step1; |
| out[11] = step1; |
| out[12] = step1; |
| out[13] = step1; |
| out[14] = step1; |
| out[15] = step1; |
| } |
| |
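| // Full 16-point inverse DCT, processing eight columns at a time. |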
| static INLINE void idct16_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit, |
| int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[16], step2[16]; |
| |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], |
| (int16_t)cospi[36], (int16_t)cospi[28]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], |
| (int16_t)cospi[52], (int16_t)cospi[12]); |
| const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| const int16x4_t c4 = |
| set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), |
| (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); |
| // stage 2 |
| |
| btf_16_lane_0_1_neon(in[1], in[15], c0, &step2[15], &step2[8]); |
| btf_16_lane_2_3_neon(in[9], in[7], c0, &step2[14], &step2[9]); |
| btf_16_lane_0_1_neon(in[5], in[11], c1, &step2[13], &step2[10]); |
| btf_16_lane_2_3_neon(in[13], in[3], c1, &step2[12], &step2[11]); |
| |
| step2[0] = in[0]; |
| step2[1] = in[8]; |
| step2[2] = in[4]; |
| step2[3] = in[12]; |
| step2[4] = in[2]; |
| step2[5] = in[10]; |
| step2[6] = in[6]; |
| step2[7] = in[14]; |
| |
| // stage 3 |
| |
| btf_16_lane_0_1_neon(step2[4], step2[7], c2, &step1[7], &step1[4]); |
| btf_16_lane_2_3_neon(step2[5], step2[6], c2, &step1[6], &step1[5]); |
| |
| step1[0] = step2[0]; |
| step1[1] = step2[1]; |
| step1[2] = step2[2]; |
| step1[3] = step2[3]; |
| step1[8] = vqaddq_s16(step2[8], step2[9]); |
| step1[9] = vqsubq_s16(step2[8], step2[9]); |
| step1[10] = vqsubq_s16(step2[11], step2[10]); |
| step1[11] = vqaddq_s16(step2[11], step2[10]); |
| step1[12] = vqaddq_s16(step2[12], step2[13]); |
| step1[13] = vqsubq_s16(step2[12], step2[13]); |
| step1[14] = vqsubq_s16(step2[15], step2[14]); |
| step1[15] = vqaddq_s16(step2[15], step2[14]); |
| |
| // stage 4 |
| |
| btf_16_lane_0_1_neon(step1[0], step1[1], c3, &step2[0], &step2[1]); |
| btf_16_lane_2_3_neon(step1[2], step1[3], c3, &step2[3], &step2[2]); |
| btf_16_lane_2_3_neon(step1[14], step1[9], c3, &step2[14], &step2[9]); |
| btf_16_lane_3_2_neon(step1[10], step1[13], c4, &step2[10], &step2[13]); |
| |
| step2[4] = vqaddq_s16(step1[4], step1[5]); |
| step2[5] = vqsubq_s16(step1[4], step1[5]); |
| step2[6] = vqsubq_s16(step1[7], step1[6]); |
| step2[7] = vqaddq_s16(step1[7], step1[6]); |
| step2[8] = step1[8]; |
| step2[11] = step1[11]; |
| step2[12] = step1[12]; |
| step2[15] = step1[15]; |
| |
| // stage 5 |
| |
| btf_16_lane_0_1_neon(step2[6], step2[5], c3, &step1[6], &step1[5]); |
| |
| step1[0] = vqaddq_s16(step2[0], step2[3]); |
| step1[1] = vqaddq_s16(step2[1], step2[2]); |
| step1[2] = vqsubq_s16(step2[1], step2[2]); |
| step1[3] = vqsubq_s16(step2[0], step2[3]); |
| step1[4] = step2[4]; |
| step1[7] = step2[7]; |
| step1[8] = vqaddq_s16(step2[8], step2[11]); |
| step1[9] = vqaddq_s16(step2[9], step2[10]); |
| step1[10] = vqsubq_s16(step2[9], step2[10]); |
| step1[11] = vqsubq_s16(step2[8], step2[11]); |
| step1[12] = vqsubq_s16(step2[15], step2[12]); |
| step1[13] = vqsubq_s16(step2[14], step2[13]); |
| step1[14] = vqaddq_s16(step2[14], step2[13]); |
| step1[15] = vqaddq_s16(step2[15], step2[12]); |
| |
| // stage 6 |
| |
| btf_16_lane_0_1_neon(step1[13], step1[10], c3, &step2[13], &step2[10]); |
| btf_16_lane_0_1_neon(step1[12], step1[11], c3, &step2[12], &step2[11]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[7]); |
| step2[1] = vqaddq_s16(step1[1], step1[6]); |
| step2[2] = vqaddq_s16(step1[2], step1[5]); |
| step2[3] = vqaddq_s16(step1[3], step1[4]); |
| step2[4] = vqsubq_s16(step1[3], step1[4]); |
| step2[5] = vqsubq_s16(step1[2], step1[5]); |
| step2[6] = vqsubq_s16(step1[1], step1[6]); |
| step2[7] = vqsubq_s16(step1[0], step1[7]); |
| step2[8] = step1[8]; |
| step2[9] = step1[9]; |
| step2[14] = step1[14]; |
| step2[15] = step1[15]; |
| |
| // stage 7 |
| out[0] = vqaddq_s16(step2[0], step2[15]); |
| out[1] = vqaddq_s16(step2[1], step2[14]); |
| out[2] = vqaddq_s16(step2[2], step2[13]); |
| out[3] = vqaddq_s16(step2[3], step2[12]); |
| out[4] = vqaddq_s16(step2[4], step2[11]); |
| out[5] = vqaddq_s16(step2[5], step2[10]); |
| out[6] = vqaddq_s16(step2[6], step2[9]); |
| out[7] = vqaddq_s16(step2[7], step2[8]); |
| out[8] = vqsubq_s16(step2[7], step2[8]); |
| out[9] = vqsubq_s16(step2[6], step2[9]); |
| out[10] = vqsubq_s16(step2[5], step2[10]); |
| out[11] = vqsubq_s16(step2[4], step2[11]); |
| out[12] = vqsubq_s16(step2[3], step2[12]); |
| out[13] = vqsubq_s16(step2[2], step2[13]); |
| out[14] = vqsubq_s16(step2[1], step2[14]); |
| out[15] = vqsubq_s16(step2[0], step2[15]); |
| } |
| |
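| // 16-point inverse DCT when only the first eight inputs are nonzero. |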
| static INLINE void idct16_low8_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[16], step2[16]; |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| const int16x4_t c1 = |
| set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), |
| (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); |
| |
| // stage 1 |
| // stage 2 |
| |
| step2[0] = in[0]; |
| step2[2] = in[4]; |
| step2[4] = in[2]; |
| step2[6] = in[6]; |
| |
| btf_16_neon(in[1], cospi[60], cospi[4], &step2[8], &step2[15]); |
| btf_16_neon(in[7], -cospi[36], cospi[28], &step2[9], &step2[14]); |
| btf_16_neon(in[5], cospi[44], cospi[20], &step2[10], &step2[13]); |
| btf_16_neon(in[3], -cospi[52], cospi[12], &step2[11], &step2[12]); |
| |
| // stage 3 |
| |
| btf_16_neon(step2[4], cospi[56], cospi[8], &step1[4], &step1[7]); |
| btf_16_neon(step2[6], -cospi[40], cospi[24], &step1[5], &step1[6]); |
| |
| step1[0] = step2[0]; |
| step1[2] = step2[2]; |
| step1[8] = vqaddq_s16(step2[8], step2[9]); |
| step1[9] = vqsubq_s16(step2[8], step2[9]); |
| step1[10] = vqsubq_s16(step2[11], step2[10]); |
| step1[11] = vqaddq_s16(step2[11], step2[10]); |
| step1[12] = vqaddq_s16(step2[12], step2[13]); |
| step1[13] = vqsubq_s16(step2[12], step2[13]); |
| step1[14] = vqsubq_s16(step2[15], step2[14]); |
| step1[15] = vqaddq_s16(step2[15], step2[14]); |
| |
| // stage 4 |
| |
| btf_16_neon(step1[0], cospi[32], cospi[32], &step2[0], &step2[1]); |
| btf_16_neon(step1[2], cospi[48], cospi[16], &step2[2], &step2[3]); |
| btf_16_lane_2_3_neon(step1[14], step1[9], c0, &step2[14], &step2[9]); |
| btf_16_lane_3_2_neon(step1[10], step1[13], c1, &step2[10], &step2[13]); |
| |
| step2[4] = vqaddq_s16(step1[4], step1[5]); |
| step2[5] = vqsubq_s16(step1[4], step1[5]); |
| step2[6] = vqsubq_s16(step1[7], step1[6]); |
| step2[7] = vqaddq_s16(step1[7], step1[6]); |
| step2[8] = step1[8]; |
| step2[11] = step1[11]; |
| step2[12] = step1[12]; |
| step2[15] = step1[15]; |
| |
| // stage 5 |
| |
| btf_16_lane_0_1_neon(step2[6], step2[5], c0, &step1[6], &step1[5]); |
| step1[0] = vqaddq_s16(step2[0], step2[3]); |
| step1[1] = vqaddq_s16(step2[1], step2[2]); |
| step1[2] = vqsubq_s16(step2[1], step2[2]); |
| step1[3] = vqsubq_s16(step2[0], step2[3]); |
| step1[4] = step2[4]; |
| step1[7] = step2[7]; |
| step1[8] = vqaddq_s16(step2[8], step2[11]); |
| step1[9] = vqaddq_s16(step2[9], step2[10]); |
| step1[10] = vqsubq_s16(step2[9], step2[10]); |
| step1[11] = vqsubq_s16(step2[8], step2[11]); |
| step1[12] = vqsubq_s16(step2[15], step2[12]); |
| step1[13] = vqsubq_s16(step2[14], step2[13]); |
| step1[14] = vqaddq_s16(step2[14], step2[13]); |
| step1[15] = vqaddq_s16(step2[15], step2[12]); |
| |
| // stage 6 |
| btf_16_lane_0_1_neon(step1[13], step1[10], c0, &step2[13], &step2[10]); |
| btf_16_lane_0_1_neon(step1[12], step1[11], c0, &step2[12], &step2[11]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[7]); |
| step2[1] = vqaddq_s16(step1[1], step1[6]); |
| step2[2] = vqaddq_s16(step1[2], step1[5]); |
| step2[3] = vqaddq_s16(step1[3], step1[4]); |
| step2[4] = vqsubq_s16(step1[3], step1[4]); |
| step2[5] = vqsubq_s16(step1[2], step1[5]); |
| step2[6] = vqsubq_s16(step1[1], step1[6]); |
| step2[7] = vqsubq_s16(step1[0], step1[7]); |
| step2[8] = step1[8]; |
| step2[9] = step1[9]; |
| step2[14] = step1[14]; |
| step2[15] = step1[15]; |
| |
| // stage 7 |
| |
| out[0] = vqaddq_s16(step2[0], step2[15]); |
| out[1] = vqaddq_s16(step2[1], step2[14]); |
| out[2] = vqaddq_s16(step2[2], step2[13]); |
| out[3] = vqaddq_s16(step2[3], step2[12]); |
| out[4] = vqaddq_s16(step2[4], step2[11]); |
| out[5] = vqaddq_s16(step2[5], step2[10]); |
| out[6] = vqaddq_s16(step2[6], step2[9]); |
| out[7] = vqaddq_s16(step2[7], step2[8]); |
| out[8] = vqsubq_s16(step2[7], step2[8]); |
| out[9] = vqsubq_s16(step2[6], step2[9]); |
| out[10] = vqsubq_s16(step2[5], step2[10]); |
| out[11] = vqsubq_s16(step2[4], step2[11]); |
| out[12] = vqsubq_s16(step2[3], step2[12]); |
| out[13] = vqsubq_s16(step2[2], step2[13]); |
| out[14] = vqsubq_s16(step2[1], step2[14]); |
| out[15] = vqsubq_s16(step2[0], step2[15]); |
| } |
| |
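| // Full 16-point inverse ADST, processing eight columns at a time. |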
| static INLINE void iadst16_neon(int16x8_t *const in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[2], (int16_t)cospi[62], |
| (int16_t)cospi[10], (int16_t)cospi[54]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[18], (int16_t)cospi[46], |
| (int16_t)cospi[26], (int16_t)cospi[38]); |
| const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[34], (int16_t)cospi[30], |
| (int16_t)cospi[42], (int16_t)cospi[22]); |
| const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[50], (int16_t)cospi[14], |
| (int16_t)cospi[58], (int16_t)cospi[6]); |
| const int16x4_t c4 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c5 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| int16x8_t x[16]; |
| int16x8_t t[14]; |
| int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; |
| int16x8_t s8, s9, s10, s11, s12, s13, s14, s15; |
| |
| // Stage 1 |
| x[0] = in[15]; |
| x[1] = in[0]; |
| x[2] = in[13]; |
| x[3] = in[2]; |
| x[4] = in[11]; |
| x[5] = in[4]; |
| x[6] = in[9]; |
| x[7] = in[6]; |
| x[8] = in[7]; |
| x[9] = in[8]; |
| x[10] = in[5]; |
| x[11] = in[10]; |
| x[12] = in[3]; |
| x[13] = in[12]; |
| x[14] = in[1]; |
| x[15] = in[14]; |
| |
| // Stage 2 |
| btf_16_lane_0_1_neon(x[0], x[1], c0, &s0, &s1); |
| btf_16_lane_2_3_neon(x[2], x[3], c0, &s2, &s3); |
| btf_16_lane_0_1_neon(x[4], x[5], c1, &s4, &s5); |
| btf_16_lane_2_3_neon(x[6], x[7], c1, &s6, &s7); |
| btf_16_lane_0_1_neon(x[8], x[9], c2, &s8, &s9); |
| btf_16_lane_2_3_neon(x[10], x[11], c2, &s10, &s11); |
| btf_16_lane_0_1_neon(x[12], x[13], c3, &s12, &s13); |
| btf_16_lane_2_3_neon(x[14], x[15], c3, &s14, &s15); |
| |
| // Stage 3 |
| x[0] = vqaddq_s16(s0, s8); |
| x[1] = vqaddq_s16(s1, s9); |
| x[2] = vqaddq_s16(s2, s10); |
| x[3] = vqaddq_s16(s3, s11); |
| x[4] = vqaddq_s16(s4, s12); |
| x[5] = vqaddq_s16(s5, s13); |
| x[6] = vqaddq_s16(s6, s14); |
| x[7] = vqaddq_s16(s7, s15); |
| x[8] = vqsubq_s16(s0, s8); |
| x[9] = vqsubq_s16(s1, s9); |
| x[10] = vqsubq_s16(s2, s10); |
| x[11] = vqsubq_s16(s3, s11); |
| x[12] = vqsubq_s16(s4, s12); |
| x[13] = vqsubq_s16(s5, s13); |
| x[14] = vqsubq_s16(s6, s14); |
| x[15] = vqsubq_s16(s7, s15); |
| |
| // Stage 4 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| t[2] = x[2]; |
| t[3] = x[3]; |
| t[4] = x[4]; |
| t[5] = x[5]; |
| t[6] = x[6]; |
| t[7] = x[7]; |
| btf_16_lane_0_1_neon(x[8], x[9], c4, &s8, &s9); |
| btf_16_lane_2_3_neon(x[10], x[11], c4, &s10, &s11); |
| btf_16_lane_1_0_neon(x[13], x[12], c4, &s13, &s12); |
| btf_16_lane_3_2_neon(x[15], x[14], c4, &s15, &s14); |
| |
| // Stage 5 |
| x[0] = vqaddq_s16(t[0], t[4]); |
| x[1] = vqaddq_s16(t[1], t[5]); |
| x[2] = vqaddq_s16(t[2], t[6]); |
| x[3] = vqaddq_s16(t[3], t[7]); |
| x[4] = vqsubq_s16(t[0], t[4]); |
| x[5] = vqsubq_s16(t[1], t[5]); |
| x[6] = vqsubq_s16(t[2], t[6]); |
| x[7] = vqsubq_s16(t[3], t[7]); |
| x[8] = vqaddq_s16(s8, s12); |
| x[9] = vqaddq_s16(s9, s13); |
| x[10] = vqaddq_s16(s10, s14); |
| x[11] = vqaddq_s16(s11, s15); |
| x[12] = vqsubq_s16(s8, s12); |
| x[13] = vqsubq_s16(s9, s13); |
| x[14] = vqsubq_s16(s10, s14); |
| x[15] = vqsubq_s16(s11, s15); |
| |
| // Stage 6 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| t[2] = x[2]; |
| t[3] = x[3]; |
| btf_16_lane_2_3_neon(x[4], x[5], c5, &s4, &s5); |
| btf_16_lane_3_2_neon(x[7], x[6], c5, &s7, &s6); |
| t[8] = x[8]; |
| t[9] = x[9]; |
| t[10] = x[10]; |
| t[11] = x[11]; |
| btf_16_lane_2_3_neon(x[12], x[13], c5, &s12, &s13); |
| btf_16_lane_3_2_neon(x[15], x[14], c5, &s15, &s14); |
| |
| // Stage 7 |
| x[0] = vqaddq_s16(t[0], t[2]); |
| x[1] = vqaddq_s16(t[1], t[3]); |
| x[2] = vqsubq_s16(t[0], t[2]); |
| x[3] = vqsubq_s16(t[1], t[3]); |
| x[4] = vqaddq_s16(s4, s6); |
| x[5] = vqaddq_s16(s5, s7); |
| x[6] = vqsubq_s16(s4, s6); |
| x[7] = vqsubq_s16(s5, s7); |
| x[8] = vqaddq_s16(t[8], t[10]); |
| x[9] = vqaddq_s16(t[9], t[11]); |
| x[10] = vqsubq_s16(t[8], t[10]); |
| x[11] = vqsubq_s16(t[9], t[11]); |
| x[12] = vqaddq_s16(s12, s14); |
| x[13] = vqaddq_s16(s13, s15); |
| x[14] = vqsubq_s16(s12, s14); |
| x[15] = vqsubq_s16(s13, s15); |
| |
| // Stage 8 |
| btf_16_half_neon(x + 2, c5); |
| btf_16_half_neon(x + 6, c5); |
| btf_16_half_neon(x + 10, c5); |
| btf_16_half_neon(x + 14, c5); |
| |
| // Stage 9 |
| out[0] = x[0]; |
| out[1] = vqnegq_s16(x[8]); |
| out[2] = x[12]; |
| out[3] = vqnegq_s16(x[4]); |
| out[4] = x[6]; |
| out[5] = vqnegq_s16(x[14]); |
| out[6] = x[10]; |
| out[7] = vqnegq_s16(x[2]); |
| out[8] = x[3]; |
| out[9] = vqnegq_s16(x[11]); |
| out[10] = x[15]; |
| out[11] = vqnegq_s16(x[7]); |
| out[12] = x[5]; |
| out[13] = vqnegq_s16(x[13]); |
| out[14] = x[9]; |
| out[15] = vqnegq_s16(x[1]); |
| } |
| |
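| // 16-point inverse ADST when only in[0] is nonzero. |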
| static INLINE void iadst16_low1_neon(int16x8_t *const in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| int16x8_t x[16]; |
| int16x8_t t[10]; |
| int16x8_t s0, s1, s4, s5; |
| int16x8_t s8, s9, s12, s13; |
| |
| // Stage 1 |
| x[1] = in[0]; |
| |
| // Stage 2 |
| btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1); |
| |
| // Stage 3 |
| x[0] = s0; |
| x[1] = s1; |
| x[8] = s0; |
| x[9] = s1; |
| |
| // Stage 4 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| btf_16_lane_0_1_neon(x[8], x[9], c0, &s8, &s9); |
| |
| // Stage 5 |
| x[0] = t[0]; |
| x[1] = t[1]; |
| x[4] = t[0]; |
| x[5] = t[1]; |
| x[8] = s8; |
| x[9] = s9; |
| x[12] = s8; |
| x[13] = s9; |
| |
| // Stage 6 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| btf_16_lane_2_3_neon(x[4], x[5], c1, &s4, &s5); |
| t[8] = x[8]; |
| t[9] = x[9]; |
| btf_16_lane_2_3_neon(x[12], x[13], c1, &s12, &s13); |
| |
| // Stage 7 |
| x[0] = t[0]; |
| x[1] = t[1]; |
| x[2] = t[0]; |
| x[3] = t[1]; |
| x[4] = s4; |
| x[5] = s5; |
| x[6] = s4; |
| x[7] = s5; |
| x[8] = t[8]; |
| x[9] = t[9]; |
| x[10] = t[8]; |
| x[11] = t[9]; |
| x[12] = s12; |
| x[13] = s13; |
| x[14] = s12; |
| x[15] = s13; |
| |
| // Stage 8 |
| btf_16_half_neon(x + 2, c1); |
| btf_16_half_neon(x + 6, c1); |
| btf_16_half_neon(x + 10, c1); |
| btf_16_half_neon(x + 14, c1); |
| |
| // Stage 9 |
| out[0] = x[0]; |
| out[1] = vqnegq_s16(x[8]); |
| out[2] = x[12]; |
| out[3] = vqnegq_s16(x[4]); |
| out[4] = x[6]; |
| out[5] = vqnegq_s16(x[14]); |
| out[6] = x[10]; |
| out[7] = vqnegq_s16(x[2]); |
| out[8] = x[3]; |
| out[9] = vqnegq_s16(x[11]); |
| out[10] = x[15]; |
| out[11] = vqnegq_s16(x[7]); |
| out[12] = x[5]; |
| out[13] = vqnegq_s16(x[13]); |
| out[14] = x[9]; |
| out[15] = vqnegq_s16(x[1]); |
| } |
| |
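| // 16-point inverse ADST when only the first eight inputs are nonzero. |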
| static INLINE void iadst16_low8_neon(int16x8_t *const in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| int16x8_t x[16]; |
| int16x8_t t[14]; |
| int16x8_t s0, s1, s2, s3, s4, s5, s6, s7; |
| int16x8_t s8, s9, s10, s11, s12, s13, s14, s15; |
| |
| // Stage 1 |
| x[1] = in[0]; |
| x[3] = in[2]; |
| x[5] = in[4]; |
| x[7] = in[6]; |
| x[8] = in[7]; |
| x[10] = in[5]; |
| x[12] = in[3]; |
| x[14] = in[1]; |
| |
| // Stage 2 |
| btf_16_neon(x[1], cospi[62], -cospi[2], &s0, &s1); |
| btf_16_neon(x[3], cospi[54], -cospi[10], &s2, &s3); |
| btf_16_neon(x[5], cospi[46], -cospi[18], &s4, &s5); |
| btf_16_neon(x[7], cospi[38], -cospi[26], &s6, &s7); |
| |
| btf_16_neon(x[8], cospi[34], cospi[30], &s8, &s9); |
| btf_16_neon(x[10], cospi[42], cospi[22], &s10, &s11); |
| btf_16_neon(x[12], cospi[50], cospi[14], &s12, &s13); |
| btf_16_neon(x[14], cospi[58], cospi[6], &s14, &s15); |
| |
| // Stage 3 |
| x[0] = vqaddq_s16(s0, s8); |
| x[1] = vqaddq_s16(s1, s9); |
| x[2] = vqaddq_s16(s2, s10); |
| x[3] = vqaddq_s16(s3, s11); |
| x[4] = vqaddq_s16(s4, s12); |
| x[5] = vqaddq_s16(s5, s13); |
| x[6] = vqaddq_s16(s6, s14); |
| x[7] = vqaddq_s16(s7, s15); |
| x[8] = vqsubq_s16(s0, s8); |
| x[9] = vqsubq_s16(s1, s9); |
| x[10] = vqsubq_s16(s2, s10); |
| x[11] = vqsubq_s16(s3, s11); |
| x[12] = vqsubq_s16(s4, s12); |
| x[13] = vqsubq_s16(s5, s13); |
| x[14] = vqsubq_s16(s6, s14); |
| x[15] = vqsubq_s16(s7, s15); |
| |
| // Stage 4 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| t[2] = x[2]; |
| t[3] = x[3]; |
| t[4] = x[4]; |
| t[5] = x[5]; |
| t[6] = x[6]; |
| t[7] = x[7]; |
| btf_16_lane_0_1_neon(x[8], x[9], c0, &s8, &s9); |
| btf_16_lane_2_3_neon(x[10], x[11], c0, &s10, &s11); |
| btf_16_lane_1_0_neon(x[13], x[12], c0, &s13, &s12); |
| btf_16_lane_3_2_neon(x[15], x[14], c0, &s15, &s14); |
| |
| // Stage 5 |
| x[0] = vqaddq_s16(t[0], t[4]); |
| x[1] = vqaddq_s16(t[1], t[5]); |
| x[2] = vqaddq_s16(t[2], t[6]); |
| x[3] = vqaddq_s16(t[3], t[7]); |
| x[4] = vqsubq_s16(t[0], t[4]); |
| x[5] = vqsubq_s16(t[1], t[5]); |
| x[6] = vqsubq_s16(t[2], t[6]); |
| x[7] = vqsubq_s16(t[3], t[7]); |
| x[8] = vqaddq_s16(s8, s12); |
| x[9] = vqaddq_s16(s9, s13); |
| x[10] = vqaddq_s16(s10, s14); |
| x[11] = vqaddq_s16(s11, s15); |
| x[12] = vqsubq_s16(s8, s12); |
| x[13] = vqsubq_s16(s9, s13); |
| x[14] = vqsubq_s16(s10, s14); |
| x[15] = vqsubq_s16(s11, s15); |
| |
| // Stage 6 |
| t[0] = x[0]; |
| t[1] = x[1]; |
| t[2] = x[2]; |
| t[3] = x[3]; |
| btf_16_lane_2_3_neon(x[4], x[5], c1, &s4, &s5); |
| btf_16_lane_3_2_neon(x[7], x[6], c1, &s7, &s6); |
| t[8] = x[8]; |
| t[9] = x[9]; |
| t[10] = x[10]; |
| t[11] = x[11]; |
| btf_16_lane_2_3_neon(x[12], x[13], c1, &s12, &s13); |
| btf_16_lane_3_2_neon(x[15], x[14], c1, &s15, &s14); |
| |
| // Stage 7 |
| x[0] = vqaddq_s16(t[0], t[2]); |
| x[1] = vqaddq_s16(t[1], t[3]); |
| x[2] = vqsubq_s16(t[0], t[2]); |
| x[3] = vqsubq_s16(t[1], t[3]); |
| x[4] = vqaddq_s16(s4, s6); |
| x[5] = vqaddq_s16(s5, s7); |
| x[6] = vqsubq_s16(s4, s6); |
| x[7] = vqsubq_s16(s5, s7); |
| x[8] = vqaddq_s16(t[8], t[10]); |
| x[9] = vqaddq_s16(t[9], t[11]); |
| x[10] = vqsubq_s16(t[8], t[10]); |
| x[11] = vqsubq_s16(t[9], t[11]); |
| x[12] = vqaddq_s16(s12, s14); |
| x[13] = vqaddq_s16(s13, s15); |
| x[14] = vqsubq_s16(s12, s14); |
| x[15] = vqsubq_s16(s13, s15); |
| |
| // Stage 8 |
| btf_16_half_neon(x + 2, c1); |
| btf_16_half_neon(x + 6, c1); |
| btf_16_half_neon(x + 10, c1); |
| btf_16_half_neon(x + 14, c1); |
| |
| // Stage 9 |
| out[0] = x[0]; |
| out[1] = vqnegq_s16(x[8]); |
| out[2] = x[12]; |
| out[3] = vqnegq_s16(x[4]); |
| out[4] = x[6]; |
| out[5] = vqnegq_s16(x[14]); |
| out[6] = x[10]; |
| out[7] = vqnegq_s16(x[2]); |
| out[8] = x[3]; |
| out[9] = vqnegq_s16(x[11]); |
| out[10] = x[15]; |
| out[11] = vqnegq_s16(x[7]); |
| out[12] = x[5]; |
| out[13] = vqnegq_s16(x[13]); |
| out[14] = x[9]; |
| out[15] = vqnegq_s16(x[1]); |
| } |
| |
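| // Full 32-point inverse DCT, processing eight columns at a time. |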
| static INLINE void idct32_neon(int16x8_t *in, int16x8_t *out, int8_t cos_bit, |
| int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[32], step2[32]; |
| |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[2], (int16_t)cospi[62], |
| (int16_t)cospi[34], (int16_t)cospi[30]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[18], (int16_t)cospi[46], |
| (int16_t)cospi[50], (int16_t)cospi[14]); |
| const int16x4_t c2 = set_s16x4_neon((int16_t)cospi[10], (int16_t)cospi[54], |
| (int16_t)cospi[42], (int16_t)cospi[22]); |
| const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[26], (int16_t)cospi[38], |
| (int16_t)cospi[58], (int16_t)cospi[6]); |
| const int16x4_t c4 = set_s16x4_neon((int16_t)cospi[4], (int16_t)cospi[60], |
| (int16_t)cospi[36], (int16_t)cospi[28]); |
| const int16x4_t c5 = set_s16x4_neon((int16_t)cospi[20], (int16_t)cospi[44], |
| (int16_t)cospi[52], (int16_t)cospi[12]); |
| const int16x4_t c6 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c7 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| const int16x4_t c8 = |
| set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), |
| (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); |
| const int16x4_t c9 = |
| set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), |
| (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); |
| |
| // stage 2 |
| |
| btf_16_lane_0_1_neon(in[1], in[31], c0, &step2[31], &step2[16]); |
| btf_16_lane_2_3_neon(in[17], in[15], c0, &step2[30], &step2[17]); |
| btf_16_lane_0_1_neon(in[9], in[23], c1, &step2[29], &step2[18]); |
| btf_16_lane_2_3_neon(in[25], in[7], c1, &step2[28], &step2[19]); |
| btf_16_lane_0_1_neon(in[5], in[27], c2, &step2[27], &step2[20]); |
| btf_16_lane_2_3_neon(in[21], in[11], c2, &step2[26], &step2[21]); |
| btf_16_lane_0_1_neon(in[13], in[19], c3, &step2[25], &step2[22]); |
| btf_16_lane_2_3_neon(in[29], in[3], c3, &step2[24], &step2[23]); |
| |
| step2[0] = in[0]; |
| step2[1] = in[16]; |
| step2[2] = in[8]; |
| step2[3] = in[24]; |
| step2[4] = in[4]; |
| step2[5] = in[20]; |
| step2[6] = in[12]; |
| step2[7] = in[28]; |
| step2[8] = in[2]; |
| step2[9] = in[18]; |
| step2[10] = in[10]; |
| step2[11] = in[26]; |
| step2[12] = in[6]; |
| step2[13] = in[22]; |
| step2[14] = in[14]; |
| step2[15] = in[30]; |
| |
| // stage 3 |
| |
| btf_16_lane_0_1_neon(step2[8], step2[15], c4, &step1[15], &step1[8]); |
| btf_16_lane_2_3_neon(step2[9], step2[14], c4, &step1[14], &step1[9]); |
| btf_16_lane_0_1_neon(step2[10], step2[13], c5, &step1[13], &step1[10]); |
| btf_16_lane_2_3_neon(step2[11], step2[12], c5, &step1[12], &step1[11]); |
| |
| step1[0] = step2[0]; |
| step1[1] = step2[1]; |
| step1[2] = step2[2]; |
| step1[3] = step2[3]; |
| step1[4] = step2[4]; |
| step1[5] = step2[5]; |
| step1[6] = step2[6]; |
| step1[7] = step2[7]; |
| |
| step1[16] = vqaddq_s16(step2[16], step2[17]); |
| step1[17] = vqsubq_s16(step2[16], step2[17]); |
| step1[18] = vqsubq_s16(step2[19], step2[18]); |
| step1[19] = vqaddq_s16(step2[19], step2[18]); |
| step1[20] = vqaddq_s16(step2[20], step2[21]); |
| step1[21] = vqsubq_s16(step2[20], step2[21]); |
| step1[22] = vqsubq_s16(step2[23], step2[22]); |
| step1[23] = vqaddq_s16(step2[23], step2[22]); |
| step1[24] = vqaddq_s16(step2[24], step2[25]); |
| step1[25] = vqsubq_s16(step2[24], step2[25]); |
| step1[26] = vqsubq_s16(step2[27], step2[26]); |
| step1[27] = vqaddq_s16(step2[27], step2[26]); |
| step1[28] = vqaddq_s16(step2[28], step2[29]); |
| step1[29] = vqsubq_s16(step2[28], step2[29]); |
| step1[30] = vqsubq_s16(step2[31], step2[30]); |
| step1[31] = vqaddq_s16(step2[31], step2[30]); |
| |
| // stage 4 |
| |
| btf_16_lane_0_1_neon(step1[4], step1[7], c6, &step2[7], &step2[4]); |
| btf_16_lane_2_3_neon(step1[5], step1[6], c6, &step2[6], &step2[5]); |
| btf_16_lane_0_1_neon(step1[30], step1[17], c6, &step2[30], &step2[17]); |
| btf_16_lane_1_0_neon(step1[18], step1[29], c8, &step2[18], &step2[29]); |
| btf_16_lane_2_3_neon(step1[26], step1[21], c6, &step2[26], &step2[21]); |
| btf_16_lane_3_2_neon(step1[22], step1[25], c8, &step2[22], &step2[25]); |
| |
| step2[0] = step1[0]; |
| step2[1] = step1[1]; |
| step2[2] = step1[2]; |
| step2[3] = step1[3]; |
| step2[8] = vqaddq_s16(step1[8], step1[9]); |
| step2[9] = vqsubq_s16(step1[8], step1[9]); |
| step2[10] = vqsubq_s16(step1[11], step1[10]); |
| step2[11] = vqaddq_s16(step1[11], step1[10]); |
| step2[12] = vqaddq_s16(step1[12], step1[13]); |
| step2[13] = vqsubq_s16(step1[12], step1[13]); |
| step2[14] = vqsubq_s16(step1[15], step1[14]); |
| step2[15] = vqaddq_s16(step1[15], step1[14]); |
| step2[16] = step1[16]; |
| step2[19] = step1[19]; |
| step2[20] = step1[20]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[27] = step1[27]; |
| step2[28] = step1[28]; |
| step2[31] = step1[31]; |
| |
| // stage 5 |
| |
| btf_16_lane_0_1_neon(step2[0], step2[1], c7, &step1[0], &step1[1]); |
| btf_16_lane_2_3_neon(step2[2], step2[3], c7, &step1[3], &step1[2]); |
| btf_16_lane_2_3_neon(step2[14], step2[9], c7, &step1[14], &step1[9]); |
| btf_16_lane_3_2_neon(step2[10], step2[13], c9, &step1[10], &step1[13]); |
| |
| step1[4] = vqaddq_s16(step2[4], step2[5]); |
| step1[5] = vqsubq_s16(step2[4], step2[5]); |
| step1[6] = vqsubq_s16(step2[7], step2[6]); |
| step1[7] = vqaddq_s16(step2[7], step2[6]); |
| step1[8] = step2[8]; |
| step1[11] = step2[11]; |
| step1[12] = step2[12]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[19]); |
| step1[17] = vqaddq_s16(step2[17], step2[18]); |
| step1[18] = vqsubq_s16(step2[17], step2[18]); |
| step1[19] = vqsubq_s16(step2[16], step2[19]); |
| step1[20] = vqsubq_s16(step2[23], step2[20]); |
| step1[21] = vqsubq_s16(step2[22], step2[21]); |
| step1[22] = vqaddq_s16(step2[22], step2[21]); |
| step1[23] = vqaddq_s16(step2[23], step2[20]); |
| step1[24] = vqaddq_s16(step2[24], step2[27]); |
| step1[25] = vqaddq_s16(step2[25], step2[26]); |
| step1[26] = vqsubq_s16(step2[25], step2[26]); |
| step1[27] = vqsubq_s16(step2[24], step2[27]); |
| step1[28] = vqsubq_s16(step2[31], step2[28]); |
| step1[29] = vqsubq_s16(step2[30], step2[29]); |
| step1[30] = vqaddq_s16(step2[30], step2[29]); |
| step1[31] = vqaddq_s16(step2[31], step2[28]); |
| |
| // stage 6 |
| |
| btf_16_lane_0_1_neon(step1[6], step1[5], c7, &step2[6], &step2[5]); |
| btf_16_lane_2_3_neon(step1[29], step1[18], c7, &step2[29], &step2[18]); |
| btf_16_lane_2_3_neon(step1[28], step1[19], c7, &step2[28], &step2[19]); |
| btf_16_lane_3_2_neon(step1[20], step1[27], c9, &step2[20], &step2[27]); |
| btf_16_lane_3_2_neon(step1[21], step1[26], c9, &step2[21], &step2[26]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[3]); |
| step2[1] = vqaddq_s16(step1[1], step1[2]); |
| step2[2] = vqsubq_s16(step1[1], step1[2]); |
| step2[3] = vqsubq_s16(step1[0], step1[3]); |
| step2[4] = step1[4]; |
| step2[7] = step1[7]; |
| step2[8] = vqaddq_s16(step1[8], step1[11]); |
| step2[9] = vqaddq_s16(step1[9], step1[10]); |
| step2[10] = vqsubq_s16(step1[9], step1[10]); |
| step2[11] = vqsubq_s16(step1[8], step1[11]); |
| step2[12] = vqsubq_s16(step1[15], step1[12]); |
| step2[13] = vqsubq_s16(step1[14], step1[13]); |
| step2[14] = vqaddq_s16(step1[14], step1[13]); |
| step2[15] = vqaddq_s16(step1[15], step1[12]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[22] = step1[22]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[25] = step1[25]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 7 |
| |
| btf_16_lane_0_1_neon(step2[13], step2[10], c7, &step1[13], &step1[10]); |
| btf_16_lane_0_1_neon(step2[12], step2[11], c7, &step1[12], &step1[11]); |
| |
| step1[0] = vqaddq_s16(step2[0], step2[7]); |
| step1[1] = vqaddq_s16(step2[1], step2[6]); |
| step1[2] = vqaddq_s16(step2[2], step2[5]); |
| step1[3] = vqaddq_s16(step2[3], step2[4]); |
| step1[4] = vqsubq_s16(step2[3], step2[4]); |
| step1[5] = vqsubq_s16(step2[2], step2[5]); |
| step1[6] = vqsubq_s16(step2[1], step2[6]); |
| step1[7] = vqsubq_s16(step2[0], step2[7]); |
| step1[8] = step2[8]; |
| step1[9] = step2[9]; |
| step1[14] = step2[14]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[23]); |
| step1[17] = vqaddq_s16(step2[17], step2[22]); |
| step1[18] = vqaddq_s16(step2[18], step2[21]); |
| step1[19] = vqaddq_s16(step2[19], step2[20]); |
| step1[20] = vqsubq_s16(step2[19], step2[20]); |
| step1[21] = vqsubq_s16(step2[18], step2[21]); |
| step1[22] = vqsubq_s16(step2[17], step2[22]); |
| step1[23] = vqsubq_s16(step2[16], step2[23]); |
| step1[24] = vqsubq_s16(step2[31], step2[24]); |
| step1[25] = vqsubq_s16(step2[30], step2[25]); |
| step1[26] = vqsubq_s16(step2[29], step2[26]); |
| step1[27] = vqsubq_s16(step2[28], step2[27]); |
| step1[28] = vqaddq_s16(step2[27], step2[28]); |
| step1[29] = vqaddq_s16(step2[26], step2[29]); |
| step1[30] = vqaddq_s16(step2[25], step2[30]); |
| step1[31] = vqaddq_s16(step2[24], step2[31]); |
| |
| // stage 8 |
| |
| btf_16_lane_0_1_neon(step1[27], step1[20], c7, &step2[27], &step2[20]); |
| btf_16_lane_0_1_neon(step1[26], step1[21], c7, &step2[26], &step2[21]); |
| btf_16_lane_0_1_neon(step1[25], step1[22], c7, &step2[25], &step2[22]); |
| btf_16_lane_0_1_neon(step1[24], step1[23], c7, &step2[24], &step2[23]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[15]); |
| step2[1] = vqaddq_s16(step1[1], step1[14]); |
| step2[2] = vqaddq_s16(step1[2], step1[13]); |
| step2[3] = vqaddq_s16(step1[3], step1[12]); |
| step2[4] = vqaddq_s16(step1[4], step1[11]); |
| step2[5] = vqaddq_s16(step1[5], step1[10]); |
| step2[6] = vqaddq_s16(step1[6], step1[9]); |
| step2[7] = vqaddq_s16(step1[7], step1[8]); |
| step2[8] = vqsubq_s16(step1[7], step1[8]); |
| step2[9] = vqsubq_s16(step1[6], step1[9]); |
| step2[10] = vqsubq_s16(step1[5], step1[10]); |
| step2[11] = vqsubq_s16(step1[4], step1[11]); |
| step2[12] = vqsubq_s16(step1[3], step1[12]); |
| step2[13] = vqsubq_s16(step1[2], step1[13]); |
| step2[14] = vqsubq_s16(step1[1], step1[14]); |
| step2[15] = vqsubq_s16(step1[0], step1[15]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[18] = step1[18]; |
| step2[19] = step1[19]; |
| step2[28] = step1[28]; |
| step2[29] = step1[29]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 9 |
| |
| out[0] = vqaddq_s16(step2[0], step2[31]); |
| out[1] = vqaddq_s16(step2[1], step2[30]); |
| out[2] = vqaddq_s16(step2[2], step2[29]); |
| out[3] = vqaddq_s16(step2[3], step2[28]); |
| out[4] = vqaddq_s16(step2[4], step2[27]); |
| out[5] = vqaddq_s16(step2[5], step2[26]); |
| out[6] = vqaddq_s16(step2[6], step2[25]); |
| out[7] = vqaddq_s16(step2[7], step2[24]); |
| out[8] = vqaddq_s16(step2[8], step2[23]); |
| out[9] = vqaddq_s16(step2[9], step2[22]); |
| out[10] = vqaddq_s16(step2[10], step2[21]); |
| out[11] = vqaddq_s16(step2[11], step2[20]); |
| out[12] = vqaddq_s16(step2[12], step2[19]); |
| out[13] = vqaddq_s16(step2[13], step2[18]); |
| out[14] = vqaddq_s16(step2[14], step2[17]); |
| out[15] = vqaddq_s16(step2[15], step2[16]); |
| out[16] = vqsubq_s16(step2[15], step2[16]); |
| out[17] = vqsubq_s16(step2[14], step2[17]); |
| out[18] = vqsubq_s16(step2[13], step2[18]); |
| out[19] = vqsubq_s16(step2[12], step2[19]); |
| out[20] = vqsubq_s16(step2[11], step2[20]); |
| out[21] = vqsubq_s16(step2[10], step2[21]); |
| out[22] = vqsubq_s16(step2[9], step2[22]); |
| out[23] = vqsubq_s16(step2[8], step2[23]); |
| out[24] = vqsubq_s16(step2[7], step2[24]); |
| out[25] = vqsubq_s16(step2[6], step2[25]); |
| out[26] = vqsubq_s16(step2[5], step2[26]); |
| out[27] = vqsubq_s16(step2[4], step2[27]); |
| out[28] = vqsubq_s16(step2[3], step2[28]); |
| out[29] = vqsubq_s16(step2[2], step2[29]); |
| out[30] = vqsubq_s16(step2[1], step2[30]); |
| out[31] = vqsubq_s16(step2[0], step2[31]); |
| } |
| |
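| // 32-point inverse DCT for a DC-only block. |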
| static INLINE void idct32_low1_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1; |
| int32x4_t t32[2]; |
| |
| // stage 1 |
| // stage 2 |
| // stage 3 |
| // stage 4 |
| // stage 5 |
| |
| t32[0] = vmull_n_s16(vget_low_s16(in[0]), (int16_t)cospi[32]); |
| t32[1] = vmull_n_s16(vget_high_s16(in[0]), (int16_t)cospi[32]); |
| step1 = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), |
| vrshrn_n_s32(t32[1], INV_COS_BIT)); |
| |
| // stage 6 |
| // stage 7 |
| // stage 8 |
| // stage 9 |
| |
| out[0] = step1; |
| out[1] = step1; |
| out[2] = step1; |
| out[3] = step1; |
| out[4] = step1; |
| out[5] = step1; |
| out[6] = step1; |
| out[7] = step1; |
| out[8] = step1; |
| out[9] = step1; |
| out[10] = step1; |
| out[11] = step1; |
| out[12] = step1; |
| out[13] = step1; |
| out[14] = step1; |
| out[15] = step1; |
| out[16] = step1; |
| out[17] = step1; |
| out[18] = step1; |
| out[19] = step1; |
| out[20] = step1; |
| out[21] = step1; |
| out[22] = step1; |
| out[23] = step1; |
| out[24] = step1; |
| out[25] = step1; |
| out[26] = step1; |
| out[27] = step1; |
| out[28] = step1; |
| out[29] = step1; |
| out[30] = step1; |
| out[31] = step1; |
| } |
| |
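| // 32-point inverse DCT when only the first eight inputs are nonzero. |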
| static INLINE void idct32_low8_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[32], step2[32]; |
| int32x4_t t32[16]; |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| const int16x4_t c2 = |
| set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), |
| (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); |
| const int16x4_t c3 = |
| set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), |
| (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); |
| // stage 1 |
| // stage 2 |
| |
| step2[0] = in[0]; |
| step2[4] = in[4]; |
| step2[8] = in[2]; |
| step2[12] = in[6]; |
| |
| btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]); |
| btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]); |
| btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]); |
| btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]); |
| |
| // stage 3 |
| step1[0] = step2[0]; |
| step1[4] = step2[4]; |
| |
| btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]); |
| btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]); |
| |
| step1[16] = step2[16]; |
| step1[17] = step2[16]; |
| step1[18] = step2[19]; |
| step1[19] = step2[19]; |
| step1[20] = step2[20]; |
| step1[21] = step2[20]; |
| step1[22] = step2[23]; |
| step1[23] = step2[23]; |
| step1[24] = step2[24]; |
| step1[25] = step2[24]; |
| step1[26] = step2[27]; |
| step1[27] = step2[27]; |
| step1[28] = step2[28]; |
| step1[29] = step2[28]; |
| step1[30] = step2[31]; |
| step1[31] = step2[31]; |
| |
| // stage 4 |
| |
| btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]); |
| btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]); |
| btf_16_lane_1_0_neon(step1[18], step1[29], c2, &step2[18], &step2[29]); |
| btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]); |
| btf_16_lane_3_2_neon(step1[22], step1[25], c2, &step2[22], &step2[25]); |
| |
| step2[0] = step1[0]; |
| step2[8] = step1[8]; |
| step2[9] = step1[8]; |
| step2[10] = step1[11]; |
| step2[11] = step1[11]; |
| step2[12] = step1[12]; |
| step2[13] = step1[12]; |
| step2[14] = step1[15]; |
| step2[15] = step1[15]; |
| step2[16] = step1[16]; |
| step2[19] = step1[19]; |
| step2[20] = step1[20]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[27] = step1[27]; |
| step2[28] = step1[28]; |
| step2[31] = step1[31]; |
| |
| // stage 5 |
| |
| t32[0] = vmull_n_s16(vget_low_s16(step2[0]), (int16_t)cospi[32]); |
| t32[1] = vmull_n_s16(vget_high_s16(step2[0]), (int16_t)cospi[32]); |
| step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), |
| vrshrn_n_s32(t32[1], INV_COS_BIT)); |
| |
| btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]); |
| btf_16_lane_3_2_neon(step2[10], step2[13], c3, &step1[10], &step1[13]); |
| |
| step1[4] = step2[4]; |
| step1[5] = step2[4]; |
| step1[6] = step2[7]; |
| step1[7] = step2[7]; |
| step1[8] = step2[8]; |
| step1[11] = step2[11]; |
| step1[12] = step2[12]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[19]); |
| step1[17] = vqaddq_s16(step2[17], step2[18]); |
| step1[18] = vqsubq_s16(step2[17], step2[18]); |
| step1[19] = vqsubq_s16(step2[16], step2[19]); |
| step1[20] = vqsubq_s16(step2[23], step2[20]); |
| step1[21] = vqsubq_s16(step2[22], step2[21]); |
| step1[22] = vqaddq_s16(step2[22], step2[21]); |
| step1[23] = vqaddq_s16(step2[23], step2[20]); |
| step1[24] = vqaddq_s16(step2[24], step2[27]); |
| step1[25] = vqaddq_s16(step2[25], step2[26]); |
| step1[26] = vqsubq_s16(step2[25], step2[26]); |
| step1[27] = vqsubq_s16(step2[24], step2[27]); |
| step1[28] = vqsubq_s16(step2[31], step2[28]); |
| step1[29] = vqsubq_s16(step2[30], step2[29]); |
| step1[30] = vqaddq_s16(step2[30], step2[29]); |
| step1[31] = vqaddq_s16(step2[31], step2[28]); |
| |
| // stage 6 |
| |
| btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]); |
| btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]); |
| btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]); |
| btf_16_lane_3_2_neon(step1[20], step1[27], c3, &step2[20], &step2[27]); |
| btf_16_lane_3_2_neon(step1[21], step1[26], c3, &step2[21], &step2[26]); |
| |
| step2[0] = step1[0]; |
| step2[1] = step1[0]; |
| step2[2] = step1[0]; |
| step2[3] = step1[0]; |
| step2[4] = step1[4]; |
| step2[7] = step1[7]; |
| step2[8] = vqaddq_s16(step1[8], step1[11]); |
| step2[9] = vqaddq_s16(step1[9], step1[10]); |
| step2[10] = vqsubq_s16(step1[9], step1[10]); |
| step2[11] = vqsubq_s16(step1[8], step1[11]); |
| step2[12] = vqsubq_s16(step1[15], step1[12]); |
| step2[13] = vqsubq_s16(step1[14], step1[13]); |
| step2[14] = vqaddq_s16(step1[14], step1[13]); |
| step2[15] = vqaddq_s16(step1[15], step1[12]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[22] = step1[22]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[25] = step1[25]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 7 |
| |
| btf_16_lane_0_1_neon(step2[13], step2[10], c1, &step1[13], &step1[10]); |
| btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]); |
| |
| step1[0] = vqaddq_s16(step2[0], step2[7]); |
| step1[1] = vqaddq_s16(step2[1], step2[6]); |
| step1[2] = vqaddq_s16(step2[2], step2[5]); |
| step1[3] = vqaddq_s16(step2[3], step2[4]); |
| step1[4] = vqsubq_s16(step2[3], step2[4]); |
| step1[5] = vqsubq_s16(step2[2], step2[5]); |
| step1[6] = vqsubq_s16(step2[1], step2[6]); |
| step1[7] = vqsubq_s16(step2[0], step2[7]); |
| step1[8] = step2[8]; |
| step1[9] = step2[9]; |
| step1[14] = step2[14]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[23]); |
| step1[17] = vqaddq_s16(step2[17], step2[22]); |
| step1[18] = vqaddq_s16(step2[18], step2[21]); |
| step1[19] = vqaddq_s16(step2[19], step2[20]); |
| step1[20] = vqsubq_s16(step2[19], step2[20]); |
| step1[21] = vqsubq_s16(step2[18], step2[21]); |
| step1[22] = vqsubq_s16(step2[17], step2[22]); |
| step1[23] = vqsubq_s16(step2[16], step2[23]); |
| step1[24] = vqsubq_s16(step2[31], step2[24]); |
| step1[25] = vqsubq_s16(step2[30], step2[25]); |
| step1[26] = vqsubq_s16(step2[29], step2[26]); |
| step1[27] = vqsubq_s16(step2[28], step2[27]); |
| step1[28] = vqaddq_s16(step2[27], step2[28]); |
| step1[29] = vqaddq_s16(step2[26], step2[29]); |
| step1[30] = vqaddq_s16(step2[25], step2[30]); |
| step1[31] = vqaddq_s16(step2[24], step2[31]); |
| |
| // stage 8 |
| |
| btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]); |
| btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]); |
| btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]); |
| btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[15]); |
| step2[1] = vqaddq_s16(step1[1], step1[14]); |
| step2[2] = vqaddq_s16(step1[2], step1[13]); |
| step2[3] = vqaddq_s16(step1[3], step1[12]); |
| step2[4] = vqaddq_s16(step1[4], step1[11]); |
| step2[5] = vqaddq_s16(step1[5], step1[10]); |
| step2[6] = vqaddq_s16(step1[6], step1[9]); |
| step2[7] = vqaddq_s16(step1[7], step1[8]); |
| step2[8] = vqsubq_s16(step1[7], step1[8]); |
| step2[9] = vqsubq_s16(step1[6], step1[9]); |
| step2[10] = vqsubq_s16(step1[5], step1[10]); |
| step2[11] = vqsubq_s16(step1[4], step1[11]); |
| step2[12] = vqsubq_s16(step1[3], step1[12]); |
| step2[13] = vqsubq_s16(step1[2], step1[13]); |
| step2[14] = vqsubq_s16(step1[1], step1[14]); |
| step2[15] = vqsubq_s16(step1[0], step1[15]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[18] = step1[18]; |
| step2[19] = step1[19]; |
| step2[28] = step1[28]; |
| step2[29] = step1[29]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 9 |
| |
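  // Final butterfly: output i pairs with output 31 - i. The saturating
  // vqaddq_s16/vqsubq_s16 keep the results in 16-bit range rather than
  // wrapping on overflow.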
| out[0] = vqaddq_s16(step2[0], step2[31]); |
| out[1] = vqaddq_s16(step2[1], step2[30]); |
| out[2] = vqaddq_s16(step2[2], step2[29]); |
| out[3] = vqaddq_s16(step2[3], step2[28]); |
| out[4] = vqaddq_s16(step2[4], step2[27]); |
| out[5] = vqaddq_s16(step2[5], step2[26]); |
| out[6] = vqaddq_s16(step2[6], step2[25]); |
| out[7] = vqaddq_s16(step2[7], step2[24]); |
| out[8] = vqaddq_s16(step2[8], step2[23]); |
| out[9] = vqaddq_s16(step2[9], step2[22]); |
| out[10] = vqaddq_s16(step2[10], step2[21]); |
| out[11] = vqaddq_s16(step2[11], step2[20]); |
| out[12] = vqaddq_s16(step2[12], step2[19]); |
| out[13] = vqaddq_s16(step2[13], step2[18]); |
| out[14] = vqaddq_s16(step2[14], step2[17]); |
| out[15] = vqaddq_s16(step2[15], step2[16]); |
| out[16] = vqsubq_s16(step2[15], step2[16]); |
| out[17] = vqsubq_s16(step2[14], step2[17]); |
| out[18] = vqsubq_s16(step2[13], step2[18]); |
| out[19] = vqsubq_s16(step2[12], step2[19]); |
| out[20] = vqsubq_s16(step2[11], step2[20]); |
| out[21] = vqsubq_s16(step2[10], step2[21]); |
| out[22] = vqsubq_s16(step2[9], step2[22]); |
| out[23] = vqsubq_s16(step2[8], step2[23]); |
| out[24] = vqsubq_s16(step2[7], step2[24]); |
| out[25] = vqsubq_s16(step2[6], step2[25]); |
| out[26] = vqsubq_s16(step2[5], step2[26]); |
| out[27] = vqsubq_s16(step2[4], step2[27]); |
| out[28] = vqsubq_s16(step2[3], step2[28]); |
| out[29] = vqsubq_s16(step2[2], step2[29]); |
| out[30] = vqsubq_s16(step2[1], step2[30]); |
| out[31] = vqsubq_s16(step2[0], step2[31]); |
| } |
| |
| static INLINE void idct32_low16_neon(int16x8_t *in, int16x8_t *out, |
| int8_t cos_bit, int bit) { |
| (void)bit; |
| const int32_t *cospi = cospi_arr(cos_bit); |
| int16x8_t step1[32], step2[32]; |
| int32x4_t t32[16]; |
| const int16x4_t c0 = set_s16x4_neon((int16_t)cospi[8], (int16_t)cospi[56], |
| (int16_t)cospi[40], (int16_t)cospi[24]); |
| const int16x4_t c1 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| const int16x4_t c2 = |
| set_s16x4_neon((int16_t)(-cospi[8]), (int16_t)(-cospi[56]), |
| (int16_t)(-cospi[40]), (int16_t)(-cospi[24])); |
| const int16x4_t c3 = |
| set_s16x4_neon((int16_t)(-cospi[32]), (int16_t)(-cospi[32]), |
| (int16_t)(-cospi[16]), (int16_t)(-cospi[48])); |
| |
| // stage 1 |
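  // (in the reference transform, stage 1 is just the input permutation; it
  // is folded into the in[] indexing below, so no code is needed here)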
| // stage 2 |
| |
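  // Only the low 16 input coefficients are nonzero, so each stage-2
  // butterfly pairs an input with a zero partner and btf_16_neon() reduces
  // to two rounded scalings of a single vector, e.g. step2[16] = in[1] *
  // cospi[62] and step2[31] = in[1] * cospi[2], both shifted by INV_COS_BIT.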
| btf_16_neon(in[1], cospi[62], cospi[2], &step2[16], &step2[31]); |
| btf_16_neon(in[15], -cospi[34], cospi[30], &step2[17], &step2[30]); |
| btf_16_neon(in[9], cospi[46], cospi[18], &step2[18], &step2[29]); |
| btf_16_neon(in[7], -cospi[50], cospi[14], &step2[19], &step2[28]); |
| btf_16_neon(in[5], cospi[54], cospi[10], &step2[20], &step2[27]); |
| btf_16_neon(in[11], -cospi[42], cospi[22], &step2[21], &step2[26]); |
| btf_16_neon(in[13], cospi[38], cospi[26], &step2[22], &step2[25]); |
| btf_16_neon(in[3], -cospi[58], cospi[6], &step2[23], &step2[24]); |
| |
| step2[0] = in[0]; |
| step2[2] = in[8]; |
| step2[4] = in[4]; |
| step2[6] = in[12]; |
| step2[8] = in[2]; |
| step2[10] = in[10]; |
| step2[12] = in[6]; |
| step2[14] = in[14]; |
| |
| // stage 3 |
| |
| btf_16_neon(step2[8], cospi[60], cospi[4], &step1[8], &step1[15]); |
| btf_16_neon(step2[14], -cospi[36], cospi[28], &step1[9], &step1[14]); |
| btf_16_neon(step2[10], cospi[44], cospi[20], &step1[10], &step1[13]); |
| btf_16_neon(step2[12], -cospi[52], cospi[12], &step1[11], &step1[12]); |
| |
| step1[0] = step2[0]; |
| step1[2] = step2[2]; |
| step1[4] = step2[4]; |
| step1[6] = step2[6]; |
| step1[16] = vqaddq_s16(step2[16], step2[17]); |
| step1[17] = vqsubq_s16(step2[16], step2[17]); |
| step1[18] = vqsubq_s16(step2[19], step2[18]); |
| step1[19] = vqaddq_s16(step2[19], step2[18]); |
| step1[20] = vqaddq_s16(step2[20], step2[21]); |
| step1[21] = vqsubq_s16(step2[20], step2[21]); |
| step1[22] = vqsubq_s16(step2[23], step2[22]); |
| step1[23] = vqaddq_s16(step2[23], step2[22]); |
| step1[24] = vqaddq_s16(step2[24], step2[25]); |
| step1[25] = vqsubq_s16(step2[24], step2[25]); |
| step1[26] = vqsubq_s16(step2[27], step2[26]); |
| step1[27] = vqaddq_s16(step2[27], step2[26]); |
| step1[28] = vqaddq_s16(step2[28], step2[29]); |
| step1[29] = vqsubq_s16(step2[28], step2[29]); |
| step1[30] = vqsubq_s16(step2[31], step2[30]); |
| step1[31] = vqaddq_s16(step2[31], step2[30]); |
| |
| // stage 4 |
| |
| btf_16_neon(step1[4], cospi[56], cospi[8], &step2[4], &step2[7]); |
| btf_16_neon(step1[6], -cospi[40], cospi[24], &step2[5], &step2[6]); |
| btf_16_lane_0_1_neon(step1[30], step1[17], c0, &step2[30], &step2[17]); |
| btf_16_lane_1_0_neon(step1[18], step1[29], c2, &step2[18], &step2[29]); |
| btf_16_lane_2_3_neon(step1[26], step1[21], c0, &step2[26], &step2[21]); |
| btf_16_lane_3_2_neon(step1[22], step1[25], c2, &step2[22], &step2[25]); |
| |
| step2[0] = step1[0]; |
| step2[2] = step1[2]; |
| step2[8] = vqaddq_s16(step1[8], step1[9]); |
| step2[9] = vqsubq_s16(step1[8], step1[9]); |
| step2[10] = vqsubq_s16(step1[11], step1[10]); |
| step2[11] = vqaddq_s16(step1[11], step1[10]); |
| step2[12] = vqaddq_s16(step1[12], step1[13]); |
| step2[13] = vqsubq_s16(step1[12], step1[13]); |
| step2[14] = vqsubq_s16(step1[15], step1[14]); |
| step2[15] = vqaddq_s16(step1[15], step1[14]); |
| step2[16] = step1[16]; |
| step2[19] = step1[19]; |
| step2[20] = step1[20]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[27] = step1[27]; |
| step2[28] = step1[28]; |
| step2[31] = step1[31]; |
| |
| // stage 5 |
| |
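  // As in the previous variant, step2[1] is zero here and the DC rotation
  // collapses to a single cospi[32] scaling.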
| t32[0] = vmull_n_s16(vget_low_s16(step2[0]), cospi[32]); |
| t32[1] = vmull_n_s16(vget_high_s16(step2[0]), cospi[32]); |
| |
| step1[0] = vcombine_s16(vrshrn_n_s32(t32[0], INV_COS_BIT), |
| vrshrn_n_s32(t32[1], INV_COS_BIT)); |
| |
| btf_16_neon(step2[2], cospi[48], cospi[16], &step1[2], &step1[3]); |
| btf_16_lane_2_3_neon(step2[14], step2[9], c1, &step1[14], &step1[9]); |
| btf_16_lane_3_2_neon(step2[10], step2[13], c3, &step1[10], &step1[13]); |
| |
| step1[4] = vqaddq_s16(step2[4], step2[5]); |
| step1[5] = vqsubq_s16(step2[4], step2[5]); |
| step1[6] = vqsubq_s16(step2[7], step2[6]); |
| step1[7] = vqaddq_s16(step2[7], step2[6]); |
| step1[8] = step2[8]; |
| step1[11] = step2[11]; |
| step1[12] = step2[12]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[19]); |
| step1[17] = vqaddq_s16(step2[17], step2[18]); |
| step1[18] = vqsubq_s16(step2[17], step2[18]); |
| step1[19] = vqsubq_s16(step2[16], step2[19]); |
| step1[20] = vqsubq_s16(step2[23], step2[20]); |
| step1[21] = vqsubq_s16(step2[22], step2[21]); |
| step1[22] = vqaddq_s16(step2[22], step2[21]); |
| step1[23] = vqaddq_s16(step2[23], step2[20]); |
| step1[24] = vqaddq_s16(step2[24], step2[27]); |
| step1[25] = vqaddq_s16(step2[25], step2[26]); |
| step1[26] = vqsubq_s16(step2[25], step2[26]); |
| step1[27] = vqsubq_s16(step2[24], step2[27]); |
| step1[28] = vqsubq_s16(step2[31], step2[28]); |
| step1[29] = vqsubq_s16(step2[30], step2[29]); |
| step1[30] = vqaddq_s16(step2[30], step2[29]); |
| step1[31] = vqaddq_s16(step2[31], step2[28]); |
| |
| // stage 6 |
| |
| btf_16_lane_0_1_neon(step1[6], step1[5], c1, &step2[6], &step2[5]); |
| btf_16_lane_2_3_neon(step1[29], step1[18], c1, &step2[29], &step2[18]); |
| btf_16_lane_2_3_neon(step1[28], step1[19], c1, &step2[28], &step2[19]); |
| btf_16_lane_3_2_neon(step1[20], step1[27], c3, &step2[20], &step2[27]); |
| btf_16_lane_3_2_neon(step1[21], step1[26], c3, &step2[21], &step2[26]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[3]); |
| step2[1] = vqaddq_s16(step1[0], step1[2]); |
| step2[2] = vqsubq_s16(step1[0], step1[2]); |
| step2[3] = vqsubq_s16(step1[0], step1[3]); |
| step2[4] = step1[4]; |
| step2[7] = step1[7]; |
| step2[8] = vqaddq_s16(step1[8], step1[11]); |
| step2[9] = vqaddq_s16(step1[9], step1[10]); |
| step2[10] = vqsubq_s16(step1[9], step1[10]); |
| step2[11] = vqsubq_s16(step1[8], step1[11]); |
| step2[12] = vqsubq_s16(step1[15], step1[12]); |
| step2[13] = vqsubq_s16(step1[14], step1[13]); |
| step2[14] = vqaddq_s16(step1[14], step1[13]); |
| step2[15] = vqaddq_s16(step1[15], step1[12]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[22] = step1[22]; |
| step2[23] = step1[23]; |
| step2[24] = step1[24]; |
| step2[25] = step1[25]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 7 |
| |
| btf_16_lane_0_1_neon(step2[13], step2[10], c1, &step1[13], &step1[10]); |
| btf_16_lane_0_1_neon(step2[12], step2[11], c1, &step1[12], &step1[11]); |
| |
| step1[0] = vqaddq_s16(step2[0], step2[7]); |
| step1[1] = vqaddq_s16(step2[1], step2[6]); |
| step1[2] = vqaddq_s16(step2[2], step2[5]); |
| step1[3] = vqaddq_s16(step2[3], step2[4]); |
| step1[4] = vqsubq_s16(step2[3], step2[4]); |
| step1[5] = vqsubq_s16(step2[2], step2[5]); |
| step1[6] = vqsubq_s16(step2[1], step2[6]); |
| step1[7] = vqsubq_s16(step2[0], step2[7]); |
| step1[8] = step2[8]; |
| step1[9] = step2[9]; |
| step1[14] = step2[14]; |
| step1[15] = step2[15]; |
| step1[16] = vqaddq_s16(step2[16], step2[23]); |
| step1[17] = vqaddq_s16(step2[17], step2[22]); |
| step1[18] = vqaddq_s16(step2[18], step2[21]); |
| step1[19] = vqaddq_s16(step2[19], step2[20]); |
| step1[20] = vqsubq_s16(step2[19], step2[20]); |
| step1[21] = vqsubq_s16(step2[18], step2[21]); |
| step1[22] = vqsubq_s16(step2[17], step2[22]); |
| step1[23] = vqsubq_s16(step2[16], step2[23]); |
| step1[24] = vqsubq_s16(step2[31], step2[24]); |
| step1[25] = vqsubq_s16(step2[30], step2[25]); |
| step1[26] = vqsubq_s16(step2[29], step2[26]); |
| step1[27] = vqsubq_s16(step2[28], step2[27]); |
| step1[28] = vqaddq_s16(step2[27], step2[28]); |
| step1[29] = vqaddq_s16(step2[26], step2[29]); |
| step1[30] = vqaddq_s16(step2[25], step2[30]); |
| step1[31] = vqaddq_s16(step2[24], step2[31]); |
| |
| // stage 8 |
| |
| btf_16_lane_0_1_neon(step1[27], step1[20], c1, &step2[27], &step2[20]); |
| btf_16_lane_0_1_neon(step1[26], step1[21], c1, &step2[26], &step2[21]); |
| btf_16_lane_0_1_neon(step1[25], step1[22], c1, &step2[25], &step2[22]); |
| btf_16_lane_0_1_neon(step1[24], step1[23], c1, &step2[24], &step2[23]); |
| |
| step2[0] = vqaddq_s16(step1[0], step1[15]); |
| step2[1] = vqaddq_s16(step1[1], step1[14]); |
| step2[2] = vqaddq_s16(step1[2], step1[13]); |
| step2[3] = vqaddq_s16(step1[3], step1[12]); |
| step2[4] = vqaddq_s16(step1[4], step1[11]); |
| step2[5] = vqaddq_s16(step1[5], step1[10]); |
| step2[6] = vqaddq_s16(step1[6], step1[9]); |
| step2[7] = vqaddq_s16(step1[7], step1[8]); |
| step2[8] = vqsubq_s16(step1[7], step1[8]); |
| step2[9] = vqsubq_s16(step1[6], step1[9]); |
| step2[10] = vqsubq_s16(step1[5], step1[10]); |
| step2[11] = vqsubq_s16(step1[4], step1[11]); |
| step2[12] = vqsubq_s16(step1[3], step1[12]); |
| step2[13] = vqsubq_s16(step1[2], step1[13]); |
| step2[14] = vqsubq_s16(step1[1], step1[14]); |
| step2[15] = vqsubq_s16(step1[0], step1[15]); |
| step2[16] = step1[16]; |
| step2[17] = step1[17]; |
| step2[18] = step1[18]; |
| step2[19] = step1[19]; |
| step2[28] = step1[28]; |
| step2[29] = step1[29]; |
| step2[30] = step1[30]; |
| step2[31] = step1[31]; |
| |
| // stage 9 |
| |
| out[0] = vqaddq_s16(step2[0], step2[31]); |
| out[1] = vqaddq_s16(step2[1], step2[30]); |
| out[2] = vqaddq_s16(step2[2], step2[29]); |
| out[3] = vqaddq_s16(step2[3], step2[28]); |
| out[4] = vqaddq_s16(step2[4], step2[27]); |
| out[5] = vqaddq_s16(step2[5], step2[26]); |
| out[6] = vqaddq_s16(step2[6], step2[25]); |
| out[7] = vqaddq_s16(step2[7], step2[24]); |
| out[8] = vqaddq_s16(step2[8], step2[23]); |
| out[9] = vqaddq_s16(step2[9], step2[22]); |
| out[10] = vqaddq_s16(step2[10], step2[21]); |
| out[11] = vqaddq_s16(step2[11], step2[20]); |
| out[12] = vqaddq_s16(step2[12], step2[19]); |
| out[13] = vqaddq_s16(step2[13], step2[18]); |
| out[14] = vqaddq_s16(step2[14], step2[17]); |
| out[15] = vqaddq_s16(step2[15], step2[16]); |
| out[16] = vqsubq_s16(step2[15], step2[16]); |
| out[17] = vqsubq_s16(step2[14], step2[17]); |
| out[18] = vqsubq_s16(step2[13], step2[18]); |
| out[19] = vqsubq_s16(step2[12], step2[19]); |
| out[20] = vqsubq_s16(step2[11], step2[20]); |
| out[21] = vqsubq_s16(step2[10], step2[21]); |
| out[22] = vqsubq_s16(step2[9], step2[22]); |
| out[23] = vqsubq_s16(step2[8], step2[23]); |
| out[24] = vqsubq_s16(step2[7], step2[24]); |
| out[25] = vqsubq_s16(step2[6], step2[25]); |
| out[26] = vqsubq_s16(step2[5], step2[26]); |
| out[27] = vqsubq_s16(step2[4], step2[27]); |
| out[28] = vqsubq_s16(step2[3], step2[28]); |
| out[29] = vqsubq_s16(step2[2], step2[29]); |
| out[30] = vqsubq_s16(step2[1], step2[30]); |
| out[31] = vqsubq_s16(step2[0], step2[31]); |
| } |
| static INLINE void idct64_stage9_neon(int16x8_t *step2, int16x8_t *step1, |
| int8_t cos_bit) { |
| const int32_t *cospi = cospi_arr(cos_bit); |
| const int16x4_t c3 = set_s16x4_neon((int16_t)cospi[32], (int16_t)cospi[32], |
| (int16_t)cospi[16], (int16_t)cospi[48]); |
| |
| btf_16_lane_0_1_neon(step2[27], step2[20], c3, &step1[27], &step1[20]); |
| btf_16_lane_0_1_neon(step2[26], step2[21], c3, &step1[26], &step1[21]); |
| btf_16_lane_0_1_neon(step2[25], step2[22], c3, &step1[25], &step1[22]); |
| btf_16_lane_0_1_neon(step2[24], step2[23], c3, &step1[24], &step1[23]); |
| |
| step1[0] = vqaddq_s16(step2[0], step2[15]); |
| step1[1] = vqaddq_s16(step2[1], step2[14]); |
| step1[2] = vqaddq_s16(step2[2], step2[13]); |
| step1[3] = vqaddq_s16(step2[3], step2[12]); |
| step1[4] = vqaddq_s16(step2[4], step2[11]); |
| step1[5] = vqaddq_s16(step2[5], step2[10]); |
| step1[6] = vqaddq_s16(step2[6], step2[9]); |
| step1[7] = vqaddq_s16(step2[7], step2[8]); |
| step1[8] = vqsubq_s16(step2[7], step2[8]); |
| step1[9] = vqsubq_s16(step2[6], step2[9]); |
| step1[10] = vqsubq_s16(step2[5], step2[10]); |
| step1 |