/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

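/* MSA (MIPS SIMD Architecture) helper macros for the ADST stages of the
 * AV1 encoder's forward transforms. */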
#ifndef AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
#define AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_

#include "aom_dsp/mips/fwd_txfm_msa.h"
#include "aom_dsp/mips/txfm_macros_msa.h"
#include "aom_ports/mem.h"

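/* 8-point forward ADST (asymmetric discrete sine transform) stage: combines
 * the eight v8i16 inputs in0..in7 with the cospi_* constants through MSA
 * interleave and dot-product helper macros, then rounds and packs the
 * results into out0..out7. Identifiers ending in _m are macro-local
 * temporaries. */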
#define AOM_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
                  out3, out4, out5, out6, out7) \
  { \
    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
    v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
                       cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
    v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
                       cospi_24_64, -cospi_24_64, 0, 0 }; \
    \
    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
    cnst2_m = -cnst0_m; \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
    cnst4_m = -cnst2_m; \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
    \
    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
                          cnst2_m, cnst3_m, in7, in0, in4, in3); \
    \
    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
    cnst2_m = -cnst0_m; \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
    cnst4_m = -cnst2_m; \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
    \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
    \
    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
                          cnst2_m, cnst3_m, in5, in2, in6, in1); \
    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
    out7 = -s0_m; \
    out0 = s1_m; \
    \
    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
    \
    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
    cnst1_m = cnst0_m; \
    \
    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m, \
                          cnst3_m, cnst1_m, out1, out6, s0_m, s1_m); \
    \
    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
    \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
    out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
    out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
    out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
    out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
    \
    out1 = -out1; \
    out3 = -out3; \
    out5 = -out5; \
  }

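/* 4-point forward ADST stage: widens the right-half lanes of in0..in3 to
 * 32 bits, accumulates the sinpi_* weighted terms, rounds by DCT_CONST_BITS,
 * and packs the results back to 16-bit lanes in out0..out3. */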
#define AOM_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
  { \
    v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
    v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
    \
    UNPCK_R_SH_SW(in0, in0_r_m); \
    UNPCK_R_SH_SW(in1, in1_r_m); \
    UNPCK_R_SH_SW(in2, in2_r_m); \
    UNPCK_R_SH_SW(in3, in3_r_m); \
    \
    constant_m = __msa_fill_w(sinpi_4_9); \
    MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \
    \
    constant_m = __msa_fill_w(sinpi_1_9); \
    s0_m += in0_r_m * constant_m; \
    s1_m -= in1_r_m * constant_m; \
    \
    constant_m = __msa_fill_w(sinpi_2_9); \
    s0_m += in1_r_m * constant_m; \
    s1_m += in3_r_m * constant_m; \
    \
    s2_m = in0_r_m + in1_r_m - in3_r_m; \
    \
    constant_m = __msa_fill_w(sinpi_3_9); \
    MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \
    \
    in0_r_m = s0_m + s3_m; \
    s2_m = s1_m - s3_m; \
    s3_m = s1_m - s0_m + s3_m; \
    \
    SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
    PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
                out0, out1, out2, out3); \
  }
#endif  // AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_