/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "aom_dsp/mips/macros_msa.h"

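/* MSA (MIPS SIMD Architecture) averaging helpers: each function below blends
 * an unfiltered source block into the destination as a rounded per-byte
 * average, dst = (src + dst + 1) >> 1, using the vector load/average/store
 * macros from macros_msa.h. */

/* Width-4 blocks: heights that are a multiple of 4 process four rows per
 * iteration; other even heights process two rows per iteration. Only the
 * low 32 bits of each averaged vector are written back. */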
static void avg_width4_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                           int32_t dst_stride, int32_t height) {
  int32_t cnt;
  uint32_t out0, out1, out2, out3;
  v16u8 src0, src1, src2, src3;
  v16u8 dst0, dst1, dst2, dst3;

  if (0 == (height % 4)) {
    for (cnt = (height / 4); cnt--;) {
      LD_UB4(src, src_stride, src0, src1, src2, src3);
      src += (4 * src_stride);

      LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);

      AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                  dst2, dst3);

      out0 = __msa_copy_u_w((v4i32)dst0, 0);
      out1 = __msa_copy_u_w((v4i32)dst1, 0);
      out2 = __msa_copy_u_w((v4i32)dst2, 0);
      out3 = __msa_copy_u_w((v4i32)dst3, 0);
      SW4(out0, out1, out2, out3, dst, dst_stride);
      dst += (4 * dst_stride);
    }
  } else if (0 == (height % 2)) {
    for (cnt = (height / 2); cnt--;) {
      LD_UB2(src, src_stride, src0, src1);
      src += (2 * src_stride);

      LD_UB2(dst, dst_stride, dst0, dst1);

      AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);

      out0 = __msa_copy_u_w((v4i32)dst0, 0);
      out1 = __msa_copy_u_w((v4i32)dst1, 0);
      SW(out0, dst);
      dst += dst_stride;
      SW(out1, dst);
      dst += dst_stride;
    }
  }
}

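/* Width-8 blocks: four rows per iteration; the low 64 bits of each averaged
 * vector are written back with SD4. */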
static void avg_width8_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                           int32_t dst_stride, int32_t height) {
  int32_t cnt;
  uint64_t out0, out1, out2, out3;
  v16u8 src0, src1, src2, src3;
  v16u8 dst0, dst1, dst2, dst3;

  for (cnt = (height / 4); cnt--;) {
    LD_UB4(src, src_stride, src0, src1, src2, src3);
    src += (4 * src_stride);
    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);

    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                dst2, dst3);

    out0 = __msa_copy_u_d((v2i64)dst0, 0);
    out1 = __msa_copy_u_d((v2i64)dst1, 0);
    out2 = __msa_copy_u_d((v2i64)dst2, 0);
    out3 = __msa_copy_u_d((v2i64)dst3, 0);
    SD4(out0, out1, out2, out3, dst, dst_stride);
    dst += (4 * dst_stride);
  }
}

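/* Width-16 blocks: eight full 16-byte rows are loaded, averaged, and stored
 * per iteration. */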
static void avg_width16_msa(const uint8_t *src, int32_t src_stride,
                            uint8_t *dst, int32_t dst_stride, int32_t height) {
  int32_t cnt;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

  for (cnt = (height / 8); cnt--;) {
    LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
    src += (8 * src_stride);
    LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);

    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                dst2, dst3);
    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
                dst6, dst7);
    ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, dst_stride);
    dst += (8 * dst_stride);
  }
}

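/* Width-32 blocks: each row is handled as two 16-byte vectors (offsets 0 and
 * +16); eight rows are processed per iteration. */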
static void avg_width32_msa(const uint8_t *src, int32_t src_stride,
                            uint8_t *dst, int32_t dst_stride, int32_t height) {
  int32_t cnt;
  uint8_t *dst_dup = dst;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;

  for (cnt = (height / 8); cnt--;) {
    LD_UB4(src, src_stride, src0, src2, src4, src6);
    LD_UB4(src + 16, src_stride, src1, src3, src5, src7);
    src += (4 * src_stride);
    LD_UB4(dst_dup, dst_stride, dst0, dst2, dst4, dst6);
    LD_UB4(dst_dup + 16, dst_stride, dst1, dst3, dst5, dst7);
    dst_dup += (4 * dst_stride);
    LD_UB4(src, src_stride, src8, src10, src12, src14);
    LD_UB4(src + 16, src_stride, src9, src11, src13, src15);
    src += (4 * src_stride);
    LD_UB4(dst_dup, dst_stride, dst8, dst10, dst12, dst14);
    LD_UB4(dst_dup + 16, dst_stride, dst9, dst11, dst13, dst15);
    dst_dup += (4 * dst_stride);

    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                dst2, dst3);
    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
                dst6, dst7);
    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9,
                dst10, dst11);
    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, dst12,
                dst13, dst14, dst15);

    ST_UB4(dst0, dst2, dst4, dst6, dst, dst_stride);
    ST_UB4(dst1, dst3, dst5, dst7, dst + 16, dst_stride);
    dst += (4 * dst_stride);
    ST_UB4(dst8, dst10, dst12, dst14, dst, dst_stride);
    ST_UB4(dst9, dst11, dst13, dst15, dst + 16, dst_stride);
    dst += (4 * dst_stride);
  }
}

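/* Width-64 blocks: each row is handled as four contiguous 16-byte vectors;
 * four rows are processed per iteration. */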
static void avg_width64_msa(const uint8_t *src, int32_t src_stride,
                            uint8_t *dst, int32_t dst_stride, int32_t height) {
  int32_t cnt;
  uint8_t *dst_dup = dst;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;

  for (cnt = (height / 4); cnt--;) {
    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(src, 16, src4, src5, src6, src7);
    src += src_stride;
    LD_UB4(src, 16, src8, src9, src10, src11);
    src += src_stride;
    LD_UB4(src, 16, src12, src13, src14, src15);
    src += src_stride;

    LD_UB4(dst_dup, 16, dst0, dst1, dst2, dst3);
    dst_dup += dst_stride;
    LD_UB4(dst_dup, 16, dst4, dst5, dst6, dst7);
    dst_dup += dst_stride;
    LD_UB4(dst_dup, 16, dst8, dst9, dst10, dst11);
    dst_dup += dst_stride;
    LD_UB4(dst_dup, 16, dst12, dst13, dst14, dst15);
    dst_dup += dst_stride;

    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                dst2, dst3);
    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
                dst6, dst7);
    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9,
                dst10, dst11);
    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, dst12,
                dst13, dst14, dst15);

    ST_UB4(dst0, dst1, dst2, dst3, dst, 16);
    dst += dst_stride;
    ST_UB4(dst4, dst5, dst6, dst7, dst, 16);
    dst += dst_stride;
    ST_UB4(dst8, dst9, dst10, dst11, dst, 16);
    dst += dst_stride;
    ST_UB4(dst12, dst13, dst14, dst15, dst, 16);
    dst += dst_stride;
  }
}

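/* Averaging variant of the copy convolution: the filter arguments are unused.
 * Widths 4/8/16/32/64 dispatch to the MSA helpers above; any other width
 * falls back to a scalar rounded-average loop. */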
void aom_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const int16_t *filter_x, int32_t filter_x_stride,
                          const int16_t *filter_y, int32_t filter_y_stride,
                          int32_t w, int32_t h) {
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;

  switch (w) {
    case 4: {
      avg_width4_msa(src, src_stride, dst, dst_stride, h);
      break;
    }
    case 8: {
      avg_width8_msa(src, src_stride, dst, dst_stride, h);
      break;
    }
    case 16: {
      avg_width16_msa(src, src_stride, dst, dst_stride, h);
      break;
    }
    case 32: {
      avg_width32_msa(src, src_stride, dst, dst_stride, h);
      break;
    }
    case 64: {
      avg_width64_msa(src, src_stride, dst, dst_stride, h);
      break;
    }
    default: {
      int32_t lp, cnt;
      for (cnt = h; cnt--;) {
        for (lp = 0; lp < w; ++lp) {
          dst[lp] = (((dst[lp] + src[lp]) + 1) >> 1);
        }
        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
  }
}