Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 1 | /* |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 2 | * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 3 | * |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 4 | * This source code is subject to the terms of the BSD 2 Clause License and |
| 5 | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
| 6 | * was not distributed with this source code in the LICENSE file, you can |
| 7 | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
| 8 | * Media Patent License 1.0 was not distributed with this source code in the |
| 9 | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <assert.h> |
| 13 | |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 14 | #include "aom/aom_integer.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 15 | #include "aom_ports/mem.h" |
| 16 | #include "aom_dsp/blend.h" |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 17 | #include "aom_dsp/aom_dsp_common.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 18 | |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 19 | #include "./aom_dsp_rtcd.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 20 | |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 21 | #if CONFIG_CONVOLVE_ROUND |
| 22 | // Blending with alpha mask. Mask values come from the range [0, 64], |
| 23 | // as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can |
| 24 | // be the same as dst, or dst can be different from both sources. |
| 25 | |
void aom_blend_a64_d32_mask_c(int32_t *dst, uint32_t dst_stride,
                              const int32_t *src0, uint32_t src0_stride,
                              const int32_t *src1, uint32_t src1_stride,
                              const uint8_t *mask, uint32_t mask_stride, int h,
                              int w, int subh, int subw) {
  // Blend two 32-bit intermediate (pre-rounding) sources into dst under an
  // alpha mask with weights in [0, 64], as defined by AOM_BLEND_A64 in
  // aom_dsp/blend.h. The mask may match the output resolution or be
  // subsampled by 2 horizontally (subw == 1), vertically (subh == 1), or
  // both; subsampled mask samples are averaged down to one weight per
  // output pixel. src0 or src1 may alias dst (same stride required).
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  if (subw == 0 && subh == 0) {
    // Full-resolution mask: one mask byte per output pixel.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const int32_t *const p0 = src0 + (size_t)y * src0_stride;
      const int32_t *const p1 = src1 + (size_t)y * src1_stride;
      int32_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        out[x] = AOM_BLEND_A64(m_row[x], p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 1) {
    // Mask subsampled 2x in both directions: round-average each 2x2 patch.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const int32_t *const p0 = src0 + (size_t)y * src0_stride;
      const int32_t *const p1 = src1 + (size_t)y * src1_stride;
      int32_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m =
            ROUND_POWER_OF_TWO(m_row0[2 * x] + m_row1[2 * x] +
                                   m_row0[2 * x + 1] + m_row1[2 * x + 1],
                               2);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 0) {
    // Mask subsampled 2x horizontally: average each horizontal pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const int32_t *const p0 = src0 + (size_t)y * src0_stride;
      const int32_t *const p1 = src1 + (size_t)y * src1_stride;
      int32_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row[2 * x], m_row[2 * x + 1]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else {
    // Mask subsampled 2x vertically: average each vertical pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const int32_t *const p0 = src0 + (size_t)y * src0_stride;
      const int32_t *const p1 = src1 + (size_t)y * src1_stride;
      int32_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row0[x], m_row1[x]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  }
}
| 82 | #endif // CONFIG_CONVOLVE_ROUND |
| 83 | |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 84 | // Blending with alpha mask. Mask values come from the range [0, 64], |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 85 | // as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 86 | // be the same as dst, or dst can be different from both sources. |
| 87 | |
void aom_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
                          const uint8_t *src0, uint32_t src0_stride,
                          const uint8_t *src1, uint32_t src1_stride,
                          const uint8_t *mask, uint32_t mask_stride, int h,
                          int w, int subh, int subw) {
  // Blend two 8-bit sources into dst under an alpha mask with weights in
  // [0, 64], as defined by AOM_BLEND_A64 in aom_dsp/blend.h. The mask may
  // match the output resolution or be subsampled by 2 horizontally
  // (subw == 1), vertically (subh == 1), or both; subsampled mask samples
  // are averaged down to one weight per output pixel. src0 or src1 may
  // alias dst (same stride required).
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  if (subw == 0 && subh == 0) {
    // Full-resolution mask: one mask byte per output pixel.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const uint8_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint8_t *const p1 = src1 + (size_t)y * src1_stride;
      uint8_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        out[x] = AOM_BLEND_A64(m_row[x], p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 1) {
    // Mask subsampled 2x in both directions: round-average each 2x2 patch.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const uint8_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint8_t *const p1 = src1 + (size_t)y * src1_stride;
      uint8_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m =
            ROUND_POWER_OF_TWO(m_row0[2 * x] + m_row1[2 * x] +
                                   m_row0[2 * x + 1] + m_row1[2 * x + 1],
                               2);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 0) {
    // Mask subsampled 2x horizontally: average each horizontal pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const uint8_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint8_t *const p1 = src1 + (size_t)y * src1_stride;
      uint8_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row[2 * x], m_row[2 * x + 1]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else {
    // Mask subsampled 2x vertically: average each vertical pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const uint8_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint8_t *const p1 = src1 + (size_t)y * src1_stride;
      uint8_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row0[x], m_row1[x]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  }
}
| 144 | |
Sebastien Alaiwan | 71e8784 | 2017-04-12 16:03:28 +0200 | [diff] [blame] | 145 | #if CONFIG_HIGHBITDEPTH |
void aom_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
                                 const uint8_t *src0_8, uint32_t src0_stride,
                                 const uint8_t *src1_8, uint32_t src1_stride,
                                 const uint8_t *mask, uint32_t mask_stride,
                                 int h, int w, int subh, int subw, int bd) {
  // High-bitdepth variant: dst_8/src0_8/src1_8 are uint16_t buffers passed
  // through the CONVERT_TO_SHORTPTR convention. Blends the two sources
  // under a [0, 64] alpha mask (AOM_BLEND_A64, aom_dsp/blend.h); the mask
  // may be subsampled by 2 horizontally (subw), vertically (subh), or
  // both. src0 or src1 may alias dst (same stride required).
  uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8);
  const uint16_t *const src0 = CONVERT_TO_SHORTPTR(src0_8);
  const uint16_t *const src1 = CONVERT_TO_SHORTPTR(src1_8);
  (void)bd;  // Only read by the assert below; silence NDEBUG warnings.

  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  assert(bd == 8 || bd == 10 || bd == 12);

  if (subw == 0 && subh == 0) {
    // Full-resolution mask: one mask byte per output pixel.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const uint16_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint16_t *const p1 = src1 + (size_t)y * src1_stride;
      uint16_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        out[x] = AOM_BLEND_A64(m_row[x], p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 1) {
    // Mask subsampled 2x in both directions: round-average each 2x2 patch.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const uint16_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint16_t *const p1 = src1 + (size_t)y * src1_stride;
      uint16_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m =
            ROUND_POWER_OF_TWO(m_row0[2 * x] + m_row1[2 * x] +
                                   m_row0[2 * x + 1] + m_row1[2 * x + 1],
                               2);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else if (subw == 1 && subh == 0) {
    // Mask subsampled 2x horizontally: average each horizontal pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row = mask + (size_t)y * mask_stride;
      const uint16_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint16_t *const p1 = src1 + (size_t)y * src1_stride;
      uint16_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row[2 * x], m_row[2 * x + 1]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  } else {
    // Mask subsampled 2x vertically: average each vertical pair.
    for (int y = 0; y < h; ++y) {
      const uint8_t *const m_row0 = mask + (size_t)(2 * y) * mask_stride;
      const uint8_t *const m_row1 = m_row0 + mask_stride;
      const uint16_t *const p0 = src0 + (size_t)y * src0_stride;
      const uint16_t *const p1 = src1 + (size_t)y * src1_stride;
      uint16_t *const out = dst + (size_t)y * dst_stride;
      for (int x = 0; x < w; ++x) {
        const int m = AOM_BLEND_AVG(m_row0[x], m_row1[x]);
        out[x] = AOM_BLEND_A64(m, p0[x], p1[x]);
      }
    }
  }
}
Sebastien Alaiwan | 71e8784 | 2017-04-12 16:03:28 +0200 | [diff] [blame] | 208 | #endif // CONFIG_HIGHBITDEPTH |