Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 1 | /* |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 2 | * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 3 | * |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 4 | * This source code is subject to the terms of the BSD 2 Clause License and |
| 5 | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
| 6 | * was not distributed with this source code in the LICENSE file, you can |
| 7 | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
| 8 | * Media Patent License 1.0 was not distributed with this source code in the |
| 9 | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <assert.h> |
| 13 | |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 14 | #include "aom/aom_integer.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 15 | #include "aom_ports/mem.h" |
| 16 | #include "aom_dsp/blend.h" |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 17 | #include "aom_dsp/aom_dsp_common.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 18 | |
Yaowu Xu | f883b42 | 2016-08-30 14:01:10 -0700 | [diff] [blame] | 19 | #include "./aom_dsp_rtcd.h" |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 20 | |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 21 | // Blending with alpha mask. Mask values come from the range [0, 64], |
| 22 | // as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can |
| 23 | // be the same as dst, or dst can be different from both sources. |
| 24 | |
David Barker | d3b9973 | 2018-01-30 16:09:42 +0000 | [diff] [blame] | 25 | // NOTE(david.barker): The input and output of aom_blend_a64_d32_mask_c() are |
| 26 | // in a higher intermediate precision, and will later be rounded down to pixel |
| 27 | // precision. |
| 28 | // Thus, in order to avoid double-rounding, we want to use normal right shifts |
| 29 | // within this function, not ROUND_POWER_OF_TWO. |
| 30 | // This works because of the identity: |
| 31 | // ROUND_POWER_OF_TWO(x >> y, z) == ROUND_POWER_OF_TWO(x, y+z) |
| 32 | // |
| 33 | // In contrast, the output of the non-d32 functions will not be further rounded, |
| 34 | // so we *should* use ROUND_POWER_OF_TWO there. |
| 35 | |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 36 | void aom_blend_a64_d32_mask_c(int32_t *dst, uint32_t dst_stride, |
| 37 | const int32_t *src0, uint32_t src0_stride, |
| 38 | const int32_t *src1, uint32_t src1_stride, |
| 39 | const uint8_t *mask, uint32_t mask_stride, int h, |
| 40 | int w, int subh, int subw) { |
| 41 | int i, j; |
| 42 | |
| 43 | assert(IMPLIES(src0 == dst, src0_stride == dst_stride)); |
| 44 | assert(IMPLIES(src1 == dst, src1_stride == dst_stride)); |
| 45 | |
| 46 | assert(h >= 1); |
| 47 | assert(w >= 1); |
| 48 | assert(IS_POWER_OF_TWO(h)); |
| 49 | assert(IS_POWER_OF_TWO(w)); |
| 50 | |
| 51 | if (subw == 0 && subh == 0) { |
| 52 | for (i = 0; i < h; ++i) { |
| 53 | for (j = 0; j < w; ++j) { |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 54 | const int m = mask[i * mask_stride + j]; |
David Barker | d3b9973 | 2018-01-30 16:09:42 +0000 | [diff] [blame] | 55 | dst[i * dst_stride + j] = |
| 56 | ((m * src0[i * src0_stride + j] + |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 57 | (AOM_BLEND_A64_MAX_ALPHA - m) * src1[i * src1_stride + j]) >> |
| 58 | AOM_BLEND_A64_ROUND_BITS); |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 59 | } |
| 60 | } |
| 61 | } else if (subw == 1 && subh == 1) { |
| 62 | for (i = 0; i < h; ++i) { |
| 63 | for (j = 0; j < w; ++j) { |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 64 | const int m = ROUND_POWER_OF_TWO( |
| 65 | mask[(2 * i) * mask_stride + (2 * j)] + |
| 66 | mask[(2 * i + 1) * mask_stride + (2 * j)] + |
| 67 | mask[(2 * i) * mask_stride + (2 * j + 1)] + |
| 68 | mask[(2 * i + 1) * mask_stride + (2 * j + 1)], |
| 69 | 2); |
David Barker | d3b9973 | 2018-01-30 16:09:42 +0000 | [diff] [blame] | 70 | dst[i * dst_stride + j] = |
| 71 | ((m * src0[i * src0_stride + j] + |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 72 | (AOM_BLEND_A64_MAX_ALPHA - m) * src1[i * src1_stride + j]) >> |
| 73 | AOM_BLEND_A64_ROUND_BITS); |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 74 | } |
| 75 | } |
| 76 | } else if (subw == 1 && subh == 0) { |
| 77 | for (i = 0; i < h; ++i) { |
| 78 | for (j = 0; j < w; ++j) { |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 79 | const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)], |
| 80 | mask[i * mask_stride + (2 * j + 1)]); |
David Barker | d3b9973 | 2018-01-30 16:09:42 +0000 | [diff] [blame] | 81 | dst[i * dst_stride + j] = |
| 82 | ((m * src0[i * src0_stride + j] + |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 83 | (AOM_BLEND_A64_MAX_ALPHA - m) * src1[i * src1_stride + j]) >> |
| 84 | AOM_BLEND_A64_ROUND_BITS); |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 85 | } |
| 86 | } |
| 87 | } else { |
| 88 | for (i = 0; i < h; ++i) { |
| 89 | for (j = 0; j < w; ++j) { |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 90 | const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j], |
| 91 | mask[(2 * i + 1) * mask_stride + j]); |
David Barker | d3b9973 | 2018-01-30 16:09:42 +0000 | [diff] [blame] | 92 | dst[i * dst_stride + j] = |
| 93 | ((m * src0[i * src0_stride + j] + |
Debargha Mukherjee | 52a096f | 2018-03-01 19:28:12 +0000 | [diff] [blame] | 94 | (AOM_BLEND_A64_MAX_ALPHA - m) * src1[i * src1_stride + j]) >> |
| 95 | AOM_BLEND_A64_ROUND_BITS); |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 96 | } |
| 97 | } |
| 98 | } |
| 99 | } |
Angie Chiang | 7b51709 | 2017-07-20 17:31:28 -0700 | [diff] [blame] | 100 | |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 101 | // Blending with alpha mask. Mask values come from the range [0, 64], |
Yaowu Xu | 9c01aa1 | 2016-09-01 14:32:49 -0700 | [diff] [blame] | 102 | // as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can |
Yaowu Xu | c27fc14 | 2016-08-22 16:08:15 -0700 | [diff] [blame] | 103 | // be the same as dst, or dst can be different from both sources. |
| 104 | |
void aom_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride,
                          const uint8_t *src0, uint32_t src0_stride,
                          const uint8_t *src1, uint32_t src1_stride,
                          const uint8_t *mask, uint32_t mask_stride, int h,
                          int w, int subh, int subw) {
  // Blend 8-bit pixel sources into dst, weighting src0 by the alpha mask
  // and src1 by its complement, with subw/subh selecting how the mask is
  // subsampled relative to the pixel grid. Unlike the d32 variant above,
  // the output here is final pixel data, so AOM_BLEND_A64 (which rounds)
  // is the right tool.
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  for (int row = 0; row < h; ++row) {
    uint8_t *const dst_row = dst + row * dst_stride;
    const uint8_t *const s0_row = src0 + row * src0_stride;
    const uint8_t *const s1_row = src1 + row * src1_stride;
    for (int col = 0; col < w; ++col) {
      int alpha;
      if (subw == 0 && subh == 0) {
        // Mask is at full resolution: read it directly.
        alpha = mask[row * mask_stride + col];
      } else if (subw == 1 && subh == 1) {
        // Mask is subsampled in both directions: average the 2x2 group.
        alpha = ROUND_POWER_OF_TWO(
            mask[(2 * row) * mask_stride + (2 * col)] +
                mask[(2 * row + 1) * mask_stride + (2 * col)] +
                mask[(2 * row) * mask_stride + (2 * col + 1)] +
                mask[(2 * row + 1) * mask_stride + (2 * col + 1)],
            2);
      } else if (subw == 1 && subh == 0) {
        // Mask is subsampled horizontally: average the horizontal pair.
        alpha = AOM_BLEND_AVG(mask[row * mask_stride + (2 * col)],
                              mask[row * mask_stride + (2 * col + 1)]);
      } else {
        // Mask is subsampled vertically: average the vertical pair.
        alpha = AOM_BLEND_AVG(mask[(2 * row) * mask_stride + col],
                              mask[(2 * row + 1) * mask_stride + col]);
      }
      dst_row[col] = AOM_BLEND_A64(alpha, s0_row[col], s1_row[col]);
    }
  }
}
| 161 | |
void aom_highbd_blend_a64_mask_c(uint8_t *dst_8, uint32_t dst_stride,
                                 const uint8_t *src0_8, uint32_t src0_stride,
                                 const uint8_t *src1_8, uint32_t src1_stride,
                                 const uint8_t *mask, uint32_t mask_stride,
                                 int h, int w, int subh, int subw, int bd) {
  // High-bitdepth counterpart of aom_blend_a64_mask_c(): the uint8_t*
  // arguments are CONVERT_TO_SHORTPTR-wrapped uint16_t buffers. The blend
  // arithmetic is bitdepth-independent, so bd is only sanity-checked.
  uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8);
  const uint16_t *const src0 = CONVERT_TO_SHORTPTR(src0_8);
  const uint16_t *const src1 = CONVERT_TO_SHORTPTR(src1_8);
  (void)bd;  // Only read inside assert(); silences unused-arg in NDEBUG.

  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  assert(bd == 8 || bd == 10 || bd == 12);

  for (int row = 0; row < h; ++row) {
    uint16_t *const dst_row = dst + row * dst_stride;
    const uint16_t *const s0_row = src0 + row * src0_stride;
    const uint16_t *const s1_row = src1 + row * src1_stride;
    for (int col = 0; col < w; ++col) {
      int alpha;
      if (subw == 0 && subh == 0) {
        // Mask is at full resolution: read it directly.
        alpha = mask[row * mask_stride + col];
      } else if (subw == 1 && subh == 1) {
        // Mask is subsampled in both directions: average the 2x2 group.
        alpha = ROUND_POWER_OF_TWO(
            mask[(2 * row) * mask_stride + (2 * col)] +
                mask[(2 * row + 1) * mask_stride + (2 * col)] +
                mask[(2 * row) * mask_stride + (2 * col + 1)] +
                mask[(2 * row + 1) * mask_stride + (2 * col + 1)],
            2);
      } else if (subw == 1 && subh == 0) {
        // Mask is subsampled horizontally: average the horizontal pair.
        alpha = AOM_BLEND_AVG(mask[row * mask_stride + (2 * col)],
                              mask[row * mask_stride + (2 * col + 1)]);
      } else {
        // Mask is subsampled vertically: average the vertical pair.
        alpha = AOM_BLEND_AVG(mask[(2 * row) * mask_stride + col],
                              mask[(2 * row + 1) * mask_stride + col]);
      }
      dst_row[col] = AOM_BLEND_A64(alpha, s0_row[col], s1_row[col]);
    }
  }
}