Scott LaVarnway | 6f4b8dc | 2014-07-30 12:16:04 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license |
| 5 | * that can be found in the LICENSE file in the root of the source |
| 6 | * tree. An additional intellectual property rights grant can be found |
| 7 | * in the file PATENTS. All contributing project authors may |
| 8 | * be found in the AUTHORS file in the root of the source tree. |
| 9 | */ |
| 10 | |
| 11 | #include <arm_neon.h> |
| 12 | #include "./vp9_rtcd.h" |
| 13 | #include "./vpx_config.h" |
| 14 | |
| 15 | #include "vpx/vpx_integer.h" |
| 16 | |
/* Computes the pixel-wise residual diff[r][c] = src[r][c] - pred[r][c] for a
 * rows x cols block. cols is expected to be a VP9 block width (4, 8, 16, 32
 * or 64); each width class is handled with the widest NEON load that fits.
 * All three buffers are addressed with their own stride (in elements).
 */
void vp9_subtract_block_neon(int rows, int cols,
                             int16_t *diff, ptrdiff_t diff_stride,
                             const uint8_t *src, ptrdiff_t src_stride,
                             const uint8_t *pred, ptrdiff_t pred_stride) {
  int r, c;

  if (cols > 16) {
    /* 32- and 64-wide blocks: consume 32 pixels per inner iteration using
     * two 16-byte vectors. */
    for (r = 0; r < rows; ++r) {
      for (c = 0; c < cols; c += 32) {
        const uint8x16_t s_lo = vld1q_u8(src + c);
        const uint8x16_t s_hi = vld1q_u8(src + c + 16);
        const uint8x16_t p_lo = vld1q_u8(pred + c);
        const uint8x16_t p_hi = vld1q_u8(pred + c + 16);
        /* vsubl_u8 widens to 16 bits; a negative difference wraps mod 2^16,
         * which reinterprets to the correct signed value. */
        const uint16x8_t d0 = vsubl_u8(vget_low_u8(s_lo), vget_low_u8(p_lo));
        const uint16x8_t d1 = vsubl_u8(vget_high_u8(s_lo),
                                       vget_high_u8(p_lo));
        const uint16x8_t d2 = vsubl_u8(vget_low_u8(s_hi), vget_low_u8(p_hi));
        const uint16x8_t d3 = vsubl_u8(vget_high_u8(s_hi),
                                       vget_high_u8(p_hi));
        vst1q_s16(diff + c, vreinterpretq_s16_u16(d0));
        vst1q_s16(diff + c + 8, vreinterpretq_s16_u16(d1));
        vst1q_s16(diff + c + 16, vreinterpretq_s16_u16(d2));
        vst1q_s16(diff + c + 24, vreinterpretq_s16_u16(d3));
      }
      diff += diff_stride;
      pred += pred_stride;
      src += src_stride;
    }
  } else if (cols > 8) {
    /* 16-wide blocks: one 16-byte vector per row. */
    for (r = 0; r < rows; ++r) {
      const uint8x16_t s = vld1q_u8(src);
      const uint8x16_t p = vld1q_u8(pred);
      const uint16x8_t d_lo = vsubl_u8(vget_low_u8(s), vget_low_u8(p));
      const uint16x8_t d_hi = vsubl_u8(vget_high_u8(s), vget_high_u8(p));
      vst1q_s16(diff, vreinterpretq_s16_u16(d_lo));
      vst1q_s16(diff + 8, vreinterpretq_s16_u16(d_hi));
      diff += diff_stride;
      pred += pred_stride;
      src += src_stride;
    }
  } else if (cols > 4) {
    /* 8-wide blocks: one 8-byte vector per row. */
    for (r = 0; r < rows; ++r) {
      const uint8x8_t s = vld1_u8(src);
      const uint8x8_t p = vld1_u8(pred);
      vst1q_s16(diff, vreinterpretq_s16_u16(vsubl_u8(s, p)));
      diff += diff_stride;
      pred += pred_stride;
      src += src_stride;
    }
  } else {
    /* 4-wide (or narrower) blocks: too small to vectorize, do it scalar. */
    for (r = 0; r < rows; ++r) {
      for (c = 0; c < cols; ++c)
        diff[c] = src[c] - pred[c];

      diff += diff_stride;
      pred += pred_stride;
      src += src_stride;
    }
  }
}