/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./av1_rtcd.h"
#include "aom/aom_integer.h"

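// Computes the sum of squared differences between the dequantized
// coefficients (dqcoeff) and the original coefficients (coeff) and returns
// it as the block error; the sum of squared source coefficients is written
// to *ssz. block_size is assumed to be a multiple of 16, since the loop
// below consumes 16 coefficients per iteration; unaligned loads are used,
// so the input pointers need not be 32-byte aligned.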
int64_t av1_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_set1_epi16(0);

  // initialize the sse and ssz accumulators to zero
  sse_reg = _mm256_set1_epi16(0);
  ssz_reg = _mm256_set1_epi16(0);

  for (i = 0; i < block_size; i += 16) {
    // load 16 int16_t coefficients (32 bytes) from each of coeff and dqcoeff
    coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
    dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // square (dqcoeff - coeff), adding adjacent pairs into 32-bit lanes
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // square coeff, adding adjacent pairs into 32-bit lanes
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
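    // Note: each madd lane holds the sum of two adjacent squares, which fits
    // in 32 bits for the coefficient magnitudes the codec produces, but the
    // running totals over a whole block may not. The squares are
    // non-negative, so zero-extending the 32-bit partial sums to 64 bits
    // (unpacking against zero) is a valid widening before accumulation.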
    // zero-extend each 32-bit partial sum of (dqcoeff - coeff)^2 to 64 bits
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // zero-extend each 32-bit partial sum of coeff^2 to 64 bits
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // accumulate the 64-bit partial sums
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
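  // sse_reg and ssz_reg now each hold four 64-bit partial sums; reduce them
  // to a single value in two steps: within each 128-bit lane, then across
  // the two lanes.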
  // shift the upper 64 bits of each 128-bit lane down
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the upper 64 bits to the lower 64 bits of each lane
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // add the low 64 bits of the two 128-bit lanes together
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the results
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  return sse;
}
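
// For reference, a minimal scalar sketch of the computation performed by the
// AVX2 kernel above. The helper below is hypothetical (kept out of the build
// with #if 0, and its name is not part of the library API); it is shown only
// to document the kernel's contract.
#if 0
static int64_t block_error_ref(const int16_t *coeff, const int16_t *dqcoeff,
                               intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  intptr_t i;
  for (i = 0; i < block_size; ++i) {
    const int diff = dqcoeff[i] - coeff[i];
    error += (int64_t)diff * diff;            // sum of squared errors
    sqcoeff += (int64_t)coeff[i] * coeff[i];  // sum of squared coefficients
  }
  *ssz = sqcoeff;
  return error;
}
#endif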