[x86]: Improve av1_quantize_fp_avx2().
1.13x to 1.49x faster than the previous version,
depending on the position of the last nonzero coefficient.
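
For reference, the per-coefficient math the vector code below implements is
roughly the following scalar sketch (log_scale == 0 case only; it assumes
init_qp() sets the skip threshold to dequant >> 1 and elides the exact
clamping done by av1_quantize_fp_c()):

  int eob = -1;
  for (intptr_t i = 0; i < n_coeffs; i++) {
    const int dc_or_ac = (i != 0);  // entry 0 of each table is for DC
    const int coeff = coeff_ptr[i];
    const int abs_coeff = abs(coeff);
    int abs_q = 0;
    if (abs_coeff >= (dequant_ptr[dc_or_ac] >> 1)) {
      abs_q = (int)(((int32_t)(abs_coeff + round_ptr[dc_or_ac]) *
                     quant_ptr[dc_or_ac]) >> 16);
    }
    qcoeff_ptr[i] = coeff < 0 ? -abs_q : abs_q;
    dqcoeff_ptr[i] = qcoeff_ptr[i] * dequant_ptr[dc_or_ac];
    if (abs_q && iscan_ptr[i] > eob) eob = iscan_ptr[i];
  }
  *eob_ptr = (uint16_t)(eob + 1);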
Bug: b/235228922
Change-Id: I0cf3fed0d74da3cca13faea799baa92f57ff03aa
diff --git a/av1/encoder/x86/av1_quantize_avx2.c b/av1/encoder/x86/av1_quantize_avx2.c
index 591edd7..d6b0e17 100644
--- a/av1/encoder/x86/av1_quantize_avx2.c
+++ b/av1/encoder/x86/av1_quantize_avx2.c
@@ -95,6 +95,21 @@
} \
} while (0)
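+// Load 16 32-bit coefficients and pack them into a single vector of 16
+// 16-bit values. _mm256_packs_epi32() operates per 128-bit lane, so the
+// packed order is 0-3, 8-11, 4-7, 12-15 rather than sequential.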
+static INLINE __m256i load_coefficients_avx2(const tran_low_t *coeff_ptr) {
+  const __m256i coeff1 = _mm256_load_si256((__m256i *)coeff_ptr);
+  const __m256i coeff2 = _mm256_load_si256((__m256i *)(coeff_ptr + 8));
+  return _mm256_packs_epi32(coeff1, coeff2);
+}
+
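+// Sign-extend 16 16-bit values back to 32 bits and store them. The per-lane
+// unpacks undo the interleaved order produced by load_coefficients_avx2(),
+// so the coefficients are stored sequentially.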
+static INLINE void store_coefficients_avx2(__m256i coeff_vals,
+                                           tran_low_t *coeff_ptr) {
+  __m256i coeff_sign = _mm256_srai_epi16(coeff_vals, 15);
+  __m256i coeff_vals_lo = _mm256_unpacklo_epi16(coeff_vals, coeff_sign);
+  __m256i coeff_vals_hi = _mm256_unpackhi_epi16(coeff_vals, coeff_sign);
+  _mm256_store_si256((__m256i *)coeff_ptr, coeff_vals_lo);
+  _mm256_store_si256((__m256i *)(coeff_ptr + 8), coeff_vals_hi);
+}
+
 static INLINE uint16_t quant_gather_eob(__m256i eob) {
   const __m128i eob_lo = _mm256_castsi256_si128(eob);
   const __m128i eob_hi = _mm256_extractf128_si256(eob, 1);
@@ -104,34 +119,6 @@
   return INT16_MAX - _mm_extract_epi16(eob_s, 0);
 }
-static INLINE void quantize(const __m256i *thr, const __m256i *qp, __m256i *c,
-                            const int16_t *iscan_ptr, tran_low_t *qcoeff,
-                            tran_low_t *dqcoeff, __m256i *eob) {
-  const __m256i abs_coeff = _mm256_abs_epi16(*c);
-  __m256i mask = _mm256_cmpgt_epi16(abs_coeff, *thr);
-  mask = _mm256_or_si256(mask, _mm256_cmpeq_epi16(abs_coeff, *thr));
-  const int nzflag = _mm256_movemask_epi8(mask);
-
-  if (nzflag) {
-    __m256i q = _mm256_adds_epi16(abs_coeff, qp[0]);
-    q = _mm256_mulhi_epi16(q, qp[1]);
-    q = _mm256_sign_epi16(q, *c);
-    const __m256i dq = _mm256_mullo_epi16(q, qp[2]);
-
-    store_two_quan(q, qcoeff, dq, dqcoeff);
-    const __m256i zero = _mm256_setzero_si256();
-    const __m256i iscan = _mm256_loadu_si256((const __m256i *)iscan_ptr);
-    const __m256i zero_coeff = _mm256_cmpeq_epi16(dq, zero);
-    const __m256i nzero_coeff = _mm256_cmpeq_epi16(zero_coeff, zero);
-    __m256i cur_eob = _mm256_sub_epi16(iscan, nzero_coeff);
-    cur_eob = _mm256_and_si256(cur_eob, nzero_coeff);
-    *eob = _mm256_max_epi16(*eob, cur_eob);
-  } else {
-    write_zero(qcoeff);
-    write_zero(dqcoeff);
-  }
-}
-
 static INLINE __m256i scan_eob_256(const __m256i *iscan_ptr,
                                    __m256i *coeff256) {
   const __m256i iscan = _mm256_loadu_si256(iscan_ptr);
@@ -235,6 +222,42 @@
   *eob_ptr = accumulate_eob(eob);
 }
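+// Track the per-lane running eob maximum. v_mask holds 0xFFFF in lanes whose
+// quantized value is nonzero. The 0xD8 permute reorders iscan to match the
+// lane interleave produced by load_coefficients_avx2(); subtracting the
+// all-ones mask adds 1 to each matching scan index, so a final eob of 0
+// means "no nonzero coefficients".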
+static AOM_FORCE_INLINE __m256i get_max_lane_eob(const int16_t *iscan,
+                                                 __m256i v_eobmax,
+                                                 __m256i v_mask) {
+  const __m256i v_iscan = _mm256_loadu_si256((const __m256i *)iscan);
+  const __m256i v_iscan_perm = _mm256_permute4x64_epi64(v_iscan, 0xD8);
+  const __m256i v_iscan_plus1 = _mm256_sub_epi16(v_iscan_perm, v_mask);
+  const __m256i v_nz_iscan = _mm256_and_si256(v_iscan_plus1, v_mask);
+  return _mm256_max_epi16(v_eobmax, v_nz_iscan);
+}
+
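+// Quantize 16 coefficients at once: q = sign(c) * ((|c| + round) * quant >>
+// 16) and dq = q * dequant, with an early out when every |c| is below the
+// skip threshold.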
+static AOM_FORCE_INLINE void quantize_fp_16(
+    const __m256i *thr, const __m256i *qp, const tran_low_t *coeff_ptr,
+    const int16_t *iscan_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+    __m256i *eob) {
+  const __m256i coeff = load_coefficients_avx2(coeff_ptr);
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i mask = _mm256_cmpgt_epi16(abs_coeff, *thr);
+  const int nzflag = _mm256_movemask_epi8(mask);
+
+  if (nzflag) {
+    const __m256i tmp_rnd = _mm256_adds_epi16(abs_coeff, qp[0]);
+    const __m256i abs_q = _mm256_mulhi_epi16(tmp_rnd, qp[1]);
+    const __m256i q = _mm256_sign_epi16(abs_q, coeff);
+    const __m256i dq = _mm256_mullo_epi16(q, qp[2]);
+    const __m256i nz_mask = _mm256_cmpgt_epi16(abs_q, _mm256_setzero_si256());
+
+    store_coefficients_avx2(q, qcoeff_ptr);
+    store_coefficients_avx2(dq, dqcoeff_ptr);
+
+    *eob = get_max_lane_eob(iscan_ptr, *eob, nz_mask);
+  } else {
+    write_zero(qcoeff_ptr);
+    write_zero(dqcoeff_ptr);
+  }
+}
+
 void av1_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                           const int16_t *zbin_ptr, const int16_t *round_ptr,
                           const int16_t *quant_ptr,
@@ -245,17 +268,18 @@
   (void)scan_ptr;
   (void)zbin_ptr;
   (void)quant_shift_ptr;
-  const unsigned int step = 16;
-  __m256i qp[3];
-  __m256i coeff, thr;
   const int log_scale = 0;
+  const int step = 16;
+  __m256i qp[3], thr;
+  __m256i eob = _mm256_setzero_si256();
   init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
-  read_coeff(coeff_ptr, &coeff);
+  // Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
+  // calculating the zbin mask.
+  thr = _mm256_sub_epi16(thr, _mm256_set1_epi16(1));
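+  // (abs_coeff >= thr becomes abs_coeff > thr - 1, so quantize_fp_16() needs
+  // only a single _mm256_cmpgt_epi16().)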
-  __m256i eob = _mm256_setzero_si256();
-  quantize(&thr, qp, &coeff, iscan_ptr, qcoeff_ptr, dqcoeff_ptr, &eob);
+  quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr, &eob);
   coeff_ptr += step;
   qcoeff_ptr += step;
@@ -263,11 +287,14 @@
   iscan_ptr += step;
   n_coeffs -= step;
-  update_qp(log_scale, &thr, qp);
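+  // Inlined update_qp(): broadcast the upper 128 bits, which hold the AC
+  // round/quant/dequant values, into both lanes. thr is permuted rather than
+  // recomputed so the -1 adjustment above is preserved.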
+  qp[0] = _mm256_permute2x128_si256(qp[0], qp[0], 0x11);
+  qp[1] = _mm256_permute2x128_si256(qp[1], qp[1], 0x11);
+  qp[2] = _mm256_permute2x128_si256(qp[2], qp[2], 0x11);
+  thr = _mm256_permute2x128_si256(thr, thr, 0x11);
   while (n_coeffs > 0) {
-    read_coeff(coeff_ptr, &coeff);
-    quantize(&thr, qp, &coeff, iscan_ptr, qcoeff_ptr, dqcoeff_ptr, &eob);
+    quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                   &eob);
     coeff_ptr += step;
     qcoeff_ptr += step;