/*
* Copyright (c) 2021, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 3-Clause Clear License
* and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
* License was not distributed with this source code in the LICENSE file, you
* can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
* Alliance for Open Media Patent License 1.0 was not distributed with this
* source code in the PATENTS file, you can obtain it at
* aomedia.org/license/patent-license/.
*/
#include "av1/common/cfl.h"
#include "av1/common/reconintra.h"
#include "av1/encoder/block.h"
#include "av1/encoder/encodetxb.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/common/idct.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/random.h"
#include "av1/encoder/rdopt_utils.h"
#include "av1/encoder/tx_prune_model_weights.h"
#include "av1/encoder/tx_search.h"
struct rdcost_block_args {
const AV1_COMP *cpi;
MACROBLOCK *x;
ENTROPY_CONTEXT t_above[MAX_MIB_SIZE];
ENTROPY_CONTEXT t_left[MAX_MIB_SIZE];
RD_STATS rd_stats;
int64_t current_rd;
int64_t best_rd;
int exit_early;
int incomplete_exit;
FAST_TX_SEARCH_MODE ftxs_mode;
int skip_trellis;
};
typedef struct {
int64_t rd;
int txb_entropy_ctx;
TX_TYPE tx_type;
} TxCandidateInfo;
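// Static description of one node in the TX-size RD record quadtree: 'leaf'
// marks a terminal node, and 'children' holds the indices of up to four child
// nodes within the per-block-size table (entries <= 0 mean no child).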
typedef struct {
int leaf;
int8_t children[4];
} RD_RECORD_IDX_NODE;
typedef struct tx_size_rd_info_node {
TXB_RD_INFO *rd_info_array; // Points to array of size TX_TYPES.
struct tx_size_rd_info_node *children[4];
} TXB_RD_INFO_NODE;
// origin_threshold * 128 / 100
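// Rows are indexed by bit depth (8, 10, 12); see bd_idx in predict_skip_txfm.
// E.g., an entry of 64 corresponds to an original threshold of 50, since
// 50 * 128 / 100 = 64.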
static const uint32_t skip_pred_threshold[3][BLOCK_SIZES_ALL] = {
{
64, 64, 64, 70, 60, 60, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 64, 64, 70, 70, 68, 68,
},
{
88, 88, 88, 86, 87, 87, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 88, 88, 86, 86, 68, 68,
},
{
90, 93, 93, 90, 93, 93, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 90, 90, 90, 90, 74, 74,
},
};
// lookup table for predict_skip_txfm
// int max_tx_size = max_txsize_rect_lookup[bsize];
// if (tx_size_high[max_tx_size] > 16 || tx_size_wide[max_tx_size] > 16)
// max_tx_size = AOMMIN(max_txsize_lookup[bsize], TX_16X16);
static const TX_SIZE max_predict_sf_tx_size[BLOCK_SIZES_ALL] = {
TX_4X4, TX_4X8, TX_8X4, TX_8X8, TX_8X16, TX_16X8,
TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_16X16,
TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_4X16, TX_16X4,
TX_8X8, TX_8X8, TX_16X16, TX_16X16,
};
// look-up table for sqrt of number of pixels in a transform block
// rounded up to the nearest integer.
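// E.g., TX_8X16 covers 128 pixels and sqrt(128) ~= 11.3, stored as 12. Sizes
// with a 64-sample dimension appear to count that dimension as 32, matching
// the downscaled 64-point transform (e.g., TX_16X64 -> sqrt(16 * 32) ~= 23).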
static const int sqrt_tx_pixels_2d[TX_SIZES_ALL] = { 4, 8, 16, 32, 32, 6, 6,
12, 12, 23, 23, 32, 32, 8,
8, 16, 16, 23, 23 };
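// Searches the circular buffer of per-TX-size RD records for an entry whose
// hash matches. If none is found, a new zeroed entry is added (evicting the
// oldest one when the buffer is full) and its index is returned.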
static int find_tx_size_rd_info(TXB_RD_RECORD *cur_record,
const uint32_t hash) {
// Linear search through the circular buffer to find matching hash.
for (int i = cur_record->index_start - 1; i >= 0; i--) {
if (cur_record->hash_vals[i] == hash) return i;
}
for (int i = cur_record->num - 1; i >= cur_record->index_start; i--) {
if (cur_record->hash_vals[i] == hash) return i;
}
int index;
// If not found - add new RD info into the buffer and return its index
if (cur_record->num < TX_SIZE_RD_RECORD_BUFFER_LEN) {
index = (cur_record->index_start + cur_record->num) %
TX_SIZE_RD_RECORD_BUFFER_LEN;
cur_record->num++;
} else {
index = cur_record->index_start;
cur_record->index_start =
(cur_record->index_start + 1) % TX_SIZE_RD_RECORD_BUFFER_LEN;
}
cur_record->hash_vals[index] = hash;
av1_zero(cur_record->tx_rd_info[index]);
return index;
}
#if !CONFIG_NEW_TX_PARTITION
static const RD_RECORD_IDX_NODE rd_record_tree_8x8[] = {
{ 1, { 0 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_8x16[] = {
{ 0, { 1, 2, -1, -1 } },
{ 1, { 0, 0, 0, 0 } },
{ 1, { 0, 0, 0, 0 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_16x8[] = {
{ 0, { 1, 2, -1, -1 } },
{ 1, { 0 } },
{ 1, { 0 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_16x16[] = {
{ 0, { 1, 2, 3, 4 } }, { 1, { 0 } }, { 1, { 0 } }, { 1, { 0 } }, { 1, { 0 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_1_2[] = {
{ 0, { 1, 2, -1, -1 } },
{ 0, { 3, 4, 5, 6 } },
{ 0, { 7, 8, 9, 10 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_2_1[] = {
{ 0, { 1, 2, -1, -1 } },
{ 0, { 3, 4, 7, 8 } },
{ 0, { 5, 6, 9, 10 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_sqr[] = {
{ 0, { 1, 2, 3, 4 } }, { 0, { 5, 6, 9, 10 } }, { 0, { 7, 8, 11, 12 } },
{ 0, { 13, 14, 17, 18 } }, { 0, { 15, 16, 19, 20 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_64x128[] = {
{ 0, { 2, 3, 4, 5 } }, { 0, { 6, 7, 8, 9 } },
{ 0, { 10, 11, 14, 15 } }, { 0, { 12, 13, 16, 17 } },
{ 0, { 18, 19, 22, 23 } }, { 0, { 20, 21, 24, 25 } },
{ 0, { 26, 27, 30, 31 } }, { 0, { 28, 29, 32, 33 } },
{ 0, { 34, 35, 38, 39 } }, { 0, { 36, 37, 40, 41 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_128x64[] = {
{ 0, { 2, 3, 6, 7 } }, { 0, { 4, 5, 8, 9 } },
{ 0, { 10, 11, 18, 19 } }, { 0, { 12, 13, 20, 21 } },
{ 0, { 14, 15, 22, 23 } }, { 0, { 16, 17, 24, 25 } },
{ 0, { 26, 27, 34, 35 } }, { 0, { 28, 29, 36, 37 } },
{ 0, { 30, 31, 38, 39 } }, { 0, { 32, 33, 40, 41 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_128x128[] = {
{ 0, { 4, 5, 8, 9 } }, { 0, { 6, 7, 10, 11 } },
{ 0, { 12, 13, 16, 17 } }, { 0, { 14, 15, 18, 19 } },
{ 0, { 20, 21, 28, 29 } }, { 0, { 22, 23, 30, 31 } },
{ 0, { 24, 25, 32, 33 } }, { 0, { 26, 27, 34, 35 } },
{ 0, { 36, 37, 44, 45 } }, { 0, { 38, 39, 46, 47 } },
{ 0, { 40, 41, 48, 49 } }, { 0, { 42, 43, 50, 51 } },
{ 0, { 52, 53, 60, 61 } }, { 0, { 54, 55, 62, 63 } },
{ 0, { 56, 57, 64, 65 } }, { 0, { 58, 59, 66, 67 } },
{ 0, { 68, 69, 76, 77 } }, { 0, { 70, 71, 78, 79 } },
{ 0, { 72, 73, 80, 81 } }, { 0, { 74, 75, 82, 83 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_1_4[] = {
{ 0, { 1, -1, 2, -1 } },
{ 0, { 3, 4, -1, -1 } },
{ 0, { 5, 6, -1, -1 } },
};
static const RD_RECORD_IDX_NODE rd_record_tree_4_1[] = {
{ 0, { 1, 2, -1, -1 } },
{ 0, { 3, 4, -1, -1 } },
{ 0, { 5, 6, -1, -1 } },
};
static const RD_RECORD_IDX_NODE *rd_record_tree[BLOCK_SIZES_ALL] = {
NULL, // BLOCK_4X4
NULL, // BLOCK_4X8
NULL, // BLOCK_8X4
rd_record_tree_8x8, // BLOCK_8X8
rd_record_tree_8x16, // BLOCK_8X16
rd_record_tree_16x8, // BLOCK_16X8
rd_record_tree_16x16, // BLOCK_16X16
rd_record_tree_1_2, // BLOCK_16X32
rd_record_tree_2_1, // BLOCK_32X16
rd_record_tree_sqr, // BLOCK_32X32
rd_record_tree_1_2, // BLOCK_32X64
rd_record_tree_2_1, // BLOCK_64X32
rd_record_tree_sqr, // BLOCK_64X64
rd_record_tree_64x128, // BLOCK_64X128
rd_record_tree_128x64, // BLOCK_128X64
rd_record_tree_128x128, // BLOCK_128X128
NULL, // BLOCK_4X16
NULL, // BLOCK_16X4
rd_record_tree_1_4, // BLOCK_8X32
rd_record_tree_4_1, // BLOCK_32X8
rd_record_tree_1_4, // BLOCK_16X64
rd_record_tree_4_1, // BLOCK_64X16
};
static const int rd_record_tree_size[BLOCK_SIZES_ALL] = {
0, // BLOCK_4X4
0, // BLOCK_4X8
0, // BLOCK_8X4
sizeof(rd_record_tree_8x8) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_8X8
sizeof(rd_record_tree_8x16) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_8X16
sizeof(rd_record_tree_16x8) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_16X8
sizeof(rd_record_tree_16x16) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_16X16
sizeof(rd_record_tree_1_2) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_16X32
sizeof(rd_record_tree_2_1) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_32X16
sizeof(rd_record_tree_sqr) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_32X32
sizeof(rd_record_tree_1_2) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_32X64
sizeof(rd_record_tree_2_1) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_64X32
sizeof(rd_record_tree_sqr) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_64X64
sizeof(rd_record_tree_64x128) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_64X128
sizeof(rd_record_tree_128x64) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_128X64
sizeof(rd_record_tree_128x128) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_128X128
0, // BLOCK_4X16
0, // BLOCK_16X4
sizeof(rd_record_tree_1_4) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_8X32
sizeof(rd_record_tree_4_1) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_32X8
sizeof(rd_record_tree_1_4) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_16X64
sizeof(rd_record_tree_4_1) / sizeof(RD_RECORD_IDX_NODE), // BLOCK_64X16
};
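// Converts the static, index-based tree description for 'bsize' into a tree
// of node pointers rooted at tree[0]; leaf nodes get NULL children.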
static INLINE void init_rd_record_tree(TXB_RD_INFO_NODE *tree,
BLOCK_SIZE bsize) {
const RD_RECORD_IDX_NODE *rd_record = rd_record_tree[bsize];
const int size = rd_record_tree_size[bsize];
for (int i = 0; i < size; ++i) {
if (rd_record[i].leaf) {
av1_zero(tree[i].children);
} else {
for (int j = 0; j < 4; ++j) {
const int8_t idx = rd_record[i].children[j];
tree[i].children[j] = idx > 0 ? &tree[idx] : NULL;
}
}
}
}
// Go through all TX blocks that could be used in TX size search, compute
// residual hash values for them and find matching RD info that stores previous
// RD search results for these TX blocks. The idea is to prevent repeated
// rate/distortion computations that happen because of the combination of
// partition and TX size search. The resulting RD info records are returned in
// the form of a quadtree for easier access in actual TX size search.
static int find_tx_size_rd_records(MACROBLOCK *x, BLOCK_SIZE bsize,
TXB_RD_INFO_NODE *dst_rd_info) {
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
TXB_RD_RECORD *rd_records_table[4] = { txfm_info->txb_rd_record_8X8,
txfm_info->txb_rd_record_16X16,
txfm_info->txb_rd_record_32X32,
txfm_info->txb_rd_record_64X64 };
const TX_SIZE max_square_tx_size = max_txsize_lookup[bsize];
const int bw = block_size_wide[bsize];
const int bh = block_size_high[bsize];
// Hashing is performed only for square TX sizes larger than TX_4X4
if (max_square_tx_size < TX_8X8) return 0;
const int diff_stride = bw;
const struct macroblock_plane *const p = &x->plane[0];
const int16_t *diff = &p->src_diff[0];
init_rd_record_tree(dst_rd_info, bsize);
// Coordinates of the top-left corner of current block within the superblock
// measured in pixels:
const int mi_row = x->e_mbd.mi_row;
const int mi_col = x->e_mbd.mi_col;
const int mi_row_in_sb = (mi_row % MAX_MIB_SIZE) << MI_SIZE_LOG2;
const int mi_col_in_sb = (mi_col % MAX_MIB_SIZE) << MI_SIZE_LOG2;
int cur_rd_info_idx = 0;
int cur_tx_depth = 0;
TX_SIZE cur_tx_size = max_txsize_rect_lookup[bsize];
while (cur_tx_depth <= MAX_VARTX_DEPTH) {
const int cur_tx_bw = tx_size_wide[cur_tx_size];
const int cur_tx_bh = tx_size_high[cur_tx_size];
if (cur_tx_bw < 8 || cur_tx_bh < 8) break;
const TX_SIZE next_tx_size = sub_tx_size_map[cur_tx_size];
const int tx_size_idx = cur_tx_size - TX_8X8;
for (int row = 0; row < bh; row += cur_tx_bh) {
for (int col = 0; col < bw; col += cur_tx_bw) {
if (cur_tx_bw != cur_tx_bh) {
// Use dummy nodes for all rectangular transforms within the
// TX size search tree.
dst_rd_info[cur_rd_info_idx].rd_info_array = NULL;
} else {
// Get spatial location of this TX block within the superblock
// (measured in cur_tx_bsize units).
const int row_in_sb = (mi_row_in_sb + row) / cur_tx_bh;
const int col_in_sb = (mi_col_in_sb + col) / cur_tx_bw;
int16_t hash_data[MAX_SB_SQUARE];
int16_t *cur_hash_row = hash_data;
const int16_t *cur_diff_row = diff + row * diff_stride + col;
for (int i = 0; i < cur_tx_bh; i++) {
memcpy(cur_hash_row, cur_diff_row, sizeof(*hash_data) * cur_tx_bw);
cur_hash_row += cur_tx_bw;
cur_diff_row += diff_stride;
}
const int hash = av1_get_crc32c_value(
&txfm_info->mb_rd_record.crc_calculator, (uint8_t *)hash_data,
2 * cur_tx_bw * cur_tx_bh);
// Find corresponding RD info based on the hash value.
const int record_idx =
row_in_sb * (MAX_MIB_SIZE >> (tx_size_idx + 1)) + col_in_sb;
TXB_RD_RECORD *records = &rd_records_table[tx_size_idx][record_idx];
int idx = find_tx_size_rd_info(records, hash);
dst_rd_info[cur_rd_info_idx].rd_info_array =
&records->tx_rd_info[idx];
}
++cur_rd_info_idx;
}
}
cur_tx_size = next_tx_size;
++cur_tx_depth;
}
return 1;
}
#endif // !CONFIG_NEW_TX_PARTITION
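// Hashes the whole-block luma residual (CRC32C over src_diff) and folds the
// block size into the low 5 bits so that blocks of different sizes hash to
// different values.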
static INLINE uint32_t get_block_residue_hash(MACROBLOCK *x, BLOCK_SIZE bsize) {
const int rows = block_size_high[bsize];
const int cols = block_size_wide[bsize];
const int16_t *diff = x->plane[0].src_diff;
const uint32_t hash =
av1_get_crc32c_value(&x->txfm_search_info.mb_rd_record.crc_calculator,
(uint8_t *)diff, 2 * rows * cols);
return (hash << 5) + bsize;
}
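// Searches the macroblock-level RD record ring buffer for an entry whose hash
// matches and returns its index, or -1 if there is no match. The search is
// skipped when ref_best_rd is INT64_MAX.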
static INLINE int32_t find_mb_rd_info(const MB_RD_RECORD *const mb_rd_record,
const int64_t ref_best_rd,
const uint32_t hash) {
int32_t match_index = -1;
if (ref_best_rd != INT64_MAX) {
for (int i = 0; i < mb_rd_record->num; ++i) {
const int index = (mb_rd_record->index_start + i) % RD_RECORD_BUFFER_LEN;
// If there is a match in the tx_rd_record, fetch the RD decision and
// terminate early.
if (mb_rd_record->tx_rd_info[index].hash_value == hash) {
match_index = index;
break;
}
}
}
return match_index;
}
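// Restores a previously saved TX search result (tx size, block skip flags,
// tx types and RD stats) into the current macroblock.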
static AOM_INLINE void fetch_tx_rd_info(int n4,
const MB_RD_INFO *const tx_rd_info,
RD_STATS *const rd_stats,
MACROBLOCK *const x) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
mbmi->tx_size = tx_rd_info->tx_size;
memcpy(x->txfm_search_info.blk_skip, tx_rd_info->blk_skip,
sizeof(tx_rd_info->blk_skip[0]) * n4);
av1_copy(mbmi->inter_tx_size, tx_rd_info->inter_tx_size);
#if CONFIG_NEW_TX_PARTITION
av1_copy(mbmi->tx_partition_type, tx_rd_info->tx_partition_type);
#endif // CONFIG_NEW_TX_PARTITION
av1_copy_array(xd->tx_type_map, tx_rd_info->tx_type_map, n4);
*rd_stats = tx_rd_info->rd_stats;
}
// Compute the pixel domain distortion from diff on all visible 4x4s in the
// transform block.
static INLINE int64_t pixel_diff_dist(const MACROBLOCK *x, int plane,
int blk_row, int blk_col,
const BLOCK_SIZE plane_bsize,
const BLOCK_SIZE tx_bsize,
unsigned int *block_mse_q8) {
int visible_rows, visible_cols;
const MACROBLOCKD *xd = &x->e_mbd;
get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize, NULL,
NULL, &visible_cols, &visible_rows);
const int diff_stride = block_size_wide[plane_bsize];
const int16_t *diff = x->plane[plane].src_diff;
diff += ((blk_row * diff_stride + blk_col) << MI_SIZE_LOG2);
uint64_t sse =
aom_sum_squares_2d_i16(diff, diff_stride, visible_cols, visible_rows);
if (block_mse_q8 != NULL) {
if (visible_cols > 0 && visible_rows > 0)
*block_mse_q8 =
(unsigned int)((256 * sse) / (visible_cols * visible_rows));
else
*block_mse_q8 = UINT_MAX;
}
return sse;
}
// Computes the residual block's SSE and mean on all visible 4x4s in the
// transform block
static INLINE int64_t pixel_diff_stats(
MACROBLOCK *x, int plane, int blk_row, int blk_col,
const BLOCK_SIZE plane_bsize, const BLOCK_SIZE tx_bsize,
unsigned int *block_mse_q8, int64_t *per_px_mean, uint64_t *block_var) {
int visible_rows, visible_cols;
const MACROBLOCKD *xd = &x->e_mbd;
get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize, NULL,
NULL, &visible_cols, &visible_rows);
const int diff_stride = block_size_wide[plane_bsize];
const int16_t *diff = x->plane[plane].src_diff;
diff += ((blk_row * diff_stride + blk_col) << MI_SIZE_LOG2);
uint64_t sse = 0;
int sum = 0;
sse = aom_sum_sse_2d_i16(diff, diff_stride, visible_cols, visible_rows, &sum);
if (visible_cols > 0 && visible_rows > 0) {
aom_clear_system_state();
double norm_factor = 1.0 / (visible_cols * visible_rows);
int sign_sum = sum > 0 ? 1 : -1;
// Conversion to transform domain
*per_px_mean = (int64_t)(norm_factor * abs(sum)) << 7;
*per_px_mean = sign_sum * (*per_px_mean);
*block_mse_q8 = (unsigned int)(norm_factor * (256 * sse));
*block_var = (uint64_t)(sse - (uint64_t)(norm_factor * sum * sum));
} else {
*block_mse_q8 = UINT_MAX;
}
return sse;
}
// Uses simple features on top of DCT coefficients to quickly predict
// whether optimal RD decision is to skip encoding the residual.
// The sse value is stored in dist.
static int predict_skip_txfm(const AV1_COMMON *cm, MACROBLOCK *x,
BLOCK_SIZE bsize, int64_t *dist,
int reduced_tx_set) {
const TxfmSearchParams *txfm_params = &x->txfm_search_params;
const int bw = block_size_wide[bsize];
const int bh = block_size_high[bsize];
const MACROBLOCKD *xd = &x->e_mbd;
(void)cm;
const int32_t dc_q =
av1_dc_quant_QTX(x->qindex, 0, cm->seq_params.base_y_dc_delta_q, xd->bd);
*dist = pixel_diff_dist(x, 0, 0, 0, bsize, bsize, NULL);
const int64_t mse = *dist / bw / bh;
// Normalized quantizer takes the transform upscaling factor (8 for tx size
// smaller than 32) into account.
const int32_t normalized_dc_q =
ROUND_POWER_OF_TWO(dc_q, (3 + QUANT_TABLE_BITS));
const int64_t mse_thresh = (int64_t)normalized_dc_q * normalized_dc_q / 8;
// For faster early skip decision, use dist to compare against threshold so
// that quality risk is less for the skip=1 decision. Otherwise, use mse
// since the fwd_txfm coeff checks will take care of quality.
// TODO(any): Use dist to return 0 when skip_txfm_level is 1
int64_t pred_err = (txfm_params->skip_txfm_level >= 2) ? *dist : mse;
// Predict not to skip when error is larger than threshold.
if (pred_err > mse_thresh) return 0;
// Return as skip otherwise for aggressive early skip
else if (txfm_params->skip_txfm_level >= 2)
return 1;
const int max_tx_size = max_predict_sf_tx_size[bsize];
const int tx_h = tx_size_high[max_tx_size];
const int tx_w = tx_size_wide[max_tx_size];
DECLARE_ALIGNED(32, tran_low_t, coefs[32 * 32]);
TxfmParam param;
param.tx_type = DCT_DCT;
param.tx_size = max_tx_size;
param.bd = xd->bd;
param.lossless = 0;
param.tx_set_type = av1_get_ext_tx_set_type(
param.tx_size, is_inter_block(xd->mi[0], xd->tree_type), reduced_tx_set);
const int bd_idx = (xd->bd == 8) ? 0 : ((xd->bd == 10) ? 1 : 2);
const uint32_t max_qcoef_thresh = skip_pred_threshold[bd_idx][bsize];
const int16_t *src_diff = x->plane[0].src_diff;
const int n_coeff = tx_w * tx_h;
const int32_t ac_q = av1_ac_quant_QTX(x->qindex, 0, xd->bd);
const uint32_t dc_thresh =
(uint32_t)ROUND_POWER_OF_TWO((max_qcoef_thresh * dc_q), QUANT_TABLE_BITS);
const uint32_t ac_thresh =
(uint32_t)ROUND_POWER_OF_TWO((max_qcoef_thresh * ac_q), QUANT_TABLE_BITS);
for (int row = 0; row < bh; row += tx_h) {
for (int col = 0; col < bw; col += tx_w) {
av1_fwd_txfm(src_diff + col, coefs, bw, &param);
// Operating on TX domain, not pixels; we want the QTX quantizers
const uint32_t dc_coef = (((uint32_t)abs(coefs[0])) << 7);
if (dc_coef >= dc_thresh) return 0;
for (int i = 1; i < n_coeff; ++i) {
const uint32_t ac_coef = (((uint32_t)abs(coefs[i])) << 7);
if (ac_coef >= ac_thresh) return 0;
}
}
src_diff += tx_h * bw;
}
return 1;
}
// Used to set proper context for early termination with skip = 1.
static AOM_INLINE void set_skip_txfm(MACROBLOCK *x, RD_STATS *rd_stats,
int bsize, int64_t dist) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
const int n4 = bsize_to_num_blk(bsize);
const TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
memset(xd->tx_type_map, DCT_DCT, sizeof(xd->tx_type_map[0]) * n4);
#if CONFIG_CROSS_CHROMA_TX
memset(xd->cctx_type_map, CCTX_NONE, sizeof(xd->cctx_type_map[0]) * n4);
#endif // CONFIG_CROSS_CHROMA_TX
memset(mbmi->inter_tx_size, tx_size, sizeof(mbmi->inter_tx_size));
#if CONFIG_NEW_TX_PARTITION
memset(mbmi->tx_partition_type, TX_PARTITION_NONE,
sizeof(mbmi->tx_partition_type));
#endif // CONFIG_NEW_TX_PARTITION
mbmi->tx_size = tx_size;
for (int i = 0; i < n4; ++i)
set_blk_skip(x->txfm_search_info.blk_skip, 0, i, 1);
rd_stats->skip_txfm = 1;
dist = ROUND_POWER_OF_TWO(dist, (xd->bd - 8) * 2);
rd_stats->dist = rd_stats->sse = (dist << 4);
// Although the decision here is to mark the block as skip based on luma
// stats, the block may become non-skip after the chroma RD search. In
// addition, the intermediate non-skip costs computed by the caller would be
// incorrect if the rate were set to zero (i.e., if zero_blk_rate were not
// accounted for). Hence, an intermediate rate for coding the luma tx blocks
// as skip is populated here, and the caller sets the final rate based on the
// final RD decision (skip vs. non-skip). The rate populated below corresponds
// to coding all the tx blocks in the current block with zero_blk_rate (based
// on the maximum possible tx size). E.g., for a 128x128 block the rate would
// be 4 * zero_blk_rate, where zero_blk_rate is the cost of coding one 64x64
// tx block as 'all zeros'.
ENTROPY_CONTEXT ctxa[MAX_MIB_SIZE];
ENTROPY_CONTEXT ctxl[MAX_MIB_SIZE];
av1_get_entropy_contexts(bsize, &xd->plane[0], ctxa, ctxl);
ENTROPY_CONTEXT *ta = ctxa;
ENTROPY_CONTEXT *tl = ctxl;
const TX_SIZE txs_ctx = get_txsize_entropy_ctx(tx_size);
TXB_CTX txb_ctx;
get_txb_ctx(bsize, tx_size, 0, ta, tl, &txb_ctx
#if CONFIG_FORWARDSKIP
,
mbmi->fsc_mode[xd->tree_type == CHROMA_PART]
#endif // CONFIG_FORWARDSKIP
);
const int zero_blk_rate = x->coeff_costs.coeff_costs[txs_ctx][PLANE_TYPE_Y]
.txb_skip_cost[txb_ctx.txb_skip_ctx][1];
rd_stats->rate = zero_blk_rate *
(block_size_wide[bsize] >> tx_size_wide_log2[tx_size]) *
(block_size_high[bsize] >> tx_size_high_log2[tx_size]);
}
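// Saves the current TX search result into the macroblock RD record ring
// buffer, evicting the oldest entry when the buffer is full.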
static AOM_INLINE void save_tx_rd_info(int n4, uint32_t hash,
const MACROBLOCK *const x,
const RD_STATS *const rd_stats,
MB_RD_RECORD *tx_rd_record) {
int index;
if (tx_rd_record->num < RD_RECORD_BUFFER_LEN) {
index =
(tx_rd_record->index_start + tx_rd_record->num) % RD_RECORD_BUFFER_LEN;
++tx_rd_record->num;
} else {
index = tx_rd_record->index_start;
tx_rd_record->index_start =
(tx_rd_record->index_start + 1) % RD_RECORD_BUFFER_LEN;
}
MB_RD_INFO *const tx_rd_info = &tx_rd_record->tx_rd_info[index];
const MACROBLOCKD *const xd = &x->e_mbd;
const MB_MODE_INFO *const mbmi = xd->mi[0];
tx_rd_info->hash_value = hash;
tx_rd_info->tx_size = mbmi->tx_size;
memcpy(tx_rd_info->blk_skip, x->txfm_search_info.blk_skip,
sizeof(tx_rd_info->blk_skip[0]) * n4);
av1_copy(tx_rd_info->inter_tx_size, mbmi->inter_tx_size);
#if CONFIG_NEW_TX_PARTITION
av1_copy(tx_rd_info->tx_partition_type, mbmi->tx_partition_type);
#endif // CONFIG_NEW_TX_PARTITION
av1_copy_array(tx_rd_info->tx_type_map, xd->tx_type_map, n4);
tx_rd_info->rd_stats = *rd_stats;
}
// NOTE: CONFIG_COLLECT_RD_STATS has 3 possible values
// 0: Do not collect any RD stats
// 1: Collect RD stats for transform units
// 2: Collect RD stats for partition units
#if CONFIG_COLLECT_RD_STATS
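// Splits the block into a 4x4 grid of sub-blocks, measures the
// source/prediction SSE in each, and writes the normalized per-column energy
// shares to 'hordist' and per-row shares to 'verdist' (3 or 4 entries each,
// depending on 'need_4th').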
static AOM_INLINE void get_energy_distribution_fine(
const AV1_COMP *cpi, BLOCK_SIZE bsize, const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride, int need_4th, double *hordist,
double *verdist) {
const int bw = block_size_wide[bsize];
const int bh = block_size_high[bsize];
unsigned int esq[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
if (bsize < BLOCK_16X16 || (bsize >= BLOCK_4X16 && bsize <= BLOCK_32X8)) {
// Special cases: calculate 'esq' values manually, as we don't have 'vf'
// functions for the 16 (very small) sub-blocks of this block.
const int w_shift = (bw == 4) ? 0 : (bw == 8) ? 1 : (bw == 16) ? 2 : 3;
const int h_shift = (bh == 4) ? 0 : (bh == 8) ? 1 : (bh == 16) ? 2 : 3;
assert(bw <= 32);
assert(bh <= 32);
assert(((bw - 1) >> w_shift) + (((bh - 1) >> h_shift) << 2) == 15);
const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
const uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
for (int i = 0; i < bh; ++i)
for (int j = 0; j < bw; ++j) {
const int index = (j >> w_shift) + ((i >> h_shift) << 2);
esq[index] += (src16[j + i * src_stride] - dst16[j + i * dst_stride]) *
(src16[j + i * src_stride] - dst16[j + i * dst_stride]);
}
} else { // Calculate 'esq' values using 'vf' functions on the 16 sub-blocks.
const int f_index =
(bsize < BLOCK_SIZES) ? bsize - BLOCK_16X16 : bsize - BLOCK_8X16;
assert(f_index >= 0 && f_index < BLOCK_SIZES_ALL);
const BLOCK_SIZE subsize = (BLOCK_SIZE)f_index;
assert(block_size_wide[bsize] == 4 * block_size_wide[subsize]);
assert(block_size_high[bsize] == 4 * block_size_high[subsize]);
cpi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[0]);
cpi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
&esq[1]);
cpi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
&esq[2]);
cpi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
dst_stride, &esq[3]);
src += bh / 4 * src_stride;
dst += bh / 4 * dst_stride;
cpi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[4]);
cpi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
&esq[5]);
cpi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
&esq[6]);
cpi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
dst_stride, &esq[7]);
src += bh / 4 * src_stride;
dst += bh / 4 * dst_stride;
cpi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[8]);
cpi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
&esq[9]);
cpi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
&esq[10]);
cpi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
dst_stride, &esq[11]);
src += bh / 4 * src_stride;
dst += bh / 4 * dst_stride;
cpi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[12]);
cpi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
&esq[13]);
cpi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
&esq[14]);
cpi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
dst_stride, &esq[15]);
}
double total = (double)esq[0] + esq[1] + esq[2] + esq[3] + esq[4] + esq[5] +
esq[6] + esq[7] + esq[8] + esq[9] + esq[10] + esq[11] +
esq[12] + esq[13] + esq[14] + esq[15];
if (total > 0) {
const double e_recip = 1.0 / total;
hordist[0] = ((double)esq[0] + esq[4] + esq[8] + esq[12]) * e_recip;
hordist[1] = ((double)esq[1] + esq[5] + esq[9] + esq[13]) * e_recip;
hordist[2] = ((double)esq[2] + esq[6] + esq[10] + esq[14]) * e_recip;
if (need_4th) {
hordist[3] = ((double)esq[3] + esq[7] + esq[11] + esq[15]) * e_recip;
}
verdist[0] = ((double)esq[0] + esq[1] + esq[2] + esq[3]) * e_recip;
verdist[1] = ((double)esq[4] + esq[5] + esq[6] + esq[7]) * e_recip;
verdist[2] = ((double)esq[8] + esq[9] + esq[10] + esq[11]) * e_recip;
if (need_4th) {
verdist[3] = ((double)esq[12] + esq[13] + esq[14] + esq[15]) * e_recip;
}
} else {
hordist[0] = verdist[0] = 0.25;
hordist[1] = verdist[1] = 0.25;
hordist[2] = verdist[2] = 0.25;
if (need_4th) {
hordist[3] = verdist[3] = 0.25;
}
}
}
static double get_sse_norm(const int16_t *diff, int stride, int w, int h) {
double sum = 0.0;
for (int j = 0; j < h; ++j) {
for (int i = 0; i < w; ++i) {
const int err = diff[j * stride + i];
sum += err * err;
}
}
assert(w > 0 && h > 0);
return sum / (w * h);
}
static double get_sad_norm(const int16_t *diff, int stride, int w, int h) {
double sum = 0.0;
for (int j = 0; j < h; ++j) {
for (int i = 0; i < w; ++i) {
sum += abs(diff[j * stride + i]);
}
}
assert(w > 0 && h > 0);
return sum / (w * h);
}
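// Computes normalized SSE and SAD for each quadrant of a transform-sized
// block, using variance/SAD function pointers when a half-sized block type
// exists and direct computation from src_diff otherwise.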
static AOM_INLINE void get_2x2_normalized_sses_and_sads(
const AV1_COMP *const cpi, BLOCK_SIZE tx_bsize, const uint8_t *const src,
int src_stride, const uint8_t *const dst, int dst_stride,
const int16_t *const src_diff, int diff_stride, double *const sse_norm_arr,
double *const sad_norm_arr) {
const BLOCK_SIZE tx_bsize_half =
get_partition_subsize(tx_bsize, PARTITION_SPLIT);
if (tx_bsize_half == BLOCK_INVALID) { // manually calculate stats
const int half_width = block_size_wide[tx_bsize] / 2;
const int half_height = block_size_high[tx_bsize] / 2;
for (int row = 0; row < 2; ++row) {
for (int col = 0; col < 2; ++col) {
const int16_t *const this_src_diff =
src_diff + row * half_height * diff_stride + col * half_width;
if (sse_norm_arr) {
sse_norm_arr[row * 2 + col] =
get_sse_norm(this_src_diff, diff_stride, half_width, half_height);
}
if (sad_norm_arr) {
sad_norm_arr[row * 2 + col] =
get_sad_norm(this_src_diff, diff_stride, half_width, half_height);
}
}
}
} else { // use function pointers to calculate stats
const int half_width = block_size_wide[tx_bsize_half];
const int half_height = block_size_high[tx_bsize_half];
const int num_samples_half = half_width * half_height;
for (int row = 0; row < 2; ++row) {
for (int col = 0; col < 2; ++col) {
const uint8_t *const this_src =
src + row * half_height * src_stride + col * half_width;
const uint8_t *const this_dst =
dst + row * half_height * dst_stride + col * half_width;
if (sse_norm_arr) {
unsigned int this_sse;
cpi->fn_ptr[tx_bsize_half].vf(this_src, src_stride, this_dst,
dst_stride, &this_sse);
sse_norm_arr[row * 2 + col] = (double)this_sse / num_samples_half;
}
if (sad_norm_arr) {
const unsigned int this_sad = cpi->fn_ptr[tx_bsize_half].sdf(
this_src, src_stride, this_dst, dst_stride);
sad_norm_arr[row * 2 + col] = (double)this_sad / num_samples_half;
}
}
}
}
}
#if CONFIG_COLLECT_RD_STATS == 1
static double get_mean(const int16_t *diff, int stride, int w, int h) {
double sum = 0.0;
for (int j = 0; j < h; ++j) {
for (int i = 0; i < w; ++i) {
sum += diff[j * stride + i];
}
}
assert(w > 0 && h > 0);
return sum / (w * h);
}
static AOM_INLINE void PrintTransformUnitStats(
const AV1_COMP *const cpi, MACROBLOCK *x, const RD_STATS *const rd_stats,
int blk_row, int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
TX_TYPE tx_type, int64_t rd) {
if (rd_stats->rate == INT_MAX || rd_stats->dist == INT64_MAX) return;
// Generate small sample to restrict output size.
static unsigned int seed = 21743;
if (lcg_rand16(&seed) % 256 > 0) return;
const char output_file[] = "tu_stats.txt";
FILE *fout = fopen(output_file, "a");
if (!fout) return;
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
const MACROBLOCKD *const xd = &x->e_mbd;
const int plane = 0;
struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int txw = tx_size_wide[tx_size];
const int txh = tx_size_high[tx_size];
const int dequant_shift = xd->bd - 5;
const int q_step =
ROUND_POWER_OF_TWO(p->dequant_QTX[1], QUANT_TABLE_BITS) >> dequant_shift;
const int num_samples = txw * txh;
const double rate_norm = (double)rd_stats->rate / num_samples;
const double dist_norm = (double)rd_stats->dist / num_samples;
fprintf(fout, "%g %g", rate_norm, dist_norm);
const int src_stride = p->src.stride;
const uint8_t *const src =
&p->src.buf[(blk_row * src_stride + blk_col) << MI_SIZE_LOG2];
const int dst_stride = pd->dst.stride;
const uint8_t *const dst =
&pd->dst.buf[(blk_row * dst_stride + blk_col) << MI_SIZE_LOG2];
unsigned int sse;
cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
const double sse_norm = (double)sse / num_samples;
const unsigned int sad =
cpi->fn_ptr[tx_bsize].sdf(src, src_stride, dst, dst_stride);
const double sad_norm = (double)sad / num_samples;
fprintf(fout, " %g %g", sse_norm, sad_norm);
const int diff_stride = block_size_wide[plane_bsize];
const int16_t *const src_diff =
&p->src_diff[(blk_row * diff_stride + blk_col) << MI_SIZE_LOG2];
double sse_norm_arr[4], sad_norm_arr[4];
get_2x2_normalized_sses_and_sads(cpi, tx_bsize, src, src_stride, dst,
dst_stride, src_diff, diff_stride,
sse_norm_arr, sad_norm_arr);
for (int i = 0; i < 4; ++i) {
fprintf(fout, " %g", sse_norm_arr[i]);
}
for (int i = 0; i < 4; ++i) {
fprintf(fout, " %g", sad_norm_arr[i]);
}
const TX_TYPE_1D tx_type_1d_row = htx_tab[tx_type];
const TX_TYPE_1D tx_type_1d_col = vtx_tab[tx_type];
fprintf(fout, " %d %d %d %d %d", q_step, tx_size_wide[tx_size],
tx_size_high[tx_size], tx_type_1d_row, tx_type_1d_col);
int model_rate;
int64_t model_dist;
model_rd_sse_fn[MODELRD_CURVFIT](cpi, x, tx_bsize, plane, sse, num_samples,
&model_rate, &model_dist);
const double model_rate_norm = (double)model_rate / num_samples;
const double model_dist_norm = (double)model_dist / num_samples;
fprintf(fout, " %g %g", model_rate_norm, model_dist_norm);
const double mean = get_mean(src_diff, diff_stride, txw, txh);
float hor_corr, vert_corr;
av1_get_horver_correlation_full(src_diff, diff_stride, txw, txh, &hor_corr,
&vert_corr);
fprintf(fout, " %g %g %g", mean, hor_corr, vert_corr);
double hdist[4] = { 0 }, vdist[4] = { 0 };
get_energy_distribution_fine(cpi, tx_bsize, src, src_stride, dst, dst_stride,
1, hdist, vdist);
fprintf(fout, " %g %g %g %g %g %g %g %g", hdist[0], hdist[1], hdist[2],
hdist[3], vdist[0], vdist[1], vdist[2], vdist[3]);
fprintf(fout, " %d %" PRId64, x->rdmult, rd);
fprintf(fout, "\n");
fclose(fout);
}
#endif // CONFIG_COLLECT_RD_STATS == 1
#if CONFIG_COLLECT_RD_STATS >= 2
static int64_t get_sse(const AV1_COMP *cpi, const MACROBLOCK *x) {
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
const MACROBLOCKD *xd = &x->e_mbd;
const MB_MODE_INFO *mbmi = xd->mi[0];
int64_t total_sse = 0;
for (int plane = 0; plane < num_planes; ++plane) {
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
const BLOCK_SIZE bs = get_plane_block_size(mbmi->sb_type, pd->subsampling_x,
pd->subsampling_y);
unsigned int sse;
if (x->skip_chroma_rd && plane) continue;
cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
&sse);
total_sse += sse;
}
total_sse <<= 4;
return total_sse;
}
static int get_est_rate_dist(const TileDataEnc *tile_data, BLOCK_SIZE bsize,
int64_t sse, int *est_residue_cost,
int64_t *est_dist) {
aom_clear_system_state();
const InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
if (md->ready) {
if (sse < md->dist_mean) {
*est_residue_cost = 0;
*est_dist = sse;
} else {
*est_dist = (int64_t)round(md->dist_mean);
const double est_ld = md->a * sse + md->b;
// Clamp estimated rate cost by INT_MAX / 2.
// TODO(angiebird@google.com): find better solution than clamping.
if (fabs(est_ld) < 1e-2) {
*est_residue_cost = INT_MAX / 2;
} else {
double est_residue_cost_dbl = ((sse - md->dist_mean) / est_ld);
if (est_residue_cost_dbl < 0) {
*est_residue_cost = 0;
} else {
*est_residue_cost =
(int)AOMMIN((int64_t)round(est_residue_cost_dbl), INT_MAX / 2);
}
}
if (*est_residue_cost <= 0) {
*est_residue_cost = 0;
*est_dist = sse;
}
}
return 1;
}
return 0;
}
static double get_highbd_diff_mean(const uint8_t *src8, int src_stride,
const uint8_t *dst8, int dst_stride, int w,
int h) {
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
double sum = 0.0;
for (int j = 0; j < h; ++j) {
for (int i = 0; i < w; ++i) {
const int diff = src[j * src_stride + i] - dst[j * dst_stride + i];
sum += diff;
}
}
assert(w > 0 && h > 0);
return sum / (w * h);
}
static AOM_INLINE void PrintPredictionUnitStats(const AV1_COMP *const cpi,
const TileDataEnc *tile_data,
MACROBLOCK *x,
const RD_STATS *const rd_stats,
BLOCK_SIZE plane_bsize) {
if (rd_stats->rate == INT_MAX || rd_stats->dist == INT64_MAX) return;
if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 &&
(tile_data == NULL ||
!tile_data->inter_mode_rd_models[plane_bsize].ready))
return;
(void)tile_data;
// Generate small sample to restrict output size.
static unsigned int seed = 95014;
if ((lcg_rand16(&seed) % (1 << (14 - num_pels_log2_lookup[plane_bsize]))) !=
1)
return;
const char output_file[] = "pu_stats.txt";
FILE *fout = fopen(output_file, "a");
if (!fout) return;
MACROBLOCKD *const xd = &x->e_mbd;
const int plane = 0;
struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
const int diff_stride = block_size_wide[plane_bsize];
int bw, bh;
get_txb_dimensions(xd, plane, plane_bsize, 0, 0, plane_bsize, NULL, NULL, &bw,
&bh);
const int num_samples = bw * bh;
const int dequant_shift = xd->bd - 5;
const int q_step =
ROUND_POWER_OF_TWO(p->dequant_QTX[1], QUANT_TABLE_BITS) >> dequant_shift;
const int shift = (xd->bd - 8);
const double rate_norm = (double)rd_stats->rate / num_samples;
const double dist_norm = (double)rd_stats->dist / num_samples;
const double rdcost_norm =
(double)RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist) / num_samples;
fprintf(fout, "%g %g %g", rate_norm, dist_norm, rdcost_norm);
const int src_stride = p->src.stride;
const uint8_t *const src = p->src.buf;
const int dst_stride = pd->dst.stride;
const uint8_t *const dst = pd->dst.buf;
const int16_t *const src_diff = p->src_diff;
int64_t sse = calculate_sse(xd, p, pd, bw, bh);
const double sse_norm = (double)sse / num_samples;
const unsigned int sad =
cpi->fn_ptr[plane_bsize].sdf(src, src_stride, dst, dst_stride);
const double sad_norm =
(double)sad / (1 << num_pels_log2_lookup[plane_bsize]);
fprintf(fout, " %g %g", sse_norm, sad_norm);
double sse_norm_arr[4], sad_norm_arr[4];
get_2x2_normalized_sses_and_sads(cpi, plane_bsize, src, src_stride, dst,
dst_stride, src_diff, diff_stride,
sse_norm_arr, sad_norm_arr);
if (shift) {
for (int k = 0; k < 4; ++k) sse_norm_arr[k] /= (1 << (2 * shift));
for (int k = 0; k < 4; ++k) sad_norm_arr[k] /= (1 << shift);
}
for (int i = 0; i < 4; ++i) {
fprintf(fout, " %g", sse_norm_arr[i]);
}
for (int i = 0; i < 4; ++i) {
fprintf(fout, " %g", sad_norm_arr[i]);
}
fprintf(fout, " %d %d %d %d", q_step, x->rdmult, bw, bh);
int model_rate;
int64_t model_dist;
model_rd_sse_fn[MODELRD_CURVFIT](cpi, x, plane_bsize, plane, sse, num_samples,
&model_rate, &model_dist);
const double model_rdcost_norm =
(double)RDCOST(x->rdmult, model_rate, model_dist) / num_samples;
const double model_rate_norm = (double)model_rate / num_samples;
const double model_dist_norm = (double)model_dist / num_samples;
fprintf(fout, " %g %g %g", model_rate_norm, model_dist_norm,
model_rdcost_norm);
double mean;
mean = get_highbd_diff_mean(p->src.buf, p->src.stride, pd->dst.buf,
pd->dst.stride, bw, bh);
mean /= (1 << shift);
float hor_corr, vert_corr;
av1_get_horver_correlation_full(src_diff, diff_stride, bw, bh, &hor_corr,
&vert_corr);
fprintf(fout, " %g %g %g", mean, hor_corr, vert_corr);
double hdist[4] = { 0 }, vdist[4] = { 0 };
get_energy_distribution_fine(cpi, plane_bsize, src, src_stride, dst,
dst_stride, 1, hdist, vdist);
fprintf(fout, " %g %g %g %g %g %g %g %g", hdist[0], hdist[1], hdist[2],
hdist[3], vdist[0], vdist[1], vdist[2], vdist[3]);
if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
assert(tile_data->inter_mode_rd_models[plane_bsize].ready);
const int64_t overall_sse = get_sse(cpi, x);
int est_residue_cost = 0;
int64_t est_dist = 0;
get_est_rate_dist(tile_data, plane_bsize, overall_sse, &est_residue_cost,
&est_dist);
const double est_residue_cost_norm = (double)est_residue_cost / num_samples;
const double est_dist_norm = (double)est_dist / num_samples;
const double est_rdcost_norm =
(double)RDCOST(x->rdmult, est_residue_cost, est_dist) / num_samples;
fprintf(fout, " %g %g %g", est_residue_cost_norm, est_dist_norm,
est_rdcost_norm);
}
fprintf(fout, "\n");
fclose(fout);
}
#endif // CONFIG_COLLECT_RD_STATS >= 2
#endif // CONFIG_COLLECT_RD_STATS
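// Applies the inverse transform of the dequantized coefficients for one block
// and adds the result onto the destination buffer; a no-op when eob == 0.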
static AOM_INLINE void inverse_transform_block_facade(MACROBLOCK *const x,
int plane, int block,
int blk_row, int blk_col,
int eob,
int reduced_tx_set) {
if (!eob) return;
struct macroblock_plane *const p = &x->plane[plane];
MACROBLOCKD *const xd = &x->e_mbd;
tran_low_t *dqcoeff = p->dqcoeff + BLOCK_OFFSET(block);
const PLANE_TYPE plane_type = get_plane_type(plane);
const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col,
tx_size, reduced_tx_set);
struct macroblockd_plane *const pd = &xd->plane[plane];
const int dst_stride = pd->dst.stride;
uint8_t *dst = &pd->dst.buf[(blk_row * dst_stride + blk_col) << MI_SIZE_LOG2];
av1_inverse_transform_block(xd, dqcoeff, plane, tx_type, tx_size, dst,
dst_stride, eob, reduced_tx_set);
}
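// For intra blocks that still have transform blocks below or to the right,
// reconstructs the current transform block so that the following blocks can
// use the updated reconstruction for intra prediction. The forward transform
// and quantization are redone only when the dqcoeff buffer no longer holds
// the winning candidate (do_quant).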
static INLINE void recon_intra(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
const TXB_CTX *const txb_ctx, int skip_trellis,
TX_TYPE best_tx_type, int do_quant,
int *rate_cost, uint16_t best_eob) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
const int is_inter = is_inter_block(mbmi, xd->tree_type);
if (!is_inter && best_eob &&
(blk_row + tx_size_high_unit[tx_size] < mi_size_high[plane_bsize] ||
blk_col + tx_size_wide_unit[tx_size] < mi_size_wide[plane_bsize])) {
#if CONFIG_CROSS_CHROMA_TX
CctxType cctx_type = av1_get_cctx_type(xd, blk_row, blk_col);
#if !CCTX_INTRA
assert(cctx_type == CCTX_NONE);
#endif // !CCTX_INTRA
#endif // CONFIG_CROSS_CHROMA_TX
// If the quantized coefficients are stored in the dqcoeff buffer, we don't
// need to do transform and quantization again.
if (do_quant) {
TxfmParam txfm_param_intra;
QUANT_PARAM quant_param_intra;
av1_setup_xform(cm, x,
#if CONFIG_IST
plane,
#endif
tx_size, best_tx_type,
#if CONFIG_CROSS_CHROMA_TX
cctx_type,
#endif // CONFIG_CROSS_CHROMA_TX
&txfm_param_intra);
av1_setup_quant(tx_size, !skip_trellis,
skip_trellis
? (USE_B_QUANT_NO_TRELLIS ? AV1_XFORM_QUANT_B
: AV1_XFORM_QUANT_FP)
: AV1_XFORM_QUANT_FP,
cpi->oxcf.q_cfg.quant_b_adapt, &quant_param_intra);
av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, best_tx_type,
&quant_param_intra);
av1_xform_quant(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param_intra,
&quant_param_intra);
if (quant_param_intra.use_optimize_b) {
av1_optimize_b(cpi, x, plane, block, tx_size, best_tx_type,
#if CONFIG_CROSS_CHROMA_TX
cctx_type, blk_row, blk_col,
#endif // CONFIG_CROSS_CHROMA_TX
txb_ctx, rate_cost);
}
}
#if CONFIG_CROSS_CHROMA_TX && CCTX_INTRA
// In CONFIG_CROSS_CHROMA_TX, reconstruction of the U plane relies on the
// dqcoeffs of the V plane, so the operations below for U are performed
// together with V once the dqcoeffs of V are obtained.
if (plane == AOM_PLANE_V) {
tran_low_t *dqcoeff_u =
x->plane[AOM_PLANE_U].dqcoeff + BLOCK_OFFSET(block);
tran_low_t *dqcoeff_v =
x->plane[AOM_PLANE_V].dqcoeff + BLOCK_OFFSET(block);
const int max_uv_eob = AOMMAX(x->plane[AOM_PLANE_U].eobs[block],
x->plane[AOM_PLANE_V].eobs[block]);
av1_inv_cross_chroma_tx_block(dqcoeff_u, dqcoeff_v, tx_size, cctx_type);
inverse_transform_block_facade(x, AOM_PLANE_U, block, blk_row, blk_col,
max_uv_eob,
cm->features.reduced_tx_set_used);
}
if (plane != AOM_PLANE_U) {
#endif // CONFIG_CROSS_CHROMA_TX && CCTX_INTRA
inverse_transform_block_facade(x, plane, block, blk_row, blk_col,
x->plane[plane].eobs[block],
cm->features.reduced_tx_set_used);
#if CONFIG_CROSS_CHROMA_TX && CCTX_INTRA
}
#endif // CONFIG_CROSS_CHROMA_TX && CCTX_INTRA
// This may happen because of hash collision. The eob stored in the hash
// table is non-zero, but the real eob is zero. We need to make sure tx_type
// is DCT_DCT in this case.
if (plane == 0 && x->plane[plane].eobs[block] == 0 &&
best_tx_type != DCT_DCT) {
update_txk_array(xd, blk_row, blk_col, tx_size, DCT_DCT);
}
}
}
static unsigned pixel_dist_visible_only(
const AV1_COMP *const cpi, const MACROBLOCK *x, const uint8_t *src,
const int src_stride, const uint8_t *dst, const int dst_stride,
const BLOCK_SIZE tx_bsize, int txb_rows, int txb_cols, int visible_rows,
int visible_cols) {
unsigned sse;
if (txb_rows == visible_rows && txb_cols == visible_cols) {
cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
return sse;
}
const MACROBLOCKD *xd = &x->e_mbd;
uint64_t sse64 = aom_highbd_sse_odd_size(src, src_stride, dst, dst_stride,
visible_cols, visible_rows);
return (unsigned int)ROUND_POWER_OF_TWO(sse64, (xd->bd - 8) * 2);
}
// Compute the pixel domain distortion from src and dst on all visible 4x4s
// in the transform block.
static unsigned pixel_dist(const AV1_COMP *const cpi, const MACROBLOCK *x,
int plane, const uint8_t *src, const int src_stride,
const uint8_t *dst, const int dst_stride,
int blk_row, int blk_col,
const BLOCK_SIZE plane_bsize,
const BLOCK_SIZE tx_bsize) {
int txb_rows, txb_cols, visible_rows, visible_cols;
const MACROBLOCKD *xd = &x->e_mbd;
get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize,
&txb_cols, &txb_rows, &visible_cols, &visible_rows);
assert(visible_rows > 0);
assert(visible_cols > 0);
unsigned sse = pixel_dist_visible_only(cpi, x, src, src_stride, dst,
dst_stride, tx_bsize, txb_rows,
txb_cols, visible_rows, visible_cols);
return sse;
}
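// Computes the pixel-domain distortion for one transform block: copies the
// current prediction into a scratch buffer, applies the inverse transform of
// the quantized coefficients on top of it, and returns 16 times the SSE
// against the source.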
static INLINE int64_t dist_block_px_domain(const AV1_COMP *cpi, MACROBLOCK *x,
int plane, BLOCK_SIZE plane_bsize,
int block, int blk_row, int blk_col,
TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const uint16_t eob = p->eobs[block];
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
const int bsw = block_size_wide[tx_bsize];
const int bsh = block_size_high[tx_bsize];
const int src_stride = x->plane[plane].src.stride;
const int dst_stride = xd->plane[plane].dst.stride;
// Scale the transform block index to pixel unit.
const int src_idx = (blk_row * src_stride + blk_col) << MI_SIZE_LOG2;
const int dst_idx = (blk_row * dst_stride + blk_col) << MI_SIZE_LOG2;
const uint8_t *src = &x->plane[plane].src.buf[src_idx];
const uint8_t *dst = &xd->plane[plane].dst.buf[dst_idx];
#if CONFIG_IST
tran_low_t *dqcoeff = p->dqcoeff + BLOCK_OFFSET(block);
#else
const tran_low_t *dqcoeff = p->dqcoeff + BLOCK_OFFSET(block);
#endif
assert(cpi != NULL);
assert(tx_size_wide_log2[0] == tx_size_high_log2[0]);
uint8_t *recon;
DECLARE_ALIGNED(16, uint16_t, recon16[MAX_TX_SQUARE]);
recon = CONVERT_TO_BYTEPTR(recon16);
aom_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride,
CONVERT_TO_SHORTPTR(recon), MAX_TX_SIZE, bsw, bsh);
const PLANE_TYPE plane_type = get_plane_type(plane);
TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col, tx_size,
cpi->common.features.reduced_tx_set_used);
av1_inverse_transform_block(xd, dqcoeff, plane, tx_type, tx_size, recon,
MAX_TX_SIZE, eob,
cpi->common.features.reduced_tx_set_used);
return 16 * pixel_dist(cpi, x, plane, src, src_stride, recon, MAX_TX_SIZE,
blk_row, blk_col, plane_bsize, tx_bsize);
}
#if CONFIG_CROSS_CHROMA_TX
// Evaluate U and V distortion jointly for cross chroma component transform
// search.
static INLINE int64_t joint_uv_dist_block_px_domain(
const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE plane_bsize, int block,
int blk_row, int blk_col, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p_u = &x->plane[AOM_PLANE_U];
const struct macroblock_plane *const p_v = &x->plane[AOM_PLANE_V];
const struct macroblockd_plane *const pd_u = &xd->plane[AOM_PLANE_U];
const struct macroblockd_plane *const pd_v = &xd->plane[AOM_PLANE_V];
const uint16_t max_uv_eob = AOMMAX(p_u->eobs[block], p_v->eobs[block]);
const int eob_max = av1_get_max_eob(tx_size);
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
const int bsw = block_size_wide[tx_bsize];
const int bsh = block_size_high[tx_bsize];
// Scale the transform block index to pixel unit.
const int src_idx_u = (blk_row * p_u->src.stride + blk_col) << MI_SIZE_LOG2;
const int src_idx_v = (blk_row * p_v->src.stride + blk_col) << MI_SIZE_LOG2;
const int dst_idx_u = (blk_row * pd_u->dst.stride + blk_col) << MI_SIZE_LOG2;
const int dst_idx_v = (blk_row * pd_v->dst.stride + blk_col) << MI_SIZE_LOG2;
const uint8_t *src_u = &p_u->src.buf[src_idx_u];
const uint8_t *src_v = &p_v->src.buf[src_idx_v];
const uint8_t *dst_u = &pd_u->dst.buf[dst_idx_u];
const uint8_t *dst_v = &pd_v->dst.buf[dst_idx_v];
// p_u->dqcoeff and p_v->dqcoeff must remain unchanged here because the best
// dqcoeff in the CCTX domain may be used in the search later.
DECLARE_ALIGNED(32, tran_low_t, tmp_dqcoeff_u[MAX_TX_SQUARE]);
DECLARE_ALIGNED(32, tran_low_t, tmp_dqcoeff_v[MAX_TX_SQUARE]);
memcpy(tmp_dqcoeff_u, p_u->dqcoeff + BLOCK_OFFSET(block),
sizeof(tran_low_t) * eob_max);
memcpy(tmp_dqcoeff_v, p_v->dqcoeff + BLOCK_OFFSET(block),
sizeof(tran_low_t) * eob_max);
assert(p_u->eobs[block] > 0);
assert(cpi != NULL);
assert(tx_size_wide_log2[0] == tx_size_high_log2[0]);
uint8_t *recon_u, *recon_v;
DECLARE_ALIGNED(16, uint16_t, recon16_u[MAX_TX_SQUARE]);
DECLARE_ALIGNED(16, uint16_t, recon16_v[MAX_TX_SQUARE]);
recon_u = CONVERT_TO_BYTEPTR(recon16_u);
recon_v = CONVERT_TO_BYTEPTR(recon16_v);
aom_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst_u), pd_u->dst.stride,
CONVERT_TO_SHORTPTR(recon_u), MAX_TX_SIZE, bsw, bsh);
aom_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst_v), pd_v->dst.stride,
CONVERT_TO_SHORTPTR(recon_v), MAX_TX_SIZE, bsw, bsh);
CctxType cctx_type = av1_get_cctx_type(xd, blk_row, blk_col);
TX_TYPE tx_type =
av1_get_tx_type(xd, PLANE_TYPE_UV, blk_row, blk_col, tx_size,
cpi->common.features.reduced_tx_set_used);
av1_inv_cross_chroma_tx_block(tmp_dqcoeff_u, tmp_dqcoeff_v, tx_size,
cctx_type);
// TODO(kslu): handle transform domain eobs in addition to cctx domain eobs
av1_inverse_transform_block(xd, tmp_dqcoeff_u, AOM_PLANE_U, tx_type, tx_size,
recon_u, MAX_TX_SIZE, max_uv_eob,
cpi->common.features.reduced_tx_set_used);
av1_inverse_transform_block(xd, tmp_dqcoeff_v, AOM_PLANE_V, tx_type, tx_size,
recon_v, MAX_TX_SIZE, max_uv_eob,
cpi->common.features.reduced_tx_set_used);
int64_t dist_u =
pixel_dist(cpi, x, AOM_PLANE_U, src_u, p_u->src.stride, recon_u,
MAX_TX_SIZE, blk_row, blk_col, plane_bsize, tx_bsize);
int64_t dist_v =
pixel_dist(cpi, x, AOM_PLANE_V, src_v, p_v->src.stride, recon_v,
MAX_TX_SIZE, blk_row, blk_col, plane_bsize, tx_bsize);
return 16 * (dist_u + dist_v);
}
#endif // CONFIG_CROSS_CHROMA_TX
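// Hashes the residual of one intra transform block (CRC32C over src_diff) and
// folds the TX size into the low 5 bits.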
static uint32_t get_intra_txb_hash(MACROBLOCK *x, int plane, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size) {
int16_t tmp_data[64 * 64];
const int diff_stride = block_size_wide[plane_bsize];
const int16_t *diff = x->plane[plane].src_diff;
const int16_t *cur_diff_row = diff + 4 * blk_row * diff_stride + 4 * blk_col;
const int txb_w = tx_size_wide[tx_size];
const int txb_h = tx_size_high[tx_size];
uint8_t *hash_data = (uint8_t *)cur_diff_row;
if (txb_w != diff_stride) {
int16_t *cur_hash_row = tmp_data;
for (int i = 0; i < txb_h; i++) {
memcpy(cur_hash_row, cur_diff_row, sizeof(*diff) * txb_w);
cur_hash_row += txb_w;
cur_diff_row += diff_stride;
}
hash_data = (uint8_t *)tmp_data;
}
CRC32C *crc = &x->txfm_search_info.mb_rd_record.crc_calculator;
const uint32_t hash = av1_get_crc32c_value(crc, hash_data, 2 * txb_w * txb_h);
return (hash << 5) + tx_size;
}
// pruning thresholds for prune_txk_type and prune_txk_type_separ
static const int prune_factors[5] = { 200, 200, 120, 80, 40 }; // scale 1000
static const int mul_factors[5] = { 80, 80, 70, 50, 30 }; // scale 100
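// Checks whether the intra TXB RD record already holds a valid result for
// this residual and entropy context, and whether the stored tx type is still
// usable for the current block; returns 1 on a usable hit, 0 otherwise.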
static INLINE int is_intra_hash_match(const AV1_COMP *cpi, MACROBLOCK *x,
int plane, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
const TXB_CTX *const txb_ctx,
TXB_RD_INFO **intra_txb_rd_info,
const int tx_type_map_idx,
uint16_t *cur_joint_ctx) {
MACROBLOCKD *xd = &x->e_mbd;
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
assert(cpi->sf.tx_sf.use_intra_txb_hash &&
frame_is_intra_only(&cpi->common) &&
!is_inter_block(xd->mi[0], xd->tree_type) && plane == 0 &&
tx_size_wide[tx_size] == tx_size_high[tx_size]);
const uint32_t intra_hash =
get_intra_txb_hash(x, plane, blk_row, blk_col, plane_bsize, tx_size);
const int intra_hash_idx =
find_tx_size_rd_info(&txfm_info->txb_rd_record_intra, intra_hash);
*intra_txb_rd_info =
&txfm_info->txb_rd_record_intra.tx_rd_info[intra_hash_idx];
#if CONFIG_FORWARDSKIP
*cur_joint_ctx = txb_ctx->txb_skip_ctx;
if (xd->mi[0]->fsc_mode[xd->tree_type == CHROMA_PART] == 0) {
*cur_joint_ctx += (txb_ctx->dc_sign_ctx << 8);
}
#else
*cur_joint_ctx = (txb_ctx->dc_sign_ctx << 8) + txb_ctx->txb_skip_ctx;
#endif // CONFIG_FORWARDSKIP
if ((*intra_txb_rd_info)->entropy_context == *cur_joint_ctx &&
txfm_info->txb_rd_record_intra.tx_rd_info[intra_hash_idx].valid) {
xd->tx_type_map[tx_type_map_idx] = (*intra_txb_rd_info)->tx_type;
const TX_TYPE ref_tx_type =
av1_get_tx_type(xd, get_plane_type(plane), blk_row, blk_col, tx_size,
cpi->common.features.reduced_tx_set_used);
#if CONFIG_FORWARDSKIP
const int fsc_invalid =
!xd->mi[0]->fsc_mode[xd->tree_type == CHROMA_PART] &&
(*intra_txb_rd_info)->tx_type == IDTX;
if (fsc_invalid) return 0;
#endif // CONFIG_FORWARDSKIP
return (ref_tx_type == (*intra_txb_rd_info)->tx_type);
}
return 0;
}
// Sorts R-D costs in ascending order using insertion sort, keeping txk[] in
// sync with rds[].
static INLINE void sort_rd(int64_t rds[], int txk[], int len) {
int i, j, k;
for (i = 1; i <= len - 1; ++i) {
for (j = 0; j < i; ++j) {
if (rds[j] > rds[i]) {
int64_t temprd;
int tempi;
temprd = rds[i];
tempi = txk[i];
for (k = i; k > j; k--) {
rds[k] = rds[k - 1];
txk[k] = txk[k - 1];
}
rds[j] = temprd;
txk[j] = tempi;
break;
}
}
}
}
static INLINE void dist_block_tx_domain(MACROBLOCK *x, int plane, int block,
TX_SIZE tx_size, int64_t *out_dist,
int64_t *out_sse) {
const struct macroblock_plane *const p = &x->plane[plane];
// Transform domain distortion computation is more efficient as it does
// not involve an inverse transform, but it is less accurate.
const int buffer_length = av1_get_max_eob(tx_size);
int64_t this_sse;
// TX-domain results need to shift down to Q2/D10 to match pixel
// domain distortion values which are in Q2^2
int shift = (MAX_TX_SCALE - av1_get_tx_scale(tx_size)) * 2;
const int block_offset = BLOCK_OFFSET(block);
tran_low_t *const coeff = p->coeff + block_offset;
tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
MACROBLOCKD *const xd = &x->e_mbd;
*out_dist =
av1_highbd_block_error(coeff, dqcoeff, buffer_length, &this_sse, xd->bd);
*out_dist = RIGHT_SIGNED_SHIFT(*out_dist, shift);
*out_sse = RIGHT_SIGNED_SHIFT(this_sse, shift);
}
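// Prunes 2D transform types separably: the four horizontal 1D types are first
// rated with a DCT vertical transform, then the vertical types are rated
// given the best horizontal, and the two rankings are combined. Returns a bit
// mask in which a set bit means the corresponding tx type is pruned.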
uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
int block, TX_SIZE tx_size, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
int16_t allowed_tx_mask, int prune_factor,
const TXB_CTX *const txb_ctx,
int reduced_tx_set_used, int64_t ref_best_rd,
int num_sel) {
const AV1_COMMON *cm = &cpi->common;
int idx;
int64_t rds_v[4];
int64_t rds_h[4];
int idx_v[4] = { 0, 1, 2, 3 };
int idx_h[4] = { 0, 1, 2, 3 };
int skip_v[4] = { 0 };
int skip_h[4] = { 0 };
const int idx_map[16] = {
DCT_DCT, DCT_ADST, DCT_FLIPADST, V_DCT,
ADST_DCT, ADST_ADST, ADST_FLIPADST, V_ADST,
FLIPADST_DCT, FLIPADST_ADST, FLIPADST_FLIPADST, V_FLIPADST,
H_DCT, H_ADST, H_FLIPADST, IDTX
};
const int sel_pattern_v[16] = {
0, 0, 1, 1, 0, 2, 1, 2, 2, 0, 3, 1, 3, 2, 3, 3
};
const int sel_pattern_h[16] = {
0, 1, 0, 1, 2, 0, 2, 1, 2, 3, 0, 3, 1, 3, 2, 3
};
QUANT_PARAM quant_param;
TxfmParam txfm_param;
av1_setup_xform(cm, x,
#if CONFIG_IST
plane,
#endif
tx_size, DCT_DCT,
#if CONFIG_CROSS_CHROMA_TX
CCTX_NONE,
#endif // CONFIG_CROSS_CHROMA_TX
&txfm_param);
av1_setup_quant(tx_size, 1, AV1_XFORM_QUANT_B, cpi->oxcf.q_cfg.quant_b_adapt,
&quant_param);
int tx_type;
// Use the full tx set so that types outside the ext_tx_set of the current
// block can also be tried; this function should only be called for transform
// sizes up to 16x16.
assert(txsize_sqr_up_map[tx_size] <= TX_16X16);
#if CONFIG_DDT_INTER
txfm_param.tx_set_type = EXT_TX_SET_ALL24;
#else
txfm_param.tx_set_type = EXT_TX_SET_ALL16;
#endif // CONFIG_DDT_INTER
int rate_cost = 0;
int64_t dist = 0, sse = 0;
// evaluate horizontal with vertical DCT
for (idx = 0; idx < 4; ++idx) {
tx_type = idx_map[idx];
txfm_param.tx_type = tx_type;
av1_xform_quant(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
&quant_param);
dist_block_tx_domain(x, plane, block, tx_size, &dist, &sse);
rate_cost = av1_cost_coeffs_txb_laplacian(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, tx_size, tx_type,
#if CONFIG_CROSS_CHROMA_TX
CCTX_NONE, blk_row, blk_col,
#endif // CONFIG_CROSS_CHROMA_TX
txb_ctx, reduced_tx_set_used, 0);
rds_h[idx] = RDCOST(x->rdmult, rate_cost, dist);
if ((rds_h[idx] - (rds_h[idx] >> 2)) > ref_best_rd) {
skip_h[idx] = 1;
}
}
sort_rd(rds_h, idx_h, 4);
for (idx = 1; idx < 4; idx++) {
if (rds_h[idx] > rds_h[0] * 1.2) skip_h[idx_h[idx]] = 1;
}
if (skip_h[idx_h[0]]) return (uint16_t)0xFFFF;
// evaluate vertical with the best horizontal chosen
rds_v[0] = rds_h[0];
int start_v = 1, end_v = 4;
const int *idx_map_v = idx_map + idx_h[0];
for (idx = start_v; idx < end_v; ++idx) {
tx_type = idx_map_v[idx_v[idx] * 4];
txfm_param.tx_type = tx_type;
av1_xform_quant(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
&quant_param);
dist_block_tx_domain(x, plane, block, tx_size, &dist, &sse);
rate_cost = av1_cost_coeffs_txb_laplacian(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, tx_size, tx_type,
#if CONFIG_CROSS_CHROMA_TX
CCTX_NONE, blk_row, blk_col,
#endif // CONFIG_CROSS_CHROMA_TX
txb_ctx, reduced_tx_set_used, 0);
rds_v[idx] = RDCOST(x->rdmult, rate_cost, dist);
if ((rds_v[idx] - (rds_v[idx] >> 2)) > ref_best_rd) {
skip_v[idx] = 1;
}
}
sort_rd(rds_v, idx_v, 4);
for (idx = 1; idx < 4; idx++) {
if (rds_v[idx] > rds_v[0] * 1.2) skip_v[idx_v[idx]] = 1;
}
// combine rd_h and rd_v to prune tx candidates
int i_v, i_h;
int64_t rds[16];
#if CONFIG_DDT_INTER
// Pruning is not applied to DDT for now.
int num_cand = 0, last = TX_TYPES_TRIG - 1;
#else
int num_cand = 0, last = TX_TYPES - 1;
#endif // CONFIG_DDT_INTER
for (int i = 0; i < 16; i++) {
i_v = sel_pattern_v[i];
i_h = sel_pattern_h[i];
tx_type = idx_map[idx_v[i_v] * 4 + idx_h[i_h]];
if (!(allowed_tx_mask & (1 << tx_type)) || skip_h[idx_h[i_h]] ||
skip_v[idx_v[i_v]]) {
txk_map[last] = tx_type;
last--;
} else {
txk_map[num_cand] = tx_type;
rds[num_cand] = rds_v[i_v] + rds_h[i_h];
if (rds[num_cand] == 0) rds[num_cand] = 1;
num_cand++;
}
}
sort_rd(rds, txk_map, num_cand);
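// Start with every tx type marked as pruned except the best candidate, then
// un-prune candidates whose relative RD gap to the best (scaled by 1800) is
// below prune_factor.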
uint16_t prune = (uint16_t)(~(1 << txk_map[0]));
num_sel = AOMMIN(num_sel, num_cand);
for (int i = 1; i < num_sel; i++) {
int64_t factor = 1800 * (rds[i] - rds[0]) / (rds[0]);
if (factor < (int64_t)prune_factor)
prune &= ~(1 << txk_map[i]);
else
break;
}
return prune;
}
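// Estimates the RD cost of every allowed tx type for this block (forward
// transform + quantization, Laplacian rate estimate, tx-domain distortion),
// sorts the candidates into txk_map by estimated RD cost, and returns a
// bitmask whose set bits mark tx types to prune. The best candidate is
// always kept; candidates whose relative RD gap to the best (scaled by 1000)
// is below prune_factor are also kept.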
uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
int block, TX_SIZE tx_size, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, int *txk_map,
uint16_t allowed_tx_mask, int prune_factor,
const TXB_CTX *const txb_ctx, int reduced_tx_set_used) {
const AV1_COMMON *cm = &cpi->common;
int tx_type;
#if CONFIG_DDT_INTER
// Pruning is not applied to DDT for now.
int64_t rds[TX_TYPES_TRIG];
#else
int64_t rds[TX_TYPES];
#endif // CONFIG_DDT_INTER
int num_cand = 0;
#if CONFIG_DDT_INTER
int last = TX_TYPES_TRIG - 1;
#else
int last = TX_TYPES - 1;
#endif // CONFIG_DDT_INTER
TxfmParam txfm_param;
QUANT_PARAM quant_param;
av1_setup_xform(cm, x,
#if CONFIG_IST
plane,
#endif
tx_size, DCT_DCT,
#if CONFIG_CROSS_CHROMA_TX
CCTX_NONE,
#endif // CONFIG_CROSS_CHROMA_TX
&txfm_param);
av1_setup_quant(tx_size, 1, AV1_XFORM_QUANT_B, cpi->oxcf.q_cfg.quant_b_adapt,
&quant_param);
#if CONFIG_DDT_INTER
for (int idx = 0; idx < TX_TYPES_TRIG; idx++) {
#else
for (int idx = 0; idx < TX_TYPES; idx++) {
#endif // CONFIG_DDT_INTER
tx_type = idx;
int rate_cost = 0;
int64_t dist = 0, sse = 0;
if (!(allowed_tx_mask & (1 << tx_type))) {
txk_map[last] = tx_type;
last--;
continue;
}
txfm_param.tx_type = tx_type;
// do txfm and quantization
av1_xform_quant(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
&quant_param);
// estimate rate cost
rate_cost = av1_cost_coeffs_txb_laplacian(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, tx_size, tx_type,
#if CONFIG_CROSS_CHROMA_TX
CCTX_NONE, blk_row, blk_col,
#endif // CONFIG_CROSS_CHROMA_TX
txb_ctx, reduced_tx_set_used, 0);
// tx domain dist
dist_block_tx_domain(x, plane, block, tx_size, &dist, &sse);
txk_map[num_cand] = tx_type;
rds[num_cand] = RDCOST(x->rdmult, rate_cost, dist);
if (rds[num_cand] == 0) rds[num_cand] = 1;
num_cand++;
}
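// No allowed tx type was evaluated: prune everything.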
if (num_cand == 0) return (uint16_t)0xFFFF;
sort_rd(rds, txk_map, num_cand);
uint16_t prune = (uint16_t)(~(1 << txk_map[0]));
// 0 < prune_factor <= 1000 controls aggressiveness
int64_t factor = 0;
for (int idx = 1; idx < num_cand; idx++) {
factor = 1000 * (rds[idx] - rds[0]) / rds[0];
if (factor < (int64_t)prune_factor)
prune &= ~(1 << txk_map[idx]);
else
break;
}
return prune;
}
// These thresholds were calibrated so that the model prunes a given number
// of TX types on average: selecting the threshold with index i leads to
// pruning i+1 TX types on average.
static const float *prune_2D_adaptive_thresholds[] = {
// TX_4X4
(float[]){ 0.00549f, 0.01306f, 0.02039f, 0.02747f, 0.03406f, 0.04065f,
0.04724f, 0.05383f, 0.06067f, 0.06799f, 0.07605f, 0.08533f,
0.09778f, 0.11780f },
// TX_8X8
(float[]){ 0.00037f, 0.00183f, 0.00525f, 0.01038f, 0.01697f, 0.02502f,
0.03381f, 0.04333f, 0.05286f, 0.06287f, 0.07434f, 0.08850f,
0.10803f, 0.14124f },
// TX_16X16
(float[]){ 0.01404f, 0.02000f, 0.04211f, 0.05164f, 0.05798f, 0.06335f,
0.06897f, 0.07629f, 0.08875f, 0.11169f },
// TX_32X32
NULL,
// TX_64X64
NULL,
// TX_4X8
(float[]){ 0.00183f, 0.00745f, 0.01428f, 0.02185f, 0.02966f, 0.03723f,
0.04456f, 0.05188f, 0.05920f, 0.06702f, 0.07605f, 0.08704f,
0.10168f, 0.12585f },
// TX_8X4
(float[]){ 0.00085f, 0.00476f, 0.01135f, 0.01892f, 0.02698f, 0.03528f,
0.04358f, 0.05164f, 0.05994f, 0.06848f, 0.07849f, 0.09021f,
0.10583f, 0.13123f },
// TX_8X16
(float[]){ 0.00037f, 0.00232f, 0.00671f, 0.01257f, 0.01965f, 0.02722f,
0.03552f, 0.04382f, 0.05237f, 0.06189f, 0.07336f, 0.08728f,
0.10730f, 0.14221f },
// TX_16X8
(float[]){ 0.00061f, 0.00330f, 0.00818f, 0.01453f, 0.02185f, 0.02966f,
0.03772f, 0.04578f, 0.05383f, 0.06262f, 0.07288f, 0.08582f,
0.10339f, 0.13464f },
// TX_16X32
NULL,
// TX_32X16
NULL,
// TX_32X64
NULL,
// TX_64X32
NULL,
// TX_4X16
(float[]){ 0.00232f, 0.00671f, 0.01257f, 0.01941f, 0.02673f, 0.03430f,
0.04211f, 0.04968f, 0.05750f, 0.06580f, 0.07507f, 0.08655f,
0.10242f, 0.12878f },
// TX_16X4
(float[]){ 0.00110f, 0.00525f, 0.01208f, 0.01990f, 0.02795f, 0.03601f,
0.04358f, 0.05115f, 0.05896f, 0.06702f, 0.07629f, 0.08752f,
0.10217f, 0.12610f },
// TX_8X32
NULL,
// TX_32X8
NULL,
// TX_16X64
NULL,
// TX_64X16
NULL,
};
// Probabilities are sorted in descending order; txk[] is permuted alongside.
static INLINE void sort_probability(float prob[], int txk[], int len) {
int i, j, k;
for (i = 1; i <= len - 1; ++i) {
for (j = 0; j < i; ++j) {
if (prob[j] < prob[i]) {
float temp;
int tempi;
temp = prob[i];
tempi = txk[i];
for (k = i; k > j; k--) {
prob[k] = prob[k - 1];
txk[k] = txk[k - 1];
}
prob[j] = temp;
txk[j] = tempi;
break;
}
}
}
}
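// Selects the score threshold used by prune_tx_2D: the pruning
// aggressiveness index is derived from the prune mode and tx set type, and
// the threshold is read from the calibrated per-tx-size tables above.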
static INLINE float get_adaptive_thresholds(
TX_SIZE tx_size, TxSetType tx_set_type,
TX_TYPE_PRUNE_MODE prune_2d_txfm_mode) {
const int prune_aggr_table[5][2] = {
{ 4, 1 }, { 6, 3 }, { 9, 6 }, { 9, 6 }, { 12, 9 }
};
int pruning_aggressiveness = 0;
#if CONFIG_DDT_INTER
if (tx_set_type == EXT_TX_SET_ALL24)
#else
if (tx_set_type == EXT_TX_SET_ALL16)
#endif // CONFIG_DDT_INTER
pruning_aggressiveness =
prune_aggr_table[prune_2d_txfm_mode - TX_TYPE_PRUNE_1][0];
else if (tx_set_type == EXT_TX_SET_DTT9_IDTX_1DDCT)
pruning_aggressiveness =
prune_aggr_table[prune_2d_txfm_mode - TX_TYPE_PRUNE_1][1];
return prune_2D_adaptive_thresholds[tx_size][pruning_aggressiveness];
}
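// Computes normalized horizontal and vertical energy distributions of the
// residual block (1D projections of the squared differences), used as
// features by the tx type pruning neural networks.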
static AOM_INLINE void get_energy_distribution_finer(const int16_t *diff,
int stride, int bw, int bh,
float *hordist,
float *verdist) {
// First compute downscaled block energy values (esq); downscale factors
// are defined by w_shift and h_shift.
unsigned int esq[256];
const int w_shift = bw <= 8 ? 0 : 1;
const int h_shift = bh <= 8 ? 0 : 1;
const int esq_w = bw >> w_shift;
const int esq_h = bh >> h_shift;
const int esq_sz = esq_w * esq_h;
int i, j;
memset(esq, 0, esq_sz * sizeof(esq[0]));
if (w_shift) {
for (i = 0; i < bh; i++) {
unsigned int *cur_esq_row = esq + (i >> h_shift) * esq_w;
const int16_t *cur_diff_row = diff + i * stride;
for (j = 0; j < bw; j += 2) {
cur_esq_row[j >> 1] += (cur_diff_row[j] * cur_diff_row[j] +
cur_diff_row[j + 1] * cur_diff_row[j + 1]);
}
}
} else {
for (i = 0; i < bh; i++) {
unsigned int *cur_esq_row = esq + (i >> h_shift) * esq_w;
const int16_t *cur_diff_row = diff + i * stride;
for (j = 0; j < bw; j++) {
cur_esq_row[j] += cur_diff_row[j] * cur_diff_row[j];
}
}
}
uint64_t total = 0;
for (i = 0; i < esq_sz; i++) total += esq[i];
// Output hordist and verdist arrays are normalized 1D projections of esq
if (total == 0) {
float hor_val = 1.0f / esq_w;
for (j = 0; j < esq_w - 1; j++) hordist[j] = hor_val;
float ver_val = 1.0f / esq_h;
for (i = 0; i < esq_h - 1; i++) verdist[i] = ver_val;
return;
}
const float e_recip = 1.0f / (float)total;
memset(hordist, 0, (esq_w - 1) * sizeof(hordist[0]));
memset(verdist, 0, (esq_h - 1) * sizeof(verdist[0]));
const unsigned int *cur_esq_row;
for (i = 0; i < esq_h - 1; i++) {
cur_esq_row = esq + i * esq_w;
for (j = 0; j < esq_w - 1; j++) {
hordist[j] += (float)cur_esq_row[j];
verdist[i] += (float)cur_esq_row[j];
}
verdist[i] += (float)cur_esq_row[j];
}
cur_esq_row = esq + i * esq_w;
for (j = 0; j < esq_w - 1; j++) hordist[j] += (float)cur_esq_row[j];
for (j = 0; j < esq_w - 1; j++) hordist[j] *= e_recip;
for (i = 0; i < esq_h - 1; i++) verdist[i] *= e_recip;
}
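// Neural-network based tx type pruning: scores the four 1D transforms in
// each direction from residual energy/correlation features, combines them
// into 16 2D scores, and clears bits in *allowed_tx_mask for tx types whose
// softmax score falls below an adaptive threshold (the best-scoring allowed
// type is always kept). txk_map receives the tx types sorted by score.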
static void prune_tx_2D(MACROBLOCK *x, BLOCK_SIZE bsize, TX_SIZE tx_size,
int blk_row, int blk_col, TxSetType tx_set_type,
TX_TYPE_PRUNE_MODE prune_2d_txfm_mode, int *txk_map,
uint16_t *allowed_tx_mask) {
int tx_type_table_2D[16] = {
DCT_DCT, DCT_ADST, DCT_FLIPADST, V_DCT,
ADST_DCT, ADST_ADST, ADST_FLIPADST, V_ADST,
FLIPADST_DCT, FLIPADST_ADST, FLIPADST_FLIPADST, V_FLIPADST,
H_DCT, H_ADST, H_FLIPADST, IDTX
};
#if CONFIG_DDT_INTER
if (tx_set_type != EXT_TX_SET_ALL24 &&
#else
if (tx_set_type != EXT_TX_SET_ALL16 &&
#endif // CONFIG_DDT_INTER
tx_set_type != EXT_TX_SET_DTT9_IDTX_1DDCT)
return;
#if CONFIG_NN_V2
NN_CONFIG_V2 *nn_config_hor = av1_tx_type_nnconfig_map_hor[tx_size];
NN_CONFIG_V2 *nn_config_ver = av1_tx_type_nnconfig_map_ver[tx_size];
#else
const NN_CONFIG *nn_config_hor = av1_tx_type_nnconfig_map_hor[tx_size];
const NN_CONFIG *nn_config_ver = av1_tx_type_nnconfig_map_ver[tx_size];
#endif
if (!nn_config_hor || !nn_config_ver) return; // Model not established yet.
aom_clear_system_state();
float hfeatures[16], vfeatures[16];
float hscores[4], vscores[4];
float scores_2D_raw[16];
float scores_2D[16];
const int bw = tx_size_wide[tx_size];
const int bh = tx_size_high[tx_size];
const int hfeatures_num = bw <= 8 ? bw : bw / 2;
const int vfeatures_num = bh <= 8 ? bh : bh / 2;
assert(hfeatures_num <= 16);
assert(vfeatures_num <= 16);
const struct macroblock_plane *const p = &x->plane[0];
const int diff_stride = block_size_wide[bsize];
const int16_t *diff = p->src_diff + 4 * blk_row * diff_stride + 4 * blk_col;
get_energy_distribution_finer(diff, diff_stride, bw, bh, hfeatures,
vfeatures);
av1_get_horver_correlation_full(diff, diff_stride, bw, bh,
&hfeatures[hfeatures_num - 1],
&vfeatures[vfeatures_num - 1]);
aom_clear_system_state();
#if CONFIG_NN_V2
av1_nn_predict_v2(hfeatures, nn_config_hor, 0, hscores);
av1_nn_predict_v2(vfeatures, nn_config_ver, 0, vscores);
#else
av1_nn_predict(hfeatures, nn_config_hor, 1, hscores);
av1_nn_predict(vfeatures, nn_config_ver, 1, vscores);
#endif
aom_clear_system_state();
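// Combine the per-direction scores into 16 raw 2D scores (vertical x
// horizontal products), then normalize them with a softmax.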
for (int i = 0; i < 4; i++) {
float *cur_scores_2D = scores_2D_raw + i * 4;
cur_scores_2D[0] = vscores[i] * hscores[0];
cur_scores_2D[1] = vscores[i] * hscores[1];
cur_scores_2D[2] = vscores[i] * hscores[2];
cur_scores_2D[3] = vscores[i] * hscores[3];
}
av1_nn_softmax(scores_2D_raw, scores_2D, 16);
const float score_thresh =
get_adaptive_thresholds(tx_size, tx_set_type, prune_2d_txfm_mode);
// Always keep the TX type with the highest score, prune all others with
// score below score_thresh.
int max_score_i = 0;
float max_score = 0.0f;
uint16_t allow_bitmask = 0;
float sum_score = 0.0;
// Calculate the sum of the allowed tx type scores and populate the allow
// bitmask based on score_thresh and allowed_tx_mask.
#if CONFIG_DDT_INTER
for (int tx_idx = 0; tx_idx < TX_TYPES_TRIG; tx_idx++) {
#else
for (int tx_idx = 0; tx_idx < TX_TYPES; tx_idx++) {
#endif // CONFIG_DDT_INTER
int allow_tx_type = *allowed_tx_mask & (1 << tx_type_table_2D[tx_idx]);
if (scores_2D[tx_idx] > max_score && allow_tx_type) {
max_score = scores_2D[tx_idx];
max_score_i = tx_idx;
}
if (scores_2D[tx_idx] >= score_thresh && allow_tx_type) {
// Set allow mask based on score_thresh
allow_bitmask |= (1 << tx_type_table_2D[tx_idx]);
// Accumulate score of allowed tx type
sum_score += scores_2D[tx_idx];
}
}
if (!((allow_bitmask >> max_score_i) & 0x01)) {
// Set allow mask based on tx type with max score
allow_bitmask |= (1 << tx_type_table_2D[max_score_i]);
sum_score += scores_2D[max_score_i];
}
// Sort tx type probability of all types
#if CONFIG_DDT_INTER
sort_probability(scores_2D, tx_type_table_2D, TX_TYPES_TRIG);
#else
sort_probability(scores_2D, tx_type_table_2D, TX_TYPES);
#endif // CONFIG_DDT_INTER
// Enable more pruning based on tx type probability and number of allowed tx
// types
if (prune_2d_txfm_mode >= TX_TYPE_PRUNE_4) {
float temp_score = 0.0;
float score_ratio = 0.0;
int tx_idx, tx_count = 0;
const float inv_sum_score = 100 / sum_score;
// Get allowed tx types based on sorted probability score and tx count
#if CONFIG_DDT_INTER
for (tx_idx = 0; tx_idx < TX_TYPES_TRIG; tx_idx++) {
#else
for (tx_idx = 0; tx_idx < TX_TYPES; tx_idx++) {
#endif // CONFIG_DDT_INTER
// Stop keeping tx types once their cumulative probability exceeds 30% and
// at least two types have already been kept; the remaining sorted types
// are pruned below.
if (score_ratio > 30.0 && tx_count >= 2) break;
// Calculate cumulative probability of allowed tx types
if (allow_bitmask & (1 << tx_type_table_2D[tx_idx])) {
// Calculate cumulative probability
temp_score += scores_2D[tx_idx];
// Calculate percentage of cumulative probability of allowed tx type
score_ratio = temp_score * inv_sum_score;
tx_count++;
}
}
// Set remaining tx types as pruned
#if CONFIG_DDT_INTER
for (; tx_idx < TX_TYPES_TRIG; tx_idx++)
#else
for (; tx_idx < TX_TYPES; tx_idx++)
#endif // CONFIG_DDT_INTER
allow_bitmask &= ~(1 << tx_type_table_2D[tx_idx]);
}
memcpy(txk_map, tx_type_table_2D, sizeof(tx_type_table_2D));
*allowed_tx_mask = allow_bitmask;
}
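// Returns the standard deviation given the mean and the sum of squares:
// sqrt(E[x^2] - mean^2), clamped at zero.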
static float get_dev(float mean, double x2_sum, int num) {
const float e_x2 = (float)(x2_sum / num);
const float diff = e_x2 - mean * mean;
const float dev = (diff > 0) ? sqrtf(diff) : 0;
return dev;
}
// Features used by the model to predict tx split: the mean and standard
// deviation values of the block and its sub-blocks.
static AOM_INLINE void get_mean_dev_features(int bd, const int16_t *data,
int stride, int bw, int bh,
float *feature) {
const int16_t *const data_ptr = &data[0];
const int subh = (bh >= bw) ? (bh >> 1) : bh;
const int subw = (bw >= bh) ? (bw >> 1) : bw;
const int num = bw * bh;
const int sub_num = subw * subh;
int feature_idx = 2;
int total_x_sum = 0;
int64_t total_x2_sum = 0;
int blk_idx = 0;
double mean2_sum = 0.0f;
float dev_sum = 0.0f;
for (int row = 0; row < bh; row += subh) {
for (int col = 0; col < bw; col += subw) {
int x_sum;
int64_t x2_sum;
// TODO(any): Write a SIMD version. Clear registers.
aom_get_blk_sse_sum(data_ptr + row * stride + col, stride, subw, subh,
&x_sum, &x2_sum);
x_sum >>= (bd - 8);
x2_sum >>= (bd - 8) * 2;
total_x_sum += x_sum;
total_x2_sum += x2_sum;
aom_clear_system_state();
const float mean = (float)x_sum / sub_num;
const float dev = get_dev(mean, (double)x2_sum, sub_num);
feature[feature_idx++] = mean;
feature[feature_idx++] = dev;
mean2_sum += (double)(mean * mean);
dev_sum += dev;
blk_idx++;
}
}
const float lvl0_mean = (float)total_x_sum / num;
feature[0] = lvl0_mean;
feature[1] = get_dev(lvl0_mean, (double)total_x2_sum, num);
if (blk_idx > 1) {
// Deviation of means.
feature[feature_idx++] = get_dev(lvl0_mean, mean2_sum, blk_idx);
// Mean of deviations.
feature[feature_idx++] = dev_sum / blk_idx;
}
}
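// Runs a neural network on the mean/deviation features of the residual block
// to produce a tx split score. Returns the score scaled by 10000 and clamped
// to [-80000, 80000], or -1 if no model is available for this tx size.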
static int ml_predict_tx_split(MACROBLOCK *x, BLOCK_SIZE bsize, int blk_row,
int blk_col, TX_SIZE tx_size) {
const NN_CONFIG *nn_config = av1_tx_split_nnconfig_map[tx_size];
if (!nn_config) return -1;
const int diff_stride = block_size_wide[bsize];
const int16_t *diff =
x->plane[0].src_diff + 4 * blk_row * diff_stride + 4 * blk_col;
const int bw = tx_size_wide[tx_size];
const int bh = tx_size_high[tx_size];
aom_clear_system_state();
float features[64] = { 0.0f };
get_mean_dev_features(x->e_mbd.bd, diff, diff_stride, bw, bh, features);
float score = 0.0f;
av1_nn_predict(features, nn_config, 1, &score);
aom_clear_system_state();
int int_score = (int)(score * 10000);
return clamp(int_score, -80000, 80000);
}
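// Builds the bitmask of tx types to evaluate for this transform block.
// Depending on speed features and block properties, the search is either
// restricted to a single type (default type, DCT_DCT, or the chroma type
// derived from luma) or pruned using frame-level tx type statistics,
// estimated RD costs (prune_txk_type / prune_txk_type_separ), or the neural
// network pruning in prune_tx_2D. *allowed_txk_types is set to the single
// allowed type, or to TX_TYPES when more than one type remains.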
static INLINE uint16_t
get_tx_mask(const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block,
int blk_row, int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
const TXB_CTX *const txb_ctx, FAST_TX_SEARCH_MODE ftxs_mode,
int64_t ref_best_rd, TX_TYPE *allowed_txk_types, int *txk_map) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
const TxfmSearchParams *txfm_params = &x->txfm_search_params;
const int is_inter = is_inter_block(mbmi, xd->tree_type);
const int fast_tx_search = ftxs_mode & FTXS_DCT_AND_1D_DCT_ONLY;
// If txk_allowed == TX_TYPES, more than one tx type is allowed; otherwise
// (txk_allowed < TX_TYPES) only that specific tx type is allowed.
TX_TYPE txk_allowed = TX_TYPES;
if ((!is_inter && txfm_params->use_default_intra_tx_type) ||
(is_inter && txfm_params->use_default_inter_tx_type)) {
txk_allowed =
get_default_tx_type(0, xd, tx_size, cpi->is_screen_content_type);
} else if (x->rd_model == LOW_TXFM_RD) {
if (plane == 0) txk_allowed = DCT_DCT;
}
const TxSetType tx_set_type = av1_get_ext_tx_set_type(
tx_size, is_inter, cm->features.reduced_tx_set_used);
TX_TYPE uv_tx_type = DCT_DCT;
if (plane) {
// tx_type of PLANE_TYPE_UV should be the same as PLANE_TYPE_Y
uv_tx_type = txk_allowed =
av1_get_tx_type(xd, get_plane_type(plane), blk_row, blk_col, tx_size,
cm->features.reduced_tx_set_used);
}
PREDICTION_MODE intra_dir =
mbmi->filter_intra_mode_info.use_filter_intra
? fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode]
: mbmi->mode;
uint16_t ext_tx_used_flag =
cpi->sf.tx_sf.tx_type_search.use_reduced_intra_txset &&
tx_set_type == EXT_TX_SET_DTT4_IDTX_1DDCT
? av1_reduced_intra_tx_used_flag[intra_dir]
: av1_ext_tx_used_flag[tx_set_type];
if (xd->lossless[mbmi->segment_id] || txsize_sqr_up_map[tx_size] > TX_32X32 ||
ext_tx_used_flag == 0x0001 ||
(is_inter && cpi->oxcf.txfm_cfg.use_inter_dct_only) ||
(!is_inter && cpi->oxcf.txfm_cfg.use_intra_dct_only)) {
txk_allowed = DCT_DCT;
}
if (cpi->oxcf.txfm_cfg.enable_flip_idtx == 0)
ext_tx_used_flag &= DCT_ADST_TX_MASK;
#if CONFIG_DST_32X32
if (!is_inter && (txsize_sqr_up_map[tx_size] == TX_32X32))
ext_tx_used_flag &= DCT_ADST_TX_MASK;
#endif
uint16_t allowed_tx_mask = 0; // 1: allow; 0: skip.
if (txk_allowed < TX_TYPES) {
allowed_tx_mask = 1 << txk_allowed;
allowed_tx_mask &= ext_tx_used_flag;
} else if (fast_tx_search) {
allowed_tx_mask = 0x0c01; // V_DCT, H_DCT, DCT_DCT
allowed_tx_mask &= ext_tx_used_flag;
} else {
assert(plane == 0);
allowed_tx_mask = ext_tx_used_flag;
int num_allowed = 0;
const FRAME_UPDATE_TYPE update_type = get_frame_update_type(&cpi->gf_group);
const int *tx_type_probs =
cpi->frame_probs.tx_type_probs[update_type][tx_size];
int i;
if (cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats) {
static const int thresh_arr[2][7] = { { 10, 15, 15, 10, 15, 15, 15 },
{ 10, 17, 17, 10, 17, 17, 17 } };
const int thresh =
thresh_arr[cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats - 1]
[update_type];
uint16_t prune = 0;
int max_prob = -1;
int max_idx = 0;
#if CONFIG_DDT_INTER
for (i = 0; i < TX_TYPES_TRIG; i++) {
#else
for (i = 0; i < TX_TYPES; i++) {
#endif // CONFIG_DDT_INTER
if (tx_type_probs[i] > max_prob && (allowed_tx_mask & (1 << i))) {
max_prob = tx_type_probs[i];
max_idx = i;
}
if (tx_type_probs[i] < thresh) prune |= (1 << i);
}
if ((prune >> max_idx) & 0x01) prune &= ~(1 << max_idx);
allowed_tx_mask &= (~prune);
}
#if CONFIG_DDT_INTER
for (i = 0; i < TX_TYPES_TRIG; i++) {
#else
for (i = 0; i < TX_TYPES; i++) {
#endif // CONFIG_DDT_INTER
if (allowed_tx_mask & (1 << i)) num_allowed++;
}
assert(num_allowed > 0);
#if CONFIG_DEBUG && CONFIG_CROSS_CHROMA_TX
if (plane) {
const CctxType cctx_type = av1_get_cctx_type(xd, blk_row, blk_col);
assert(cctx_type == CCTX_NONE);
}
#endif // CONFIG_DEBUG && CONFIG_CROSS_CHROMA_TX
if (num_allowed > 2 && cpi->sf.tx_sf.tx_type_search.prune_tx_type_est_rd) {
int pf = prune_factors[txfm_params->prune_2d_txfm_mode];
int mf = mul_factors[txfm_params->prune_2d_txfm_mode];
if (num_allowed <= 7) {
const uint16_t prune =
prune_txk_type(cpi, x, plane, block, tx_size, blk_row, blk_col,
plane_bsize, txk_map, allowed_tx_mask, pf, txb_ctx,
cm->features.reduced_tx_set_used);
allowed_tx_mask &= (~prune);
} else {
const int num_sel = (num_allowed * mf + 50) / 100;
const uint16_t prune = prune_txk_type_separ(
cpi, x, plane, block, tx_size, blk_row, blk_col, plane_bsize,
txk_map, allowed_tx_mask, pf, txb_ctx,
cm->features.reduced_tx_set_used, ref_best_rd, num_sel);
allowed_tx_mask &= (~prune);
}
} else {
assert(num_allowed > 0);
int allowed_tx_count =
(txfm_params->prune_2d_txfm_mode >= TX_TYPE_PRUNE_4) ? 1 : 5;
// !fast_tx_search && txk_end != txk_start && plane == 0
if (txfm_params->prune_2d_txfm_mode >= TX_TYPE_PRUNE_1 && is_inter &&
num_allowed > allowed_tx_count) {
prune_tx_2D(x, plane_bsize, tx_size, blk_row, blk_col, tx_set_type,
txfm_params->prune_2d_txfm_mode, txk_map, &allowed_tx_mask);
}
}
}
#if CONFIG_FORWARDSKIP
if (mbmi->fsc_mode[xd->tree_type == CHROMA_PART] &&
txsize_sqr_up_map[tx_size] < TX_32X32 && plane == PLANE_TYPE_Y) {
txk_allowed = IDTX;
allowed_tx_mask = (1 << txk_allowed);
}
if (mbmi->fsc_mode[xd->tree_type == CHROMA_PART] == 0 && is_inter == 0 &&
(allowed_tx_mask >> IDTX)) {
uint16_t fsc_mask = UINT16_MAX - (1 << IDTX);
allowed_tx_mask &= fsc_mask;
}
#endif // CONFIG_FORWARDSKIP
// Need to have at least one transform type allowed.
if (allowed_tx_mask == 0) {
txk_allowed = (plane ? uv_tx_type : DCT_DCT);
allowed_tx_mask = (1 << txk_allowed);
}
assert(IMPLIES(txk_allowed < TX_TYPES, allowed_tx_mask == 1 << txk_allowed));
*allowed_txk_types = txk_allowed;
return allowed_tx_mask;
}
#if CONFIG_RD_DEBUG
static INLINE void update_txb_coeff_cost(RD_STATS *rd_stats, int plane,
TX_SIZE tx_size, int blk_row,
int blk_col, int txb_coeff_cost) {
(void)blk_row;
(void)blk_col;
(void)tx_size;
rd_stats->txb_coeff_cost[plane] += txb_coeff_cost;
{
const int txb_h = tx_size_high_unit[tx_size];
const int txb_w = tx_size_wide_unit[tx_size];
int idx, idy;
for (idy = 0; idy < txb_h; ++idy)
for (idx = 0; idx < txb_w; ++idx)
rd_stats->txb_coeff_cost_map[plane][blk_row + idy][blk_col + idx] = 0;
rd_stats->txb_coeff_cost_map[plane][blk_row][blk_col] = txb_coeff_cost;
}
assert(blk_row < TXB_COEFF_COST_MAP_SIZE);
assert(blk_col < TXB_COEFF_COST_MAP_SIZE);
}
#endif
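// Thin wrapper around av1_cost_coeffs_txb that optionally times the call
// when TXCOEFF_COST_TIMER is enabled.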
static INLINE int cost_coeffs(
#if CONFIG_FORWARDSKIP
const AV1_COMMON *cm,
#endif // CONFIG_FORWARDSKIP
MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, const TX_TYPE tx_type,
#if CONFIG_CROSS_CHROMA_TX
const CctxType cctx_type, int blk_row, int blk_col,
#endif // CONFIG_CROSS_CHROMA_TX
const TXB_CTX *const txb_ctx, int reduced_tx_set_used) {
#if TXCOEFF_COST_TIMER
struct aom_usec_timer timer;
aom_usec_timer_start(&timer);
#endif
const int cost = av1_cost_coeffs_txb(
#if CONFIG_FORWARDSKIP
cm,
#endif // CONFIG_FORWARDSKIP
x, plane, block, tx_size, tx_type,
#if CONFIG_CROSS_CHROMA_TX
cctx_type, blk_row, blk_col,