| /* |
| * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
| * |
| * This source code is subject to the terms of the BSD 2 Clause License and |
| * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
| * was not distributed with this source code in the LICENSE file, you can |
| * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
| * Media Patent License 1.0 was not distributed with this source code in the |
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
| |
| #include <assert.h> |
| #include <limits.h> |
| #include <math.h> |
| #include <stdio.h> |
| |
| #include "config/aom_dsp_rtcd.h" |
| #include "config/av1_rtcd.h" |
| |
| #include "aom_dsp/aom_dsp_common.h" |
| #include "aom_dsp/txfm_common.h" |
| #include "aom_ports/mem.h" |
| |
| #include "av1/common/blockd.h" |
| #include "av1/common/mvref_common.h" |
| #include "av1/common/pred_common.h" |
| #include "av1/common/reconinter.h" |
| #include "av1/common/reconintra.h" |
| |
| #include "av1/encoder/encodemv.h" |
| #include "av1/encoder/encoder.h" |
| #include "av1/encoder/intra_mode_search.h" |
| #include "av1/encoder/model_rd.h" |
| #include "av1/encoder/motion_search_facade.h" |
| #include "av1/encoder/nonrd_opt.h" |
| #include "av1/encoder/rdopt.h" |
| #include "av1/encoder/reconinter_enc.h" |
| #include "av1/encoder/var_based_part.h" |
| |
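// Biases an RD cost down to 7/8 of its value, i.e., 7 * rdcost / 8.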
| #define CALC_BIASED_RDCOST(rdcost) (7 * (rdcost) >> 3) |
| /*!\cond */ |
| typedef struct { |
| uint8_t *data; |
| int stride; |
| int in_use; |
| } PRED_BUFFER; |
| |
| typedef struct { |
| PRED_BUFFER *best_pred; |
| PREDICTION_MODE best_mode; |
| TX_SIZE best_tx_size; |
| TX_TYPE tx_type; |
| MV_REFERENCE_FRAME best_ref_frame; |
| MV_REFERENCE_FRAME best_second_ref_frame; |
| uint8_t best_mode_skip_txfm; |
| uint8_t best_mode_initial_skip_flag; |
| int_interpfilters best_pred_filter; |
| MOTION_MODE best_motion_mode; |
| WarpedMotionParams wm_params; |
| int num_proj_ref; |
| uint8_t blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE / 4]; |
| PALETTE_MODE_INFO pmi; |
| int64_t best_sse; |
| } BEST_PICKMODE; |
| |
| typedef struct { |
| MV_REFERENCE_FRAME ref_frame; |
| PREDICTION_MODE pred_mode; |
| } REF_MODE; |
| |
| typedef struct { |
| MV_REFERENCE_FRAME ref_frame[2]; |
| PREDICTION_MODE pred_mode; |
| } COMP_REF_MODE; |
| |
| typedef struct { |
| InterpFilter filter_x; |
| InterpFilter filter_y; |
| } INTER_FILTER; |
| |
| /*!\brief Structure to store parameters and statistics used in non-rd inter mode |
| * evaluation. |
| */ |
| typedef struct { |
| BEST_PICKMODE best_pickmode; |
| RD_STATS this_rdc; |
| RD_STATS best_rdc; |
| int64_t uv_dist[RTC_INTER_MODES][REF_FRAMES]; |
| struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE]; |
| unsigned int vars[RTC_INTER_MODES][REF_FRAMES]; |
| unsigned int ref_costs_single[REF_FRAMES]; |
| int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES]; |
| int_mv frame_mv_best[MB_MODE_COUNT][REF_FRAMES]; |
| int single_inter_mode_costs[RTC_INTER_MODES][REF_FRAMES]; |
| int use_ref_frame_mask[REF_FRAMES]; |
| uint8_t mode_checked[MB_MODE_COUNT][REF_FRAMES]; |
| } InterModeSearchStateNonrd; |
| /*!\endcond */ |
| |
| #define NUM_COMP_INTER_MODES_RT (6) |
| #define NUM_INTER_MODES 12 |
| |
| // GLOBALMV in the set below is in fact ZEROMV as we don't do global ME in RT |
| // mode |
| static const REF_MODE ref_mode_set[NUM_INTER_MODES] = { |
| { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV }, |
| { LAST_FRAME, GLOBALMV }, { LAST_FRAME, NEWMV }, |
| { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV }, |
| { GOLDEN_FRAME, GLOBALMV }, { GOLDEN_FRAME, NEWMV }, |
| { ALTREF_FRAME, NEARESTMV }, { ALTREF_FRAME, NEARMV }, |
| { ALTREF_FRAME, GLOBALMV }, { ALTREF_FRAME, NEWMV }, |
| }; |
| |
| static const COMP_REF_MODE comp_ref_mode_set[NUM_COMP_INTER_MODES_RT] = { |
| { { LAST_FRAME, GOLDEN_FRAME }, GLOBAL_GLOBALMV }, |
| { { LAST_FRAME, GOLDEN_FRAME }, NEAREST_NEARESTMV }, |
| { { LAST_FRAME, LAST2_FRAME }, GLOBAL_GLOBALMV }, |
| { { LAST_FRAME, LAST2_FRAME }, NEAREST_NEARESTMV }, |
| { { LAST_FRAME, ALTREF_FRAME }, GLOBAL_GLOBALMV }, |
| { { LAST_FRAME, ALTREF_FRAME }, NEAREST_NEARESTMV }, |
| }; |
| |
| static const INTER_FILTER filters_ref_set[9] = { |
| { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR }, { EIGHTTAP_SMOOTH, EIGHTTAP_SMOOTH }, |
| { EIGHTTAP_REGULAR, EIGHTTAP_SMOOTH }, { EIGHTTAP_SMOOTH, EIGHTTAP_REGULAR }, |
| { MULTITAP_SHARP, MULTITAP_SHARP }, { EIGHTTAP_REGULAR, MULTITAP_SHARP }, |
| { MULTITAP_SHARP, EIGHTTAP_REGULAR }, { EIGHTTAP_SMOOTH, MULTITAP_SHARP }, |
| { MULTITAP_SHARP, EIGHTTAP_SMOOTH } |
| }; |
| |
| enum { |
| // INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV), |
| INTER_NEAREST = (1 << NEARESTMV), |
| INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV), |
| INTER_NEAREST_NEAR = (1 << NEARESTMV) | (1 << NEARMV), |
| INTER_NEAR_NEW = (1 << NEARMV) | (1 << NEWMV), |
| }; |
| |
// The original scan order (default_scan_8x8) is modified to account for the
// extra transpose in the Hadamard C implementations, i.e.,
// aom_hadamard_lp_8x8_c and aom_hadamard_8x8_c.
| DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8_transpose[64]) = { |
| 0, 8, 1, 2, 9, 16, 24, 17, 10, 3, 4, 11, 18, 25, 32, 40, |
| 33, 26, 19, 12, 5, 6, 13, 20, 27, 34, 41, 48, 56, 49, 42, 35, |
| 28, 21, 14, 7, 15, 22, 29, 36, 43, 50, 57, 58, 51, 44, 37, 30, |
| 23, 31, 38, 45, 52, 59, 60, 53, 46, 39, 47, 54, 61, 62, 55, 63 |
| }; |
| |
// The original scan order (av1_default_iscan_8x8) is modified to match the
// Hadamard AVX2 implementations, i.e., aom_hadamard_lp_8x8_avx2 and
// aom_hadamard_8x8_avx2. These implementations reorder the coefficients, so
// the normal scan order is no longer guaranteed to scan low-frequency
// coefficients first; the scan order is modified accordingly.
// Note that this table has to be used together with
// default_scan_8x8_transpose.
| DECLARE_ALIGNED(16, static const int16_t, |
| av1_default_iscan_8x8_transpose[64]) = { |
| 0, 2, 3, 9, 10, 20, 21, 35, 1, 4, 8, 11, 19, 22, 34, 36, |
| 5, 7, 12, 18, 23, 33, 37, 48, 6, 13, 17, 24, 32, 38, 47, 49, |
| 14, 16, 25, 31, 39, 46, 50, 57, 15, 26, 30, 40, 45, 51, 56, 58, |
| 27, 29, 41, 44, 52, 55, 59, 62, 28, 42, 43, 53, 54, 60, 61, 63 |
| }; |
| |
// The original scan order (default_scan_16x16) is modified to account for the
// extra transpose in the low-precision (lp) Hadamard C implementation, i.e.,
// aom_hadamard_lp_16x16_c.
| DECLARE_ALIGNED(16, static const int16_t, |
| default_scan_lp_16x16_transpose[256]) = { |
| 0, 8, 2, 4, 10, 16, 24, 18, 12, 6, 64, 14, 20, 26, 32, |
| 40, 34, 28, 22, 72, 66, 68, 74, 80, 30, 36, 42, 48, 56, 50, |
| 44, 38, 88, 82, 76, 70, 128, 78, 84, 90, 96, 46, 52, 58, 1, |
| 9, 3, 60, 54, 104, 98, 92, 86, 136, 130, 132, 138, 144, 94, 100, |
| 106, 112, 62, 5, 11, 17, 25, 19, 13, 7, 120, 114, 108, 102, 152, |
| 146, 140, 134, 192, 142, 148, 154, 160, 110, 116, 122, 65, 15, 21, 27, |
| 33, 41, 35, 29, 23, 73, 67, 124, 118, 168, 162, 156, 150, 200, 194, |
| 196, 202, 208, 158, 164, 170, 176, 126, 69, 75, 81, 31, 37, 43, 49, |
| 57, 51, 45, 39, 89, 83, 77, 71, 184, 178, 172, 166, 216, 210, 204, |
| 198, 206, 212, 218, 224, 174, 180, 186, 129, 79, 85, 91, 97, 47, 53, |
| 59, 61, 55, 105, 99, 93, 87, 137, 131, 188, 182, 232, 226, 220, 214, |
| 222, 228, 234, 240, 190, 133, 139, 145, 95, 101, 107, 113, 63, 121, 115, |
| 109, 103, 153, 147, 141, 135, 248, 242, 236, 230, 238, 244, 250, 193, 143, |
| 149, 155, 161, 111, 117, 123, 125, 119, 169, 163, 157, 151, 201, 195, 252, |
| 246, 254, 197, 203, 209, 159, 165, 171, 177, 127, 185, 179, 173, 167, 217, |
| 211, 205, 199, 207, 213, 219, 225, 175, 181, 187, 189, 183, 233, 227, 221, |
| 215, 223, 229, 235, 241, 191, 249, 243, 237, 231, 239, 245, 251, 253, 247, |
| 255 |
| }; |
| |
| #if CONFIG_AV1_HIGHBITDEPTH |
// The original scan order (default_scan_16x16) is modified to account for the
// extra shift in the full-precision (fp) Hadamard C implementation, i.e.,
// aom_hadamard_16x16_c. Note that the 16x16 lp and fp Hadamard transforms
// generate different outputs, so they are handled separately.
| DECLARE_ALIGNED(16, static const int16_t, |
| default_scan_fp_16x16_transpose[256]) = { |
| 0, 4, 2, 8, 6, 16, 20, 18, 12, 10, 64, 14, 24, 22, 32, |
| 36, 34, 28, 26, 68, 66, 72, 70, 80, 30, 40, 38, 48, 52, 50, |
| 44, 42, 84, 82, 76, 74, 128, 78, 88, 86, 96, 46, 56, 54, 1, |
| 5, 3, 60, 58, 100, 98, 92, 90, 132, 130, 136, 134, 144, 94, 104, |
| 102, 112, 62, 9, 7, 17, 21, 19, 13, 11, 116, 114, 108, 106, 148, |
| 146, 140, 138, 192, 142, 152, 150, 160, 110, 120, 118, 65, 15, 25, 23, |
| 33, 37, 35, 29, 27, 69, 67, 124, 122, 164, 162, 156, 154, 196, 194, |
| 200, 198, 208, 158, 168, 166, 176, 126, 73, 71, 81, 31, 41, 39, 49, |
| 53, 51, 45, 43, 85, 83, 77, 75, 180, 178, 172, 170, 212, 210, 204, |
| 202, 206, 216, 214, 224, 174, 184, 182, 129, 79, 89, 87, 97, 47, 57, |
| 55, 61, 59, 101, 99, 93, 91, 133, 131, 188, 186, 228, 226, 220, 218, |
| 222, 232, 230, 240, 190, 137, 135, 145, 95, 105, 103, 113, 63, 117, 115, |
| 109, 107, 149, 147, 141, 139, 244, 242, 236, 234, 238, 248, 246, 193, 143, |
| 153, 151, 161, 111, 121, 119, 125, 123, 165, 163, 157, 155, 197, 195, 252, |
| 250, 254, 201, 199, 209, 159, 169, 167, 177, 127, 181, 179, 173, 171, 213, |
| 211, 205, 203, 207, 217, 215, 225, 175, 185, 183, 189, 187, 229, 227, 221, |
| 219, 223, 233, 231, 241, 191, 245, 243, 237, 235, 239, 249, 247, 253, 251, |
| 255 |
| }; |
| #endif |
| |
// The original scan order (av1_default_iscan_16x16) is modified to match the
// Hadamard AVX2 implementation, i.e., aom_hadamard_lp_16x16_avx2. This
// implementation reorders the coefficients, so the normal scan order is no
// longer guaranteed to scan low-frequency coefficients first; the scan order
// is modified accordingly. Note that this table has to be used together with
// default_scan_lp_16x16_transpose.
| DECLARE_ALIGNED(16, static const int16_t, |
| av1_default_iscan_lp_16x16_transpose[256]) = { |
| 0, 44, 2, 46, 3, 63, 9, 69, 1, 45, 4, 64, 8, 68, 11, |
| 87, 5, 65, 7, 67, 12, 88, 18, 94, 6, 66, 13, 89, 17, 93, |
| 24, 116, 14, 90, 16, 92, 25, 117, 31, 123, 15, 91, 26, 118, 30, |
| 122, 41, 148, 27, 119, 29, 121, 42, 149, 48, 152, 28, 120, 43, 150, |
| 47, 151, 62, 177, 10, 86, 20, 96, 21, 113, 35, 127, 19, 95, 22, |
| 114, 34, 126, 37, 144, 23, 115, 33, 125, 38, 145, 52, 156, 32, 124, |
| 39, 146, 51, 155, 58, 173, 40, 147, 50, 154, 59, 174, 73, 181, 49, |
| 153, 60, 175, 72, 180, 83, 198, 61, 176, 71, 179, 84, 199, 98, 202, |
| 70, 178, 85, 200, 97, 201, 112, 219, 36, 143, 54, 158, 55, 170, 77, |
| 185, 53, 157, 56, 171, 76, 184, 79, 194, 57, 172, 75, 183, 80, 195, |
| 102, 206, 74, 182, 81, 196, 101, 205, 108, 215, 82, 197, 100, 204, 109, |
| 216, 131, 223, 99, 203, 110, 217, 130, 222, 140, 232, 111, 218, 129, 221, |
| 141, 233, 160, 236, 128, 220, 142, 234, 159, 235, 169, 245, 78, 193, 104, |
| 208, 105, 212, 135, 227, 103, 207, 106, 213, 134, 226, 136, 228, 107, 214, |
| 133, 225, 137, 229, 164, 240, 132, 224, 138, 230, 163, 239, 165, 241, 139, |
| 231, 162, 238, 166, 242, 189, 249, 161, 237, 167, 243, 188, 248, 190, 250, |
| 168, 244, 187, 247, 191, 251, 210, 254, 186, 246, 192, 252, 209, 253, 211, |
| 255 |
| }; |
| |
| #if CONFIG_AV1_HIGHBITDEPTH |
// The original scan order (av1_default_iscan_16x16) is modified to match the
// Hadamard AVX2 implementation, i.e., aom_hadamard_16x16_avx2. This
// implementation reorders the coefficients, so the normal scan order is no
// longer guaranteed to scan low-frequency coefficients first; the scan order
// is modified accordingly. Note that this table has to be used together with
// default_scan_fp_16x16_transpose.
| DECLARE_ALIGNED(16, static const int16_t, |
| av1_default_iscan_fp_16x16_transpose[256]) = { |
| 0, 44, 2, 46, 1, 45, 4, 64, 3, 63, 9, 69, 8, 68, 11, |
| 87, 5, 65, 7, 67, 6, 66, 13, 89, 12, 88, 18, 94, 17, 93, |
| 24, 116, 14, 90, 16, 92, 15, 91, 26, 118, 25, 117, 31, 123, 30, |
| 122, 41, 148, 27, 119, 29, 121, 28, 120, 43, 150, 42, 149, 48, 152, |
| 47, 151, 62, 177, 10, 86, 20, 96, 19, 95, 22, 114, 21, 113, 35, |
| 127, 34, 126, 37, 144, 23, 115, 33, 125, 32, 124, 39, 146, 38, 145, |
| 52, 156, 51, 155, 58, 173, 40, 147, 50, 154, 49, 153, 60, 175, 59, |
| 174, 73, 181, 72, 180, 83, 198, 61, 176, 71, 179, 70, 178, 85, 200, |
| 84, 199, 98, 202, 97, 201, 112, 219, 36, 143, 54, 158, 53, 157, 56, |
| 171, 55, 170, 77, 185, 76, 184, 79, 194, 57, 172, 75, 183, 74, 182, |
| 81, 196, 80, 195, 102, 206, 101, 205, 108, 215, 82, 197, 100, 204, 99, |
| 203, 110, 217, 109, 216, 131, 223, 130, 222, 140, 232, 111, 218, 129, 221, |
| 128, 220, 142, 234, 141, 233, 160, 236, 159, 235, 169, 245, 78, 193, 104, |
| 208, 103, 207, 106, 213, 105, 212, 135, 227, 134, 226, 136, 228, 107, 214, |
| 133, 225, 132, 224, 138, 230, 137, 229, 164, 240, 163, 239, 165, 241, 139, |
| 231, 162, 238, 161, 237, 167, 243, 166, 242, 189, 249, 188, 248, 190, 250, |
| 168, 244, 187, 247, 186, 246, 192, 252, 191, 251, 210, 254, 209, 253, 211, |
| 255 |
| }; |
| #endif |
| |
| static INLINE int early_term_inter_search_with_sse(int early_term_idx, |
| BLOCK_SIZE bsize, |
| int64_t this_sse, |
| int64_t best_sse, |
| PREDICTION_MODE this_mode) { |
| // Aggressiveness to terminate inter mode search early is adjusted based on |
| // speed and block size. |
| static const double early_term_thresh[4][4] = { { 0.65, 0.65, 0.65, 0.7 }, |
| { 0.6, 0.65, 0.85, 0.9 }, |
| { 0.5, 0.5, 0.55, 0.6 }, |
| { 0.6, 0.75, 0.85, 0.85 } }; |
| static const double early_term_thresh_newmv_nearestmv[4] = { 0.3, 0.3, 0.3, |
| 0.3 }; |
| |
| const int size_group = size_group_lookup[bsize]; |
| assert(size_group < 4); |
| assert((early_term_idx > 0) && (early_term_idx < EARLY_TERM_INDICES)); |
| const double threshold = |
| ((early_term_idx == EARLY_TERM_IDX_4) && |
| (this_mode == NEWMV || this_mode == NEARESTMV)) |
| ? early_term_thresh_newmv_nearestmv[size_group] |
| : early_term_thresh[early_term_idx - 1][size_group]; |
| |
| // Terminate inter mode search early based on best sse so far. |
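  // For example, with EARLY_TERM_IDX_1 and size group 2, the threshold is
  // 0.65, so a candidate mode is pruned once its SSE exceeds best_sse / 0.65,
  // i.e., roughly 1.5 times the best SSE seen so far.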
| if ((early_term_idx > 0) && (threshold * this_sse > best_sse)) { |
| return 1; |
| } |
| return 0; |
| } |
| |
| static INLINE void init_best_pickmode(BEST_PICKMODE *bp) { |
| bp->best_sse = INT64_MAX; |
| bp->best_mode = NEARESTMV; |
| bp->best_ref_frame = LAST_FRAME; |
| bp->best_second_ref_frame = NONE_FRAME; |
| bp->best_tx_size = TX_8X8; |
| bp->tx_type = DCT_DCT; |
| bp->best_pred_filter = av1_broadcast_interp_filter(EIGHTTAP_REGULAR); |
| bp->best_mode_skip_txfm = 0; |
| bp->best_mode_initial_skip_flag = 0; |
| bp->best_pred = NULL; |
| bp->best_motion_mode = SIMPLE_TRANSLATION; |
| bp->num_proj_ref = 0; |
| memset(&bp->wm_params, 0, sizeof(bp->wm_params)); |
| memset(&bp->blk_skip, 0, sizeof(bp->blk_skip)); |
| memset(&bp->pmi, 0, sizeof(bp->pmi)); |
| } |
| |
| static INLINE int subpel_select(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, |
| int_mv *mv, MV ref_mv, FULLPEL_MV start_mv, |
| bool fullpel_performed_well) { |
| const int frame_lowmotion = cpi->rc.avg_frame_low_motion; |
| // Reduce MV precision for higher int MV value & frame-level motion |
| if (cpi->sf.rt_sf.reduce_mv_pel_precision_highmotion >= 3) { |
    const int is_low_resoln =
        (cpi->common.width * cpi->common.height <= 320 * 240);
    int mv_thresh = (bsize > BLOCK_32X32) ? 2 : (bsize > BLOCK_16X16) ? 4 : 6;
| if (frame_lowmotion > 0 && frame_lowmotion < 40) mv_thresh = 12; |
| mv_thresh = (is_low_resoln) ? mv_thresh >> 1 : mv_thresh; |
| if (abs(mv->as_fullmv.row) >= mv_thresh || |
| abs(mv->as_fullmv.col) >= mv_thresh) |
| return HALF_PEL; |
| } else if (cpi->sf.rt_sf.reduce_mv_pel_precision_highmotion >= 1) { |
| int mv_thresh; |
| const int th_vals[2][3] = { { 4, 8, 10 }, { 4, 6, 8 } }; |
| const int th_idx = cpi->sf.rt_sf.reduce_mv_pel_precision_highmotion - 1; |
| assert(th_idx >= 0 && th_idx < 2); |
| if (frame_lowmotion > 0 && frame_lowmotion < 40) |
| mv_thresh = 12; |
| else |
| mv_thresh = (bsize >= BLOCK_32X32) ? th_vals[th_idx][0] |
| : (bsize >= BLOCK_16X16) ? th_vals[th_idx][1] |
| : th_vals[th_idx][2]; |
| if (abs(mv->as_fullmv.row) >= (mv_thresh << 1) || |
| abs(mv->as_fullmv.col) >= (mv_thresh << 1)) |
| return FULL_PEL; |
| else if (abs(mv->as_fullmv.row) >= mv_thresh || |
| abs(mv->as_fullmv.col) >= mv_thresh) |
| return HALF_PEL; |
| } |
  // Reduce MV precision for relatively static (e.g. background),
  // low-complexity large areas.
| if (cpi->sf.rt_sf.reduce_mv_pel_precision_lowcomplex >= 2) { |
| const int qband = x->qindex >> (QINDEX_BITS - 2); |
| assert(qband < 4); |
| if (x->content_state_sb.source_sad_nonrd <= kVeryLowSad && |
| bsize > BLOCK_16X16 && qband != 0) { |
| if (x->source_variance < 500) |
| return FULL_PEL; |
| else if (x->source_variance < 5000) |
| return HALF_PEL; |
| } |
| } else if (cpi->sf.rt_sf.reduce_mv_pel_precision_lowcomplex >= 1) { |
| if (fullpel_performed_well && ref_mv.row == 0 && ref_mv.col == 0 && |
| start_mv.row == 0 && start_mv.col == 0) |
| return HALF_PEL; |
| } |
| return cpi->sf.mv_sf.subpel_force_stop; |
| } |
| |
| static bool use_aggressive_subpel_search_method( |
| MACROBLOCK *x, bool use_adaptive_subpel_search, |
| const bool fullpel_performed_well) { |
| if (!use_adaptive_subpel_search) return false; |
| const int qband = x->qindex >> (QINDEX_BITS - 2); |
| assert(qband < 4); |
| if ((qband > 0) && (fullpel_performed_well || |
| (x->content_state_sb.source_sad_nonrd <= kLowSad) || |
| (x->source_variance < 100))) |
| return true; |
| return false; |
| } |
| |
| /*!\brief Runs Motion Estimation for a specific block and specific ref frame. |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
 * Finds the best Motion Vector by running Motion Estimation for a specific
 * block and a specific reference frame. Exits early if the RD cost of the
 * full-pel part exceeds the best RD cost found so far.
| * \param[in] cpi Top-level encoder structure |
| * \param[in] x Pointer to structure holding all the |
| * data for the current macroblock |
| * \param[in] bsize Current block size |
| * \param[in] mi_row Row index in 4x4 units |
| * \param[in] mi_col Column index in 4x4 units |
| * \param[in] tmp_mv Pointer to best found New MV |
| * \param[in] rate_mv Pointer to Rate of the best new MV |
| * \param[in] best_rd_sofar RD Cost of the best mode found so far |
| * \param[in] use_base_mv Flag, indicating that tmp_mv holds |
| * specific MV to start the search with |
| * |
 * \return Returns 0 if ME was terminated after the full-pel search because
 * the RD cost was too high; otherwise returns 1. The best new MV is placed
 * into \c tmp_mv, and the rate estimate for this vector is placed in
 * \c rate_mv.
| */ |
| static int combined_motion_search(AV1_COMP *cpi, MACROBLOCK *x, |
| BLOCK_SIZE bsize, int mi_row, int mi_col, |
| int_mv *tmp_mv, int *rate_mv, |
| int64_t best_rd_sofar, int use_base_mv) { |
| MACROBLOCKD *xd = &x->e_mbd; |
| const AV1_COMMON *cm = &cpi->common; |
| const int num_planes = av1_num_planes(cm); |
| const SPEED_FEATURES *sf = &cpi->sf; |
| MB_MODE_INFO *mi = xd->mi[0]; |
| struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } }; |
| int step_param = (sf->rt_sf.fullpel_search_step_param) |
| ? sf->rt_sf.fullpel_search_step_param |
| : cpi->mv_search_params.mv_step_param; |
| FULLPEL_MV start_mv; |
| const int ref = mi->ref_frame[0]; |
| const MV ref_mv = av1_get_ref_mv(x, mi->ref_mv_idx).as_mv; |
| MV center_mv; |
| int dis; |
| int rv = 0; |
| int cost_list[5]; |
| int search_subpel = 1; |
| const YV12_BUFFER_CONFIG *scaled_ref_frame = |
| av1_get_scaled_ref_frame(cpi, ref); |
| |
| if (scaled_ref_frame) { |
| int i; |
| // Swap out the reference frame for a version that's been scaled to |
| // match the resolution of the current frame, allowing the existing |
| // motion search code to be used without additional modifications. |
| for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0]; |
| av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL, |
| num_planes); |
| } |
| |
| start_mv = get_fullmv_from_mv(&ref_mv); |
| |
| if (!use_base_mv) |
| center_mv = ref_mv; |
| else |
| center_mv = tmp_mv->as_mv; |
| |
| const SEARCH_METHODS search_method = sf->mv_sf.search_method; |
| const search_site_config *src_search_sites = |
| av1_get_search_site_config(cpi, x, search_method); |
| FULLPEL_MOTION_SEARCH_PARAMS full_ms_params; |
| av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize, ¢er_mv, |
| src_search_sites, |
| /*fine_search_interval=*/0); |
| |
| const unsigned int full_var_rd = av1_full_pixel_search( |
| start_mv, &full_ms_params, step_param, cond_cost_list(cpi, cost_list), |
| &tmp_mv->as_fullmv, NULL); |
| |
  // Calculate the bit cost of the motion vector.
| MV mvp_full = get_mv_from_fullmv(&tmp_mv->as_fullmv); |
| |
| *rate_mv = av1_mv_bit_cost(&mvp_full, &ref_mv, x->mv_costs->nmv_joint_cost, |
| x->mv_costs->mv_cost_stack, MV_COST_WEIGHT); |
| |
| // TODO(kyslov) Account for Rate Mode! |
| rv = !(RDCOST(x->rdmult, (*rate_mv), 0) > best_rd_sofar); |
| |
| if (rv && search_subpel) { |
| SUBPEL_MOTION_SEARCH_PARAMS ms_params; |
| av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv, |
| cost_list); |
| const bool fullpel_performed_well = |
| (bsize == BLOCK_64X64 && full_var_rd * 40 < 62267 * 7) || |
| (bsize == BLOCK_32X32 && full_var_rd * 8 < 42380) || |
| (bsize == BLOCK_16X16 && full_var_rd * 8 < 10127); |
| if (sf->rt_sf.reduce_mv_pel_precision_highmotion || |
| sf->rt_sf.reduce_mv_pel_precision_lowcomplex) |
| ms_params.forced_stop = subpel_select(cpi, x, bsize, tmp_mv, ref_mv, |
| start_mv, fullpel_performed_well); |
| |
| MV subpel_start_mv = get_mv_from_fullmv(&tmp_mv->as_fullmv); |
| assert(av1_is_subpelmv_in_range(&ms_params.mv_limits, subpel_start_mv)); |
| // adaptively downgrade subpel search method based on block properties |
| if (use_aggressive_subpel_search_method( |
| x, sf->rt_sf.use_adaptive_subpel_search, fullpel_performed_well)) |
| av1_find_best_sub_pixel_tree_pruned_more(xd, cm, &ms_params, |
| subpel_start_mv, &tmp_mv->as_mv, |
| &dis, &x->pred_sse[ref], NULL); |
| else |
| cpi->mv_search_params.find_fractional_mv_step( |
| xd, cm, &ms_params, subpel_start_mv, &tmp_mv->as_mv, &dis, |
| &x->pred_sse[ref], NULL); |
| *rate_mv = |
| av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->mv_costs->nmv_joint_cost, |
| x->mv_costs->mv_cost_stack, MV_COST_WEIGHT); |
| } |
| |
| if (scaled_ref_frame) { |
| int i; |
| for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i]; |
| } |
  // The final MV cannot be equal to the reference MV, as this will trigger an
  // assert later. This can happen if both NEAREST and NEAR modes were skipped.
| rv = (tmp_mv->as_mv.col != ref_mv.col || tmp_mv->as_mv.row != ref_mv.row); |
| return rv; |
| } |
| |
| /*!\brief Searches for the best New Motion Vector. |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
| * Finds the best Motion Vector by doing Motion Estimation. Uses reduced |
| * complexity ME for non-LAST frames or calls \c combined_motion_search |
| * for LAST reference frame |
| * \param[in] cpi Top-level encoder structure |
| * \param[in] x Pointer to structure holding all the |
| * data for the current macroblock |
| * \param[in] frame_mv Array that holds MVs for all modes |
| * and ref frames |
| * \param[in] ref_frame Reference frame for which to find |
| * the best New MVs |
| * \param[in] gf_temporal_ref Flag, indicating temporal reference |
| * for GOLDEN frame |
| * \param[in] bsize Current block size |
| * \param[in] mi_row Row index in 4x4 units |
| * \param[in] mi_col Column index in 4x4 units |
| * \param[in] rate_mv Pointer to Rate of the best new MV |
| * \param[in] best_rdc Pointer to the RD Cost for the best |
| * mode found so far |
| * |
 * \return Returns -1 if the search was not done, otherwise returns 0.
 * The best new MV is placed into the \c frame_mv array, and the rate estimate
 * for this vector is placed in \c rate_mv.
| */ |
| static int search_new_mv(AV1_COMP *cpi, MACROBLOCK *x, |
| int_mv frame_mv[][REF_FRAMES], |
| MV_REFERENCE_FRAME ref_frame, int gf_temporal_ref, |
| BLOCK_SIZE bsize, int mi_row, int mi_col, int *rate_mv, |
| RD_STATS *best_rdc) { |
| MACROBLOCKD *const xd = &x->e_mbd; |
| MB_MODE_INFO *const mi = xd->mi[0]; |
| AV1_COMMON *cm = &cpi->common; |
| if (ref_frame > LAST_FRAME && cpi->oxcf.rc_cfg.mode == AOM_CBR && |
| gf_temporal_ref) { |
| int tmp_sad; |
| int dis; |
| |
| if (bsize < BLOCK_16X16) return -1; |
| |
| tmp_sad = av1_int_pro_motion_estimation( |
| cpi, x, bsize, mi_row, mi_col, |
| &x->mbmi_ext.ref_mv_stack[ref_frame][0].this_mv.as_mv); |
| |
| if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) return -1; |
| |
| frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int; |
| int_mv best_mv = mi->mv[0]; |
| best_mv.as_mv.row >>= 3; |
| best_mv.as_mv.col >>= 3; |
| MV ref_mv = av1_get_ref_mv(x, 0).as_mv; |
| frame_mv[NEWMV][ref_frame].as_mv.row >>= 3; |
| frame_mv[NEWMV][ref_frame].as_mv.col >>= 3; |
| |
| SUBPEL_MOTION_SEARCH_PARAMS ms_params; |
| av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv, NULL); |
| if (cpi->sf.rt_sf.reduce_mv_pel_precision_highmotion || |
| cpi->sf.rt_sf.reduce_mv_pel_precision_lowcomplex) { |
| FULLPEL_MV start_mv = { .row = 0, .col = 0 }; |
| ms_params.forced_stop = |
| subpel_select(cpi, x, bsize, &best_mv, ref_mv, start_mv, false); |
| } |
| MV start_mv = get_mv_from_fullmv(&best_mv.as_fullmv); |
| assert(av1_is_subpelmv_in_range(&ms_params.mv_limits, start_mv)); |
| cpi->mv_search_params.find_fractional_mv_step( |
| xd, cm, &ms_params, start_mv, &best_mv.as_mv, &dis, |
| &x->pred_sse[ref_frame], NULL); |
| frame_mv[NEWMV][ref_frame].as_int = best_mv.as_int; |
| |
    // When NEWMV is the same as ref_mv from the DRL, it is preferred to code
    // the MV as NEARESTMV or NEARMV. In this case, NEWMV needs to be skipped
    // to avoid an assert failure at a later stage. The scenario can occur if
    // NEARESTMV was not evaluated for ALTREF.
| if (frame_mv[NEWMV][ref_frame].as_mv.col == ref_mv.col && |
| frame_mv[NEWMV][ref_frame].as_mv.row == ref_mv.row) |
| return -1; |
| |
| *rate_mv = av1_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv, &ref_mv, |
| x->mv_costs->nmv_joint_cost, |
| x->mv_costs->mv_cost_stack, MV_COST_WEIGHT); |
| } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col, |
| &frame_mv[NEWMV][ref_frame], rate_mv, |
| best_rdc->rdcost, 0)) { |
| return -1; |
| } |
| |
| return 0; |
| } |
| |
| static void estimate_single_ref_frame_costs(const AV1_COMMON *cm, |
| const MACROBLOCKD *xd, |
| const ModeCosts *mode_costs, |
| int segment_id, BLOCK_SIZE bsize, |
| unsigned int *ref_costs_single) { |
| int seg_ref_active = |
| segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME); |
| if (seg_ref_active) { |
| memset(ref_costs_single, 0, REF_FRAMES * sizeof(*ref_costs_single)); |
| } else { |
| int intra_inter_ctx = av1_get_intra_inter_context(xd); |
| ref_costs_single[INTRA_FRAME] = |
| mode_costs->intra_inter_cost[intra_inter_ctx][0]; |
| unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1]; |
| if (cm->current_frame.reference_mode == REFERENCE_MODE_SELECT && |
| is_comp_ref_allowed(bsize)) { |
| const int comp_ref_type_ctx = av1_get_comp_reference_type_context(xd); |
| base_cost += mode_costs->comp_ref_type_cost[comp_ref_type_ctx][1]; |
| } |
| ref_costs_single[LAST_FRAME] = base_cost; |
| ref_costs_single[GOLDEN_FRAME] = base_cost; |
| ref_costs_single[ALTREF_FRAME] = base_cost; |
| // add cost for last, golden, altref |
| ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[0][0][0]; |
| ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][0][1]; |
| ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][1][0]; |
| ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][0][1]; |
| ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][2][0]; |
| } |
| } |
| |
| static INLINE void set_force_skip_flag(const AV1_COMP *const cpi, |
| MACROBLOCK *const x, unsigned int sse, |
| int *force_skip) { |
  if (x->txfm_search_params.tx_mode_search_type == TX_MODE_SELECT &&
      cpi->sf.rt_sf.tx_size_level_based_on_qstep >= 2) {
| const int qstep = x->plane[0].dequant_QTX[1] >> (x->e_mbd.bd - 5); |
| const unsigned int qstep_sq = qstep * qstep; |
| // If the sse is low for low source variance blocks, mark those as |
| // transform skip. |
| // Note: Though qstep_sq is based on ac qstep, the threshold is kept |
| // low so that reliable early estimate of tx skip can be obtained |
| // through its comparison with sse. |
| if (sse < qstep_sq && x->source_variance < qstep_sq && |
| x->color_sensitivity[0] == 0 && x->color_sensitivity[1] == 0) |
| *force_skip = 1; |
| } |
| } |
| |
| #define CAP_TX_SIZE_FOR_BSIZE_GT32(tx_mode_search_type, bsize) \ |
| (((tx_mode_search_type) != ONLY_4X4 && (bsize) > BLOCK_32X32) ? true : false) |
| #define TX_SIZE_FOR_BSIZE_GT32 (TX_16X16) |
| |
| static TX_SIZE calculate_tx_size(const AV1_COMP *const cpi, BLOCK_SIZE bsize, |
| MACROBLOCK *const x, unsigned int var, |
| unsigned int sse, int *force_skip) { |
| MACROBLOCKD *const xd = &x->e_mbd; |
| TX_SIZE tx_size; |
| const TxfmSearchParams *txfm_params = &x->txfm_search_params; |
| if (txfm_params->tx_mode_search_type == TX_MODE_SELECT) { |
| int multiplier = 8; |
| unsigned int var_thresh = 0; |
| unsigned int is_high_var = 1; |
| // Use quantizer based thresholds to determine transform size. |
| if (cpi->sf.rt_sf.tx_size_level_based_on_qstep) { |
| const int qband = x->qindex >> (QINDEX_BITS - 2); |
| const int mult[4] = { 8, 7, 6, 5 }; |
| assert(qband < 4); |
| multiplier = mult[qband]; |
| const int qstep = x->plane[0].dequant_QTX[1] >> (xd->bd - 5); |
| const unsigned int qstep_sq = qstep * qstep; |
| var_thresh = qstep_sq * 2; |
| if (cpi->sf.rt_sf.tx_size_level_based_on_qstep >= 2) { |
| // If the sse is low for low source variance blocks, mark those as |
| // transform skip. |
| // Note: Though qstep_sq is based on ac qstep, the threshold is kept |
| // low so that reliable early estimate of tx skip can be obtained |
| // through its comparison with sse. |
| if (sse < qstep_sq && x->source_variance < qstep_sq && |
| x->color_sensitivity[0] == 0 && x->color_sensitivity[1] == 0) |
| *force_skip = 1; |
| // Further lower transform size based on aq mode only if residual |
| // variance is high. |
| is_high_var = (var >= var_thresh); |
| } |
| } |
| // Choose larger transform size for blocks where dc component is dominant or |
| // the ac component is low. |
| if (sse > ((var * multiplier) >> 2) || (var < var_thresh)) |
| tx_size = |
| AOMMIN(max_txsize_lookup[bsize], |
| tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]); |
| else |
| tx_size = TX_8X8; |
| |
| if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ && |
| cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) && is_high_var) |
| tx_size = TX_8X8; |
| else if (tx_size > TX_16X16) |
| tx_size = TX_16X16; |
| } else { |
| tx_size = |
| AOMMIN(max_txsize_lookup[bsize], |
| tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]); |
| } |
| |
| if (CAP_TX_SIZE_FOR_BSIZE_GT32(txfm_params->tx_mode_search_type, bsize)) |
| tx_size = TX_SIZE_FOR_BSIZE_GT32; |
| |
| return AOMMIN(tx_size, TX_16X16); |
| } |
| |
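// Log2 of the block width/height in units of 4 samples (mode-info units);
// e.g., BLOCK_64X64 maps to 4 in both tables since 64 = 4 << 4.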
| static const uint8_t b_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 1, 1, 1, 2, |
| 2, 2, 3, 3, 3, 4, |
| 4, 4, 5, 5 }; |
| static const uint8_t b_height_log2_lookup[BLOCK_SIZES] = { 0, 1, 0, 1, 2, 1, |
| 2, 3, 2, 3, 4, 3, |
| 4, 5, 4, 5 }; |
| |
| static void block_variance(const uint8_t *src, int src_stride, |
| const uint8_t *ref, int ref_stride, int w, int h, |
| unsigned int *sse, int *sum, int block_size, |
| uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) { |
| int k = 0; |
| *sse = 0; |
| *sum = 0; |
| |
  // This function is called for block sizes >= BLOCK_32X32. As per the design,
  // aom_get_var_sse_sum_8x8_quad() processes four 8x8 blocks (in an 8x32 area)
  // per call. Hence the width and height of the block need to be at least 32
  // and 8 samples respectively.
| assert(w >= 32); |
| assert(h >= 8); |
| for (int i = 0; i < h; i += block_size) { |
| for (int j = 0; j < w; j += 32) { |
| aom_get_var_sse_sum_8x8_quad( |
| src + src_stride * i + j, src_stride, ref + ref_stride * i + j, |
| ref_stride, &sse8x8[k], &sum8x8[k], sse, sum, &var8x8[k]); |
| k += 4; |
| } |
| } |
| } |
| |
| static void block_variance_16x16_dual(const uint8_t *src, int src_stride, |
| const uint8_t *ref, int ref_stride, int w, |
| int h, unsigned int *sse, int *sum, |
| int block_size, uint32_t *sse16x16, |
| uint32_t *var16x16) { |
| int k = 0; |
| *sse = 0; |
| *sum = 0; |
  // This function is called for block sizes >= BLOCK_32X32. As per the design,
  // aom_get_var_sse_sum_16x16_dual() processes two 16x16 blocks (in a 16x32
  // area) per call. Hence the width and height of the block need to be at
  // least 32 and 16 samples respectively.
| assert(w >= 32); |
| assert(h >= 16); |
| for (int i = 0; i < h; i += block_size) { |
| for (int j = 0; j < w; j += 32) { |
| aom_get_var_sse_sum_16x16_dual(src + src_stride * i + j, src_stride, |
| ref + ref_stride * i + j, ref_stride, |
| &sse16x16[k], sse, sum, &var16x16[k]); |
| k += 2; |
| } |
| } |
| } |
| |
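// Aggregates the sse/sum of each 2x2 group of unit-size (tx_size) sub-blocks
// into stats for the next-larger transform size, deriving each group's
// variance as sse - sum^2 / N, where N (the group's pixel count) is
// 4 * (4 << b_width_log2) * (4 << b_height_log2) = 2^(bw_log2 + bh_log2 + 6).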
| static void calculate_variance(int bw, int bh, TX_SIZE tx_size, |
| unsigned int *sse_i, int *sum_i, |
| unsigned int *var_o, unsigned int *sse_o, |
| int *sum_o) { |
| const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size]; |
| const int nw = 1 << (bw - b_width_log2_lookup[unit_size]); |
| const int nh = 1 << (bh - b_height_log2_lookup[unit_size]); |
| int i, j, k = 0; |
| |
| for (i = 0; i < nh; i += 2) { |
| for (j = 0; j < nw; j += 2) { |
| sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] + |
| sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1]; |
| sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] + |
| sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1]; |
| var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >> |
| (b_width_log2_lookup[unit_size] + |
| b_height_log2_lookup[unit_size] + 6)); |
| k++; |
| } |
| } |
| } |
| |
// Returns a factor used to scale ac_thr according to speed, width, height and
// normalized sum.
| static int ac_thr_factor(const int speed, const int width, const int height, |
| const int norm_sum) { |
| if (speed >= 8 && norm_sum < 5) { |
| if (width <= 640 && height <= 480) |
| return 4; |
| else |
| return 2; |
| } |
| return 1; |
| } |
| |
// Sets the early_term flag based on the chroma plane prediction.
| static INLINE void set_early_term_based_on_uv_plane( |
| AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, MACROBLOCKD *xd, int mi_row, |
| int mi_col, int *early_term, int num_blk, const unsigned int *sse_tx, |
| const unsigned int *var_tx, int sum, unsigned int var, unsigned int sse) { |
| AV1_COMMON *const cm = &cpi->common; |
| struct macroblock_plane *const p = &x->plane[0]; |
| const uint32_t dc_quant = p->dequant_QTX[0]; |
| const uint32_t ac_quant = p->dequant_QTX[1]; |
| const int64_t dc_thr = dc_quant * dc_quant >> 6; |
| int64_t ac_thr = ac_quant * ac_quant >> 6; |
| const int bw = b_width_log2_lookup[bsize]; |
| const int bh = b_height_log2_lookup[bsize]; |
| int ac_test = 1; |
| int dc_test = 1; |
| const int norm_sum = abs(sum) >> (bw + bh); |
| |
| #if CONFIG_AV1_TEMPORAL_DENOISING |
| if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) && |
| cpi->oxcf.speed > 5) |
| ac_thr = av1_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level, |
| norm_sum, cpi->svc.temporal_layer_id); |
| else |
| ac_thr *= ac_thr_factor(cpi->oxcf.speed, cm->width, cm->height, norm_sum); |
#else
  ac_thr *= ac_thr_factor(cpi->oxcf.speed, cm->width, cm->height, norm_sum);
#endif
| |
| for (int k = 0; k < num_blk; k++) { |
| // Check if all ac coefficients can be quantized to zero. |
| if (!(var_tx[k] < ac_thr || var == 0)) { |
| ac_test = 0; |
| break; |
| } |
| // Check if dc coefficient can be quantized to zero. |
| if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) { |
| dc_test = 0; |
| break; |
| } |
| } |
| |
| // Check if chroma can be skipped based on ac and dc test flags. |
| if (ac_test && dc_test) { |
| int skip_uv[2] = { 0 }; |
| unsigned int var_uv[2]; |
| unsigned int sse_uv[2]; |
| // Transform skipping test in UV planes. |
| for (int i = 1; i <= 2; i++) { |
| int j = i - 1; |
| skip_uv[j] = 1; |
| if (x->color_sensitivity[j]) { |
| skip_uv[j] = 0; |
| struct macroblock_plane *const puv = &x->plane[i]; |
| struct macroblockd_plane *const puvd = &xd->plane[i]; |
| const BLOCK_SIZE uv_bsize = get_plane_block_size( |
| bsize, puvd->subsampling_x, puvd->subsampling_y); |
| // Adjust these thresholds for UV. |
| const int64_t uv_dc_thr = |
| (puv->dequant_QTX[0] * puv->dequant_QTX[0]) >> 3; |
| const int64_t uv_ac_thr = |
| (puv->dequant_QTX[1] * puv->dequant_QTX[1]) >> 3; |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, i, |
| i); |
| var_uv[j] = cpi->ppi->fn_ptr[uv_bsize].vf(puv->src.buf, puv->src.stride, |
| puvd->dst.buf, |
| puvd->dst.stride, &sse_uv[j]); |
| if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) && |
| (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j])) |
| skip_uv[j] = 1; |
| else |
| break; |
| } |
| } |
| if (skip_uv[0] & skip_uv[1]) { |
| *early_term = 1; |
| } |
| } |
| } |
| |
| static INLINE void calc_rate_dist_block_param(AV1_COMP *cpi, MACROBLOCK *x, |
| RD_STATS *rd_stats, |
| int calculate_rd, int *early_term, |
| BLOCK_SIZE bsize, |
| unsigned int sse) { |
| if (calculate_rd) { |
| if (!*early_term) { |
| const int bw = block_size_wide[bsize]; |
| const int bh = block_size_high[bsize]; |
| |
| model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, rd_stats->sse, bw * bh, |
| &rd_stats->rate, &rd_stats->dist); |
| } |
| |
| if (*early_term) { |
| rd_stats->rate = 0; |
| rd_stats->dist = sse << 4; |
| } |
| } |
| } |
| |
| static void model_skip_for_sb_y_large_64(AV1_COMP *cpi, BLOCK_SIZE bsize, |
| int mi_row, int mi_col, MACROBLOCK *x, |
| MACROBLOCKD *xd, RD_STATS *rd_stats, |
| int *early_term, int calculate_rd, |
| int64_t best_sse, |
| unsigned int *var_output, |
| unsigned int var_prune_threshold) { |
| // Note our transform coeffs are 8 times an orthogonal transform. |
| // Hence quantizer step is also 8 times. To get effective quantizer |
| // we need to divide by 8 before sending to modeling function. |
| unsigned int sse; |
| struct macroblock_plane *const p = &x->plane[0]; |
| struct macroblockd_plane *const pd = &xd->plane[0]; |
| int test_skip = 1; |
| unsigned int var; |
| int sum; |
| const int bw = b_width_log2_lookup[bsize]; |
| const int bh = b_height_log2_lookup[bsize]; |
| unsigned int sse16x16[64] = { 0 }; |
| unsigned int var16x16[64] = { 0 }; |
| assert(xd->mi[0]->tx_size == TX_16X16); |
| assert(bsize > BLOCK_32X32); |
| |
| // Calculate variance for whole partition, and also save 16x16 blocks' |
| // variance to be used in following transform skipping test. |
| block_variance_16x16_dual(p->src.buf, p->src.stride, pd->dst.buf, |
| pd->dst.stride, 4 << bw, 4 << bh, &sse, &sum, 16, |
| sse16x16, var16x16); |
| |
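  // Overall variance of the partition: var = sse - sum^2 / N, where
  // N = (4 << bw) * (4 << bh) = 2^(bw + bh + 4) is the pixel count.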
| var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4)); |
| if (var_output) { |
| *var_output = var; |
| if (*var_output > var_prune_threshold) { |
| return; |
| } |
| } |
| |
| rd_stats->sse = sse; |
| // Skipping test |
| *early_term = 0; |
| set_force_skip_flag(cpi, x, sse, early_term); |
  // The transform size is fixed to 16x16 here (asserted above), which
  // satisfies the minimum 8x8 transform size assumed by the skip-flag code
  // below.
| MB_MODE_INFO *const mi = xd->mi[0]; |
| if (!calculate_rd && cpi->sf.rt_sf.sse_early_term_inter_search && |
| early_term_inter_search_with_sse( |
| cpi->sf.rt_sf.sse_early_term_inter_search, bsize, sse, best_sse, |
| mi->mode)) |
| test_skip = 0; |
| |
| if (*early_term) test_skip = 0; |
| |
| // Evaluate if the partition block is a skippable block in Y plane. |
| if (test_skip) { |
| const unsigned int *sse_tx = sse16x16; |
| const unsigned int *var_tx = var16x16; |
| const unsigned int num_block = (1 << (bw + bh - 2)) >> 2; |
| set_early_term_based_on_uv_plane(cpi, x, bsize, xd, mi_row, mi_col, |
| early_term, num_block, sse_tx, var_tx, sum, |
| var, sse); |
| } |
| calc_rate_dist_block_param(cpi, x, rd_stats, calculate_rd, early_term, bsize, |
| sse); |
| } |
| |
| static void model_skip_for_sb_y_large(AV1_COMP *cpi, BLOCK_SIZE bsize, |
| int mi_row, int mi_col, MACROBLOCK *x, |
| MACROBLOCKD *xd, RD_STATS *rd_stats, |
| int *early_term, int calculate_rd, |
| int64_t best_sse, |
| unsigned int *var_output, |
| unsigned int var_prune_threshold) { |
| if (x->force_zeromv_skip_for_blk) { |
| *early_term = 1; |
| rd_stats->rate = 0; |
| rd_stats->dist = 0; |
| rd_stats->sse = 0; |
| return; |
| } |
| |
| // For block sizes greater than 32x32, the transform size is always 16x16. |
| // This function avoids calling calculate_variance() for tx_size 16x16 cases |
| // by directly populating variance at tx_size level from |
| // block_variance_16x16_dual() function. |
| const TxfmSearchParams *txfm_params = &x->txfm_search_params; |
| if (CAP_TX_SIZE_FOR_BSIZE_GT32(txfm_params->tx_mode_search_type, bsize)) { |
| xd->mi[0]->tx_size = TX_SIZE_FOR_BSIZE_GT32; |
| model_skip_for_sb_y_large_64(cpi, bsize, mi_row, mi_col, x, xd, rd_stats, |
| early_term, calculate_rd, best_sse, var_output, |
| var_prune_threshold); |
| return; |
| } |
| |
| // Note our transform coeffs are 8 times an orthogonal transform. |
| // Hence quantizer step is also 8 times. To get effective quantizer |
| // we need to divide by 8 before sending to modeling function. |
| unsigned int sse; |
| struct macroblock_plane *const p = &x->plane[0]; |
| struct macroblockd_plane *const pd = &xd->plane[0]; |
| int test_skip = 1; |
| unsigned int var; |
| int sum; |
| |
| const int bw = b_width_log2_lookup[bsize]; |
| const int bh = b_height_log2_lookup[bsize]; |
| unsigned int sse8x8[256] = { 0 }; |
| int sum8x8[256] = { 0 }; |
| unsigned int var8x8[256] = { 0 }; |
| TX_SIZE tx_size; |
| |
| // Calculate variance for whole partition, and also save 8x8 blocks' variance |
| // to be used in following transform skipping test. |
| block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, |
| 4 << bw, 4 << bh, &sse, &sum, 8, sse8x8, sum8x8, var8x8); |
| var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4)); |
| if (var_output) { |
| *var_output = var; |
| if (*var_output > var_prune_threshold) { |
| return; |
| } |
| } |
| |
| rd_stats->sse = sse; |
| // Skipping test |
| *early_term = 0; |
| tx_size = calculate_tx_size(cpi, bsize, x, var, sse, early_term); |
| assert(tx_size <= TX_16X16); |
| // The code below for setting skip flag assumes transform size of at least |
| // 8x8, so force this lower limit on transform. |
| if (tx_size < TX_8X8) tx_size = TX_8X8; |
| xd->mi[0]->tx_size = tx_size; |
| |
| MB_MODE_INFO *const mi = xd->mi[0]; |
| if (!calculate_rd && cpi->sf.rt_sf.sse_early_term_inter_search && |
| early_term_inter_search_with_sse( |
| cpi->sf.rt_sf.sse_early_term_inter_search, bsize, sse, best_sse, |
| mi->mode)) |
| test_skip = 0; |
| |
| if (*early_term) test_skip = 0; |
| |
| // Evaluate if the partition block is a skippable block in Y plane. |
| if (test_skip) { |
| unsigned int sse16x16[64] = { 0 }; |
| int sum16x16[64] = { 0 }; |
| unsigned int var16x16[64] = { 0 }; |
| const unsigned int *sse_tx = sse8x8; |
| const unsigned int *var_tx = var8x8; |
| unsigned int num_blks = 1 << (bw + bh - 2); |
| |
| if (tx_size >= TX_16X16) { |
| calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16, |
| sum16x16); |
| sse_tx = sse16x16; |
| var_tx = var16x16; |
| num_blks = num_blks >> 2; |
| } |
| set_early_term_based_on_uv_plane(cpi, x, bsize, xd, mi_row, mi_col, |
| early_term, num_blks, sse_tx, var_tx, sum, |
| var, sse); |
| } |
| calc_rate_dist_block_param(cpi, x, rd_stats, calculate_rd, early_term, bsize, |
| sse); |
| } |
| |
| static void model_rd_for_sb_y(const AV1_COMP *const cpi, BLOCK_SIZE bsize, |
| MACROBLOCK *x, MACROBLOCKD *xd, |
| RD_STATS *rd_stats, unsigned int *var_out, |
| int calculate_rd, int *early_term) { |
| if (x->force_zeromv_skip_for_blk && early_term != NULL) { |
| *early_term = 1; |
| rd_stats->rate = 0; |
| rd_stats->dist = 0; |
| rd_stats->sse = 0; |
| } |
| |
| // Note our transform coeffs are 8 times an orthogonal transform. |
| // Hence quantizer step is also 8 times. To get effective quantizer |
| // we need to divide by 8 before sending to modeling function. |
| const int ref = xd->mi[0]->ref_frame[0]; |
| |
| assert(bsize < BLOCK_SIZES_ALL); |
| |
| struct macroblock_plane *const p = &x->plane[0]; |
| struct macroblockd_plane *const pd = &xd->plane[0]; |
| unsigned int sse; |
| int rate; |
| int64_t dist; |
| |
| unsigned int var = cpi->ppi->fn_ptr[bsize].vf( |
| p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse); |
| int force_skip = 0; |
| xd->mi[0]->tx_size = calculate_tx_size(cpi, bsize, x, var, sse, &force_skip); |
| if (var_out) { |
| *var_out = var; |
| } |
| |
| if (calculate_rd && (!force_skip || ref == INTRA_FRAME)) { |
| const int bwide = block_size_wide[bsize]; |
| const int bhigh = block_size_high[bsize]; |
| model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, sse, bwide * bhigh, &rate, |
| &dist); |
| } else { |
| rate = INT_MAX; // this will be overwritten later with block_yrd |
| dist = INT_MAX; |
| } |
| rd_stats->sse = sse; |
| x->pred_sse[ref] = (unsigned int)AOMMIN(sse, UINT_MAX); |
| |
| if (force_skip && ref > INTRA_FRAME) { |
| rate = 0; |
| dist = (int64_t)sse << 4; |
| } |
| |
| assert(rate >= 0); |
| |
| rd_stats->skip_txfm = (rate == 0); |
| rate = AOMMIN(rate, INT_MAX); |
| rd_stats->rate = rate; |
| rd_stats->dist = dist; |
| } |
| |
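// Precomputes the low-precision 8x8 Hadamard transform for the whole block,
// two horizontally adjacent 8x8 sub-blocks per call, writing the results into
// macroblock_plane::coeff for later per-sub-block quantization.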
| static INLINE void aom_process_hadamard_lp_8x16(MACROBLOCK *x, |
| int max_blocks_high, |
| int max_blocks_wide, |
| int num_4x4_w, int step, |
| int block_step) { |
| struct macroblock_plane *const p = &x->plane[0]; |
| const int bw = 4 * num_4x4_w; |
| const int num_4x4 = AOMMIN(num_4x4_w, max_blocks_wide); |
| int block = 0; |
| |
| for (int r = 0; r < max_blocks_high; r += block_step) { |
| for (int c = 0; c < num_4x4; c += 2 * block_step) { |
| const int16_t *src_diff = &p->src_diff[(r * bw + c) << 2]; |
| int16_t *low_coeff = (int16_t *)p->coeff + BLOCK_OFFSET(block); |
| aom_hadamard_lp_8x8_dual(src_diff, (ptrdiff_t)bw, low_coeff); |
| block += 2 * step; |
| } |
| } |
| } |
| |
| #define DECLARE_BLOCK_YRD_BUFFERS() \ |
| DECLARE_ALIGNED(64, tran_low_t, dqcoeff_buf[16 * 16]); \ |
| DECLARE_ALIGNED(64, tran_low_t, qcoeff_buf[16 * 16]); \ |
| DECLARE_ALIGNED(64, tran_low_t, coeff_buf[16 * 16]); \ |
| uint16_t eob[1]; |
| |
| #define DECLARE_BLOCK_YRD_VARS() \ |
| /* When is_tx_8x8_dual_applicable is true, we compute the txfm for the \ |
| * entire bsize and write macroblock_plane::coeff. So low_coeff is kept \ |
| * as a non-const so we can reassign it to macroblock_plane::coeff. */ \ |
| int16_t *low_coeff = (int16_t *)coeff_buf; \ |
| int16_t *const low_qcoeff = (int16_t *)qcoeff_buf; \ |
| int16_t *const low_dqcoeff = (int16_t *)dqcoeff_buf; \ |
| const SCAN_ORDER *const scan_order = &av1_scan_orders[tx_size][DCT_DCT]; \ |
| const int diff_stride = bw; |
| |
| #define DECLARE_LOOP_VARS_BLOCK_YRD() \ |
| const int16_t *src_diff = &p->src_diff[(r * diff_stride + c) << 2]; |
| |
| #if CONFIG_AV1_HIGHBITDEPTH |
| #define DECLARE_BLOCK_YRD_HBD_VARS() \ |
| tran_low_t *const coeff = coeff_buf; \ |
| tran_low_t *const qcoeff = qcoeff_buf; \ |
| tran_low_t *const dqcoeff = dqcoeff_buf; |
| |
| static AOM_FORCE_INLINE void update_yrd_loop_vars_hbd( |
| MACROBLOCK *x, int *skippable, const int step, const int ncoeffs, |
| tran_low_t *const coeff, tran_low_t *const qcoeff, |
| tran_low_t *const dqcoeff, RD_STATS *this_rdc, int *eob_cost, |
| const int tx_blk_id) { |
| const int is_txfm_skip = (ncoeffs == 0); |
| *skippable &= is_txfm_skip; |
| x->txfm_search_info.blk_skip[tx_blk_id] = is_txfm_skip; |
| *eob_cost += get_msb(ncoeffs + 1); |
| |
| int64_t dummy; |
| if (ncoeffs == 1) |
| this_rdc->rate += (int)abs(qcoeff[0]); |
| else if (ncoeffs > 1) |
| this_rdc->rate += aom_satd(qcoeff, step << 4); |
| |
| this_rdc->dist += av1_block_error(coeff, dqcoeff, step << 4, &dummy) >> 2; |
| } |
| #endif |
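// Accumulates per-transform-block rate/distortion proxies (low bit depth
// variant; the HBD variant above is analogous): rate is approximated by the
// SATD of the quantized coefficients (the eob term is added by the caller),
// and distortion by the coefficient-domain block error scaled down by 4.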
| static AOM_FORCE_INLINE void update_yrd_loop_vars( |
| MACROBLOCK *x, int *skippable, const int step, const int ncoeffs, |
| int16_t *const low_coeff, int16_t *const low_qcoeff, |
| int16_t *const low_dqcoeff, RD_STATS *this_rdc, int *eob_cost, |
| const int tx_blk_id) { |
| const int is_txfm_skip = (ncoeffs == 0); |
| *skippable &= is_txfm_skip; |
| x->txfm_search_info.blk_skip[tx_blk_id] = is_txfm_skip; |
| *eob_cost += get_msb(ncoeffs + 1); |
| if (ncoeffs == 1) |
| this_rdc->rate += (int)abs(low_qcoeff[0]); |
| else if (ncoeffs > 1) |
| this_rdc->rate += aom_satd_lp(low_qcoeff, step << 4); |
| |
| this_rdc->dist += av1_block_error_lp(low_coeff, low_dqcoeff, step << 4) >> 2; |
| } |
| |
| /*!\brief Calculates RD Cost using Hadamard transform. |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
 * Calculates RD Cost using the Hadamard transform. For low bit depth this
 * function uses the low-precision (16-bit) set of functions, and the 32-bit
 * set for high bit depth.
| * \param[in] x Pointer to structure holding all the data for |
| the current macroblock |
| * \param[in] this_rdc Pointer to calculated RD Cost |
| * \param[in] skippable Pointer to a flag indicating possible tx skip |
| * \param[in] bsize Current block size |
| * \param[in] tx_size Transform size |
| * \param[in] is_inter_mode Flag to indicate inter mode |
| * |
| * \remark Nothing is returned. Instead, calculated RD cost is placed to |
| * \c this_rdc. \c skippable flag is set if there is no non-zero quantized |
| * coefficients for Hadamard transform |
| */ |
| static void block_yrd(MACROBLOCK *x, RD_STATS *this_rdc, int *skippable, |
| const BLOCK_SIZE bsize, const TX_SIZE tx_size, |
| const int is_inter_mode) { |
| MACROBLOCKD *xd = &x->e_mbd; |
| const struct macroblockd_plane *pd = &xd->plane[0]; |
| struct macroblock_plane *const p = &x->plane[0]; |
| assert(bsize < BLOCK_SIZES_ALL); |
| const int num_4x4_w = mi_size_wide[bsize]; |
| const int num_4x4_h = mi_size_high[bsize]; |
| const int step = 1 << (tx_size << 1); |
| const int block_step = (1 << tx_size); |
| const int row_step = step * num_4x4_w >> tx_size; |
| int block = 0; |
| const int max_blocks_wide = |
| num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5); |
| const int max_blocks_high = |
| num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5); |
| int eob_cost = 0; |
| const int bw = 4 * num_4x4_w; |
| const int bh = 4 * num_4x4_h; |
| const int use_hbd = is_cur_buf_hbd(xd); |
| int num_blk_skip_w = num_4x4_w; |
| int sh_blk_skip = 0; |
| if (is_inter_mode) { |
| num_blk_skip_w = num_4x4_w >> 1; |
| sh_blk_skip = 1; |
| } |
| |
| #if CONFIG_AV1_HIGHBITDEPTH |
| if (use_hbd) { |
| aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, |
| p->src.stride, pd->dst.buf, pd->dst.stride); |
| } else { |
| aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, |
| pd->dst.buf, pd->dst.stride); |
| } |
| #else |
| aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, |
| pd->dst.buf, pd->dst.stride); |
| #endif |
| |
| // Keep the intermediate value on the stack here. Writing directly to |
| // skippable causes speed regression due to load-and-store issues in |
| // update_yrd_loop_vars. |
| int temp_skippable = 1; |
| this_rdc->dist = 0; |
| this_rdc->rate = 0; |
| // For block sizes 8x16 or above, Hadamard txfm of two adjacent 8x8 blocks |
| // can be done per function call. Hence the call of Hadamard txfm is |
| // abstracted here for the specified cases. |
| int is_tx_8x8_dual_applicable = |
| (tx_size == TX_8X8 && block_size_wide[bsize] >= 16 && |
| block_size_high[bsize] >= 8); |
| |
| #if CONFIG_AV1_HIGHBITDEPTH |
| // As of now, dual implementation of hadamard txfm is available for low |
| // bitdepth. |
| if (use_hbd) is_tx_8x8_dual_applicable = 0; |
| #endif |
| |
| if (is_tx_8x8_dual_applicable) { |
| aom_process_hadamard_lp_8x16(x, max_blocks_high, max_blocks_wide, num_4x4_w, |
| step, block_step); |
| } |
| |
| DECLARE_BLOCK_YRD_BUFFERS() |
| DECLARE_BLOCK_YRD_VARS() |
| #if CONFIG_AV1_HIGHBITDEPTH |
| DECLARE_BLOCK_YRD_HBD_VARS() |
| #else |
| (void)use_hbd; |
| #endif |
| |
| // Keep track of the row and column of the blocks we use so that we know |
| // if we are in the unrestricted motion border. |
| for (int r = 0; r < max_blocks_high; r += block_step) { |
| for (int c = 0, s = 0; c < max_blocks_wide; c += block_step, s += step) { |
| DECLARE_LOOP_VARS_BLOCK_YRD() |
| |
| switch (tx_size) { |
| #if CONFIG_AV1_HIGHBITDEPTH |
| case TX_16X16: |
| if (use_hbd) { |
| aom_hadamard_16x16(src_diff, diff_stride, coeff); |
| av1_quantize_fp(coeff, 16 * 16, p->zbin_QTX, p->round_fp_QTX, |
| p->quant_fp_QTX, p->quant_shift_QTX, qcoeff, |
| dqcoeff, p->dequant_QTX, eob, |
| // default_scan_fp_16x16_transpose and |
| // av1_default_iscan_fp_16x16_transpose have to be |
| // used together. |
| default_scan_fp_16x16_transpose, |
| av1_default_iscan_fp_16x16_transpose); |
| } else { |
| aom_hadamard_lp_16x16(src_diff, diff_stride, low_coeff); |
| av1_quantize_lp(low_coeff, 16 * 16, p->round_fp_QTX, |
| p->quant_fp_QTX, low_qcoeff, low_dqcoeff, |
| p->dequant_QTX, eob, |
| // default_scan_lp_16x16_transpose and |
| // av1_default_iscan_lp_16x16_transpose have to be |
| // used together. |
| default_scan_lp_16x16_transpose, |
| av1_default_iscan_lp_16x16_transpose); |
| } |
| break; |
| case TX_8X8: |
| if (use_hbd) { |
| aom_hadamard_8x8(src_diff, diff_stride, coeff); |
| av1_quantize_fp( |
| coeff, 8 * 8, p->zbin_QTX, p->round_fp_QTX, p->quant_fp_QTX, |
| p->quant_shift_QTX, qcoeff, dqcoeff, p->dequant_QTX, eob, |
| default_scan_8x8_transpose, av1_default_iscan_8x8_transpose); |
| } else { |
| if (is_tx_8x8_dual_applicable) { |
| // The coeffs are pre-computed for the whole block, so re-assign |
| // low_coeff to the appropriate location. |
| const int block_offset = BLOCK_OFFSET(block + s); |
| low_coeff = (int16_t *)p->coeff + block_offset; |
| } else { |
| aom_hadamard_lp_8x8(src_diff, diff_stride, low_coeff); |
| } |
| av1_quantize_lp( |
| low_coeff, 8 * 8, p->round_fp_QTX, p->quant_fp_QTX, low_qcoeff, |
| low_dqcoeff, p->dequant_QTX, eob, |
| // default_scan_8x8_transpose and |
| // av1_default_iscan_8x8_transpose have to be used together. |
| default_scan_8x8_transpose, av1_default_iscan_8x8_transpose); |
| } |
| break; |
| default: |
| assert(tx_size == TX_4X4); |
          // In the tx_size=4x4 case, aom_fdct4x4 and aom_fdct4x4_lp generate
          // coefficients in the normal order, so we don't need to change the
          // scan order here.
| if (use_hbd) { |
| aom_fdct4x4(src_diff, coeff, diff_stride); |
| av1_quantize_fp(coeff, 4 * 4, p->zbin_QTX, p->round_fp_QTX, |
| p->quant_fp_QTX, p->quant_shift_QTX, qcoeff, |
| dqcoeff, p->dequant_QTX, eob, scan_order->scan, |
| scan_order->iscan); |
| } else { |
| aom_fdct4x4_lp(src_diff, low_coeff, diff_stride); |
| av1_quantize_lp(low_coeff, 4 * 4, p->round_fp_QTX, p->quant_fp_QTX, |
| low_qcoeff, low_dqcoeff, p->dequant_QTX, eob, |
| scan_order->scan, scan_order->iscan); |
| } |
| break; |
| #else |
| case TX_16X16: |
| aom_hadamard_lp_16x16(src_diff, diff_stride, low_coeff); |
| av1_quantize_lp(low_coeff, 16 * 16, p->round_fp_QTX, p->quant_fp_QTX, |
| low_qcoeff, low_dqcoeff, p->dequant_QTX, eob, |
| default_scan_lp_16x16_transpose, |
| av1_default_iscan_lp_16x16_transpose); |
| break; |
| case TX_8X8: |
| if (is_tx_8x8_dual_applicable) { |
| // The coeffs are pre-computed for the whole block, so re-assign |
| // low_coeff to the appropriate location. |
| const int block_offset = BLOCK_OFFSET(block + s); |
| low_coeff = (int16_t *)p->coeff + block_offset; |
| } else { |
| aom_hadamard_lp_8x8(src_diff, diff_stride, low_coeff); |
| } |
| av1_quantize_lp(low_coeff, 8 * 8, p->round_fp_QTX, p->quant_fp_QTX, |
| low_qcoeff, low_dqcoeff, p->dequant_QTX, eob, |
| default_scan_8x8_transpose, |
| av1_default_iscan_8x8_transpose); |
| break; |
| default: |
| aom_fdct4x4_lp(src_diff, low_coeff, diff_stride); |
| av1_quantize_lp(low_coeff, 4 * 4, p->round_fp_QTX, p->quant_fp_QTX, |
| low_qcoeff, low_dqcoeff, p->dequant_QTX, eob, |
| scan_order->scan, scan_order->iscan); |
| break; |
| #endif |
| } |
| assert(*eob <= 1024); |
| #if CONFIG_AV1_HIGHBITDEPTH |
| if (use_hbd) |
| update_yrd_loop_vars_hbd(x, &temp_skippable, step, *eob, coeff, qcoeff, |
| dqcoeff, this_rdc, &eob_cost, |
| (r * num_blk_skip_w + c) >> sh_blk_skip); |
| else |
| #endif |
| update_yrd_loop_vars(x, &temp_skippable, step, *eob, low_coeff, |
| low_qcoeff, low_dqcoeff, this_rdc, &eob_cost, |
| (r * num_blk_skip_w + c) >> sh_blk_skip); |
| } |
| block += row_step; |
| } |
| |
| this_rdc->skip_txfm = *skippable = temp_skippable; |
| if (this_rdc->sse < INT64_MAX) { |
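| // (x << 6) >> 2 scales the accumulated SSE by a net factor of 16, after |
| // which it can be used directly as the distortion in the skip path below. |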
| this_rdc->sse = (this_rdc->sse << 6) >> 2; |
| if (temp_skippable) { |
| this_rdc->dist = this_rdc->sse; |
| return; |
| } |
| } |
| |
| // If skippable is set, rate gets clobbered later. |
| this_rdc->rate <<= (2 + AV1_PROB_COST_SHIFT); |
| this_rdc->rate += (eob_cost << AV1_PROB_COST_SHIFT); |
| } |
| |
| // Explicitly enumerate the cases so the compiler can generate SIMD for the |
| // function. According to the disassembler, gcc generates SSE code for each |
| // of the possible block sizes. The hottest case is tx_width 16, which |
| // accounts for about 8% of the self cycles of av1_nonrd_pick_inter_mode_sb. |
| // Since av1_nonrd_pick_inter_mode_sb accounts for about 3% of total |
| // encoding time, the potential gain from a hand-written AVX2 version is |
| // only 3% * 8% = 0.24% of total encoding time. |
| static AOM_INLINE void scale_square_buf_vals(int16_t *dst, const int tx_width, |
| const int16_t *src, |
| const int src_stride) { |
| #define DO_SCALING \ |
| do { \ |
| for (int idy = 0; idy < tx_width; ++idy) { \ |
| for (int idx = 0; idx < tx_width; ++idx) { \ |
| dst[idy * tx_width + idx] = src[idy * src_stride + idx] * 8; \ |
| } \ |
| } \ |
| } while (0) |
| |
| if (tx_width == 4) { |
| DO_SCALING; |
| } else if (tx_width == 8) { |
| DO_SCALING; |
| } else if (tx_width == 16) { |
| DO_SCALING; |
| } else { |
| assert(0); |
| } |
| |
| #undef DO_SCALING |
| } |
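| |
| // For illustration: in the tx_width == 4 branch above, constant propagation |
| // effectively gives the compiler fixed loop bounds, i.e. |
| // for (int idy = 0; idy < 4; ++idy) |
| // for (int idx = 0; idx < 4; ++idx) |
| // dst[idy * 4 + idx] = src[idy * src_stride + idx] * 8; |
| // which is what enables the per-size SIMD code generation noted above. |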
| |
| /*!\brief Calculates RD Cost when the block uses Identity transform. |
| * Note that this function is only used for low bit-depth encoding, since it |
| * is currently called only from real-time mode, which is built with high |
| * bit depth disabled: -DCONFIG_AV1_HIGHBITDEPTH=0. |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
| * Calculates the RD cost using the low-precision (16-bit) set of functions, |
| * since this path only supports low bit depth. |
| * \param[in] x Pointer to structure holding all the data for |
| the current macroblock |
| * \param[in] this_rdc Pointer to calculated RD Cost |
| * \param[in] skippable Pointer to a flag indicating possible tx skip |
| * \param[in] bsize Current block size |
| * \param[in] tx_size Transform size |
| * |
| * \remark Nothing is returned. Instead, the calculated RD cost is placed in |
| * \c this_rdc. The \c skippable flag is set if all coefficients are zero. |
| */ |
| static void block_yrd_idtx(MACROBLOCK *x, RD_STATS *this_rdc, int *skippable, |
| const BLOCK_SIZE bsize, const TX_SIZE tx_size) { |
| MACROBLOCKD *xd = &x->e_mbd; |
| const struct macroblockd_plane *pd = &xd->plane[0]; |
| struct macroblock_plane *const p = &x->plane[0]; |
| assert(bsize < BLOCK_SIZES_ALL); |
| const int num_4x4_w = mi_size_wide[bsize]; |
| const int num_4x4_h = mi_size_high[bsize]; |
| const int step = 1 << (tx_size << 1); |
| const int block_step = (1 << tx_size); |
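| // Each TX block spans block_step x block_step 4x4 units, so step = |
| // 4^tx_size is the number of 4x4 units it covers. |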
| const int max_blocks_wide = |
| num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5); |
| const int max_blocks_high = |
| num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5); |
| int eob_cost = 0; |
| const int bw = 4 * num_4x4_w; |
| const int bh = 4 * num_4x4_h; |
| const int num_blk_skip_w = num_4x4_w >> 1; |
| const int sh_blk_skip = 1; |
| // Keep the intermediate value on the stack here. Writing directly to |
| // skippable causes speed regression due to load-and-store issues in |
| // update_yrd_loop_vars. |
| int temp_skippable = 1; |
| int tx_wd = 0; |
| switch (tx_size) { |
| case TX_64X64: |
| assert(0); // Not implemented |
| break; |
| case TX_32X32: |
| assert(0); // Not used |
| break; |
| case TX_16X16: tx_wd = 16; break; |
| case TX_8X8: tx_wd = 8; break; |
| default: |
| assert(tx_size == TX_4X4); |
| tx_wd = 4; |
| break; |
| } |
| this_rdc->dist = 0; |
| this_rdc->rate = 0; |
| aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, |
| pd->dst.buf, pd->dst.stride); |
| // Keep track of the row and column of the blocks we use so that we know |
| // if we are in the unrestricted motion border. |
| DECLARE_BLOCK_YRD_BUFFERS() |
| DECLARE_BLOCK_YRD_VARS() |
| for (int r = 0; r < max_blocks_high; r += block_step) { |
| for (int c = 0, s = 0; c < max_blocks_wide; c += block_step, s += step) { |
| DECLARE_LOOP_VARS_BLOCK_YRD() |
| scale_square_buf_vals(low_coeff, tx_wd, src_diff, diff_stride); |
| av1_quantize_lp(low_coeff, tx_wd * tx_wd, p->round_fp_QTX, |
| p->quant_fp_QTX, low_qcoeff, low_dqcoeff, p->dequant_QTX, |
| eob, scan_order->scan, scan_order->iscan); |
| assert(*eob <= 1024); |
| update_yrd_loop_vars(x, &temp_skippable, step, *eob, low_coeff, |
| low_qcoeff, low_dqcoeff, this_rdc, &eob_cost, |
| (r * num_blk_skip_w + c) >> sh_blk_skip); |
| } |
| } |
| this_rdc->skip_txfm = *skippable = temp_skippable; |
| if (this_rdc->sse < INT64_MAX) { |
| this_rdc->sse = (this_rdc->sse << 6) >> 2; |
| if (temp_skippable) { |
| this_rdc->dist = this_rdc->sse; |
| return; |
| } |
| } |
| // If skippable is set, rate gets clobbered later. |
| this_rdc->rate <<= (2 + AV1_PROB_COST_SHIFT); |
| this_rdc->rate += (eob_cost << AV1_PROB_COST_SHIFT); |
| } |
| |
| static INLINE void init_mbmi(MB_MODE_INFO *mbmi, PREDICTION_MODE pred_mode, |
| MV_REFERENCE_FRAME ref_frame0, |
| MV_REFERENCE_FRAME ref_frame1, |
| const AV1_COMMON *cm) { |
| PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info; |
| mbmi->ref_mv_idx = 0; |
| mbmi->mode = pred_mode; |
| mbmi->uv_mode = UV_DC_PRED; |
| mbmi->ref_frame[0] = ref_frame0; |
| mbmi->ref_frame[1] = ref_frame1; |
| pmi->palette_size[0] = 0; |
| pmi->palette_size[1] = 0; |
| mbmi->filter_intra_mode_info.use_filter_intra = 0; |
| mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0; |
| mbmi->motion_mode = SIMPLE_TRANSLATION; |
| mbmi->num_proj_ref = 1; |
| mbmi->interintra_mode = 0; |
| set_default_interp_filters(mbmi, cm->features.interp_filter); |
| } |
| |
| #if CONFIG_INTERNAL_STATS |
| static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, |
| int mode_index) { |
| #else |
| static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { |
| #endif // CONFIG_INTERNAL_STATS |
| MACROBLOCKD *const xd = &x->e_mbd; |
| TxfmSearchInfo *txfm_info = &x->txfm_search_info; |
| |
| // Take a snapshot of the coding context so it can be |
| // restored if we decide to encode this way |
| ctx->rd_stats.skip_txfm = txfm_info->skip_txfm; |
| |
| ctx->skippable = txfm_info->skip_txfm; |
| #if CONFIG_INTERNAL_STATS |
| ctx->best_mode_index = mode_index; |
| #endif // CONFIG_INTERNAL_STATS |
| ctx->mic = *xd->mi[0]; |
| av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, &x->mbmi_ext, |
| av1_ref_frame_type(xd->mi[0]->ref_frame)); |
| } |
| |
| static int get_pred_buffer(PRED_BUFFER *p, int len) { |
| for (int i = 0; i < len; i++) { |
| if (!p[i].in_use) { |
| p[i].in_use = 1; |
| return i; |
| } |
| } |
| return -1; |
| } |
| |
| static void free_pred_buffer(PRED_BUFFER *p) { |
| if (p != NULL) p->in_use = 0; |
| } |
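| |
| // Usage sketch for the two pool helpers above: get_pred_buffer() hands out |
| // the index of a free PRED_BUFFER (or -1 if none is available), and |
| // free_pred_buffer() releases it again; see e.g. the tmp_buffer handling in |
| // search_filter_ref() below. |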
| |
| static INLINE int get_drl_cost(const PREDICTION_MODE this_mode, |
| const int ref_mv_idx, |
| const MB_MODE_INFO_EXT *mbmi_ext, |
| const int (*const drl_mode_cost0)[2], |
| int8_t ref_frame_type) { |
| int cost = 0; |
| if (this_mode == NEWMV || this_mode == NEW_NEWMV) { |
| for (int idx = 0; idx < 2; ++idx) { |
| if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) { |
| uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx); |
| cost += drl_mode_cost0[drl_ctx][ref_mv_idx != idx]; |
| if (ref_mv_idx == idx) return cost; |
| } |
| } |
| return cost; |
| } |
| |
| if (have_nearmv_in_inter_mode(this_mode)) { |
| for (int idx = 1; idx < 3; ++idx) { |
| if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) { |
| uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx); |
| cost += drl_mode_cost0[drl_ctx][ref_mv_idx != (idx - 1)]; |
| if (ref_mv_idx == (idx - 1)) return cost; |
| } |
| } |
| return cost; |
| } |
| return cost; |
| } |
| |
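| // Returns the cost of signalling an inter mode. Compound modes use a single |
| // symbol; single-reference modes are coded as a cascade of binary decisions |
| // (new-mv, then zero/global-mv, then nearest-vs-near), each with its own |
| // context derived from mode_context. |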
| static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode, |
| int16_t mode_context) { |
| if (is_inter_compound_mode(mode)) { |
| return mode_costs |
| ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)]; |
| } |
| |
| int mode_cost = 0; |
| int16_t mode_ctx = mode_context & NEWMV_CTX_MASK; |
| |
| assert(is_inter_mode(mode)); |
| |
| if (mode == NEWMV) { |
| mode_cost = mode_costs->newmv_mode_cost[mode_ctx][0]; |
| return mode_cost; |
| } else { |
| mode_cost = mode_costs->newmv_mode_cost[mode_ctx][1]; |
| mode_ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK; |
| |
| if (mode == GLOBALMV) { |
| mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][0]; |
| return mode_cost; |
| } else { |
| mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][1]; |
| mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK; |
| mode_cost += mode_costs->refmv_mode_cost[mode_ctx][mode != NEARESTMV]; |
| return mode_cost; |
| } |
| } |
| } |
| |
| static void newmv_diff_bias(MACROBLOCKD *xd, PREDICTION_MODE this_mode, |
| RD_STATS *this_rdc, BLOCK_SIZE bsize, int mv_row, |
| int mv_col, int speed, uint32_t spatial_variance, |
| CONTENT_STATE_SB content_state_sb) { |
| // Bias against MVs associated with NEWMV mode that are very different from |
| // top/left neighbors. |
| if (this_mode == NEWMV) { |
| int al_mv_average_row; |
| int al_mv_average_col; |
| int row_diff, col_diff; |
| int above_mv_valid = 0; |
| int left_mv_valid = 0; |
| int above_row = INVALID_MV_ROW_COL, above_col = INVALID_MV_ROW_COL; |
| int left_row = INVALID_MV_ROW_COL, left_col = INVALID_MV_ROW_COL; |
| if (bsize >= BLOCK_64X64 && content_state_sb.source_sad_nonrd != kHighSad && |
| spatial_variance < 300 && |
| (mv_row > 16 || mv_row < -16 || mv_col > 16 || mv_col < -16)) { |
| this_rdc->rdcost = this_rdc->rdcost << 2; |
| return; |
| } |
| if (xd->above_mbmi) { |
| above_mv_valid = xd->above_mbmi->mv[0].as_int != INVALID_MV; |
| above_row = xd->above_mbmi->mv[0].as_mv.row; |
| above_col = xd->above_mbmi->mv[0].as_mv.col; |
| } |
| if (xd->left_mbmi) { |
| left_mv_valid = xd->left_mbmi->mv[0].as_int != INVALID_MV; |
| left_row = xd->left_mbmi->mv[0].as_mv.row; |
| left_col = xd->left_mbmi->mv[0].as_mv.col; |
| } |
| if (above_mv_valid && left_mv_valid) { |
| al_mv_average_row = (above_row + left_row + 1) >> 1; |
| al_mv_average_col = (above_col + left_col + 1) >> 1; |
| } else if (above_mv_valid) { |
| al_mv_average_row = above_row; |
| al_mv_average_col = above_col; |
| } else if (left_mv_valid) { |
| al_mv_average_row = left_row; |
| al_mv_average_col = left_col; |
| } else { |
| al_mv_average_row = al_mv_average_col = 0; |
| } |
| row_diff = al_mv_average_row - mv_row; |
| col_diff = al_mv_average_col - mv_col; |
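| // Penalize MVs that deviate strongly from the above/left average: scale |
| // the RD cost by 2 for blocks of at least 32x32 and by 1.25 otherwise. |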
| if (row_diff > 80 || row_diff < -80 || col_diff > 80 || col_diff < -80) { |
| if (bsize >= BLOCK_32X32) |
| this_rdc->rdcost = this_rdc->rdcost << 1; |
| else |
| this_rdc->rdcost = 5 * this_rdc->rdcost >> 2; |
| } |
| } else { |
| // Bias for speed >= 8 for low spatial variance. |
| if (speed >= 8 && spatial_variance < 150 && |
| (mv_row > 64 || mv_row < -64 || mv_col > 64 || mv_col < -64)) |
| this_rdc->rdcost = 5 * this_rdc->rdcost >> 2; |
| } |
| } |
| |
| static int64_t model_rd_for_sb_uv(AV1_COMP *cpi, BLOCK_SIZE plane_bsize, |
| MACROBLOCK *x, MACROBLOCKD *xd, |
| RD_STATS *this_rdc, int start_plane, |
| int stop_plane) { |
| // Note our transform coefficients are 8 times an orthogonal transform. |
| // Hence the quantizer step is also 8 times. To get the effective quantizer |
| // we need to divide by 8 before sending to the modeling function. |
| unsigned int sse; |
| int rate; |
| int64_t dist; |
| int i; |
| int64_t tot_sse = 0; |
| |
| this_rdc->rate = 0; |
| this_rdc->dist = 0; |
| this_rdc->skip_txfm = 0; |
| |
| for (i = start_plane; i <= stop_plane; ++i) { |
| struct macroblock_plane *const p = &x->plane[i]; |
| struct macroblockd_plane *const pd = &xd->plane[i]; |
| const uint32_t dc_quant = p->dequant_QTX[0]; |
| const uint32_t ac_quant = p->dequant_QTX[1]; |
| const BLOCK_SIZE bs = plane_bsize; |
| unsigned int var; |
| if (!x->color_sensitivity[i - 1]) continue; |
| |
| var = cpi->ppi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, |
| pd->dst.stride, &sse); |
| assert(sse >= var); |
| tot_sse += sse; |
| |
| av1_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs], |
| dc_quant >> 3, &rate, &dist); |
| |
| this_rdc->rate += rate >> 1; |
| this_rdc->dist += dist << 3; |
| |
| av1_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3, |
| &rate, &dist); |
| |
| this_rdc->rate += rate; |
| this_rdc->dist += dist << 4; |
| } |
| |
| if (this_rdc->rate == 0) { |
| this_rdc->skip_txfm = 1; |
| } |
| |
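| // If the modeled rate/distortion is no better than coding the searched |
| // planes as skip (rate 0, distortion = scaled total SSE), force skip. |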
| if (RDCOST(x->rdmult, this_rdc->rate, this_rdc->dist) >= |
| RDCOST(x->rdmult, 0, tot_sse << 4)) { |
| this_rdc->rate = 0; |
| this_rdc->dist = tot_sse << 4; |
| this_rdc->skip_txfm = 1; |
| } |
| |
| return tot_sse; |
| } |
| |
| /*!\cond */ |
| struct estimate_block_intra_args { |
| AV1_COMP *cpi; |
| MACROBLOCK *x; |
| PREDICTION_MODE mode; |
| int skippable; |
| RD_STATS *rdc; |
| }; |
| /*!\endcond */ |
| |
| /*!\brief Estimation of RD cost of an intra mode for Non-RD optimized case. |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
| * Calculates RD Cost for an intra mode for a single TX block using Hadamard |
| * transform. |
| * \param[in] plane Color plane |
| * \param[in] block Index of a TX block in a prediction block |
| * \param[in] row Row of a current TX block |
| * \param[in] col Column of a current TX block |
| * \param[in] plane_bsize Block size of a current prediction block |
| * \param[in] tx_size Transform size |
| * \param[in] arg Pointer to a structure that holds parameters |
| * for intra mode search |
| * |
| * \remark Nothing is returned. Instead, the rate and distortion of this TX |
| * block are accumulated into \c args->rdc, and \c args->skippable is |
| * updated. |
| */ |
| static void estimate_block_intra(int plane, int block, int row, int col, |
| BLOCK_SIZE plane_bsize, TX_SIZE tx_size, |
| void *arg) { |
| struct estimate_block_intra_args *const args = arg; |
| AV1_COMP *const cpi = args->cpi; |
| AV1_COMMON *const cm = &cpi->common; |
| MACROBLOCK *const x = args->x; |
| MACROBLOCKD *const xd = &x->e_mbd; |
| struct macroblock_plane *const p = &x->plane[plane]; |
| struct macroblockd_plane *const pd = &xd->plane[plane]; |
| const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size]; |
| uint8_t *const src_buf_base = p->src.buf; |
| uint8_t *const dst_buf_base = pd->dst.buf; |
| const int64_t src_stride = p->src.stride; |
| const int64_t dst_stride = pd->dst.stride; |
| RD_STATS this_rdc; |
| |
| (void)block; |
| (void)plane_bsize; |
| |
| av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size); |
| av1_invalid_rd_stats(&this_rdc); |
| |
| p->src.buf = &src_buf_base[4 * (row * src_stride + col)]; |
| pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)]; |
| |
| if (plane == 0) { |
| block_yrd(x, &this_rdc, &args->skippable, bsize_tx, |
| AOMMIN(tx_size, TX_16X16), 0); |
| } else { |
| model_rd_for_sb_uv(cpi, bsize_tx, x, xd, &this_rdc, plane, plane); |
| } |
| |
| p->src.buf = src_buf_base; |
| pd->dst.buf = dst_buf_base; |
| args->rdc->rate += this_rdc.rate; |
| args->rdc->dist += this_rdc.dist; |
| } |
| |
| static INLINE void update_thresh_freq_fact(AV1_COMP *cpi, MACROBLOCK *x, |
| BLOCK_SIZE bsize, |
| MV_REFERENCE_FRAME ref_frame, |
| THR_MODES best_mode_idx, |
| PREDICTION_MODE mode) { |
| const THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)]; |
| const BLOCK_SIZE min_size = AOMMAX(bsize - 3, BLOCK_4X4); |
| const BLOCK_SIZE max_size = AOMMIN(bsize + 6, BLOCK_128X128); |
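| // Stepping the BLOCK_SIZE enum by 3 visits only the square sizes |
| // (BLOCK_4X4, BLOCK_8X8, BLOCK_16X16, ...). |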
| for (BLOCK_SIZE bs = min_size; bs <= max_size; bs += 3) { |
| int *freq_fact = &x->thresh_freq_fact[bs][thr_mode_idx]; |
| if (thr_mode_idx == best_mode_idx) { |
| *freq_fact -= (*freq_fact >> 4); |
| } else { |
| *freq_fact = |
| AOMMIN(*freq_fact + RD_THRESH_INC, |
| cpi->sf.inter_sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); |
| } |
| } |
| } |
| |
| #if CONFIG_AV1_TEMPORAL_DENOISING |
| static void av1_pickmode_ctx_den_update( |
| AV1_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig, |
| unsigned int ref_frame_cost[REF_FRAMES], |
| int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES], int reuse_inter_pred, |
| BEST_PICKMODE *bp) { |
| ctx_den->zero_last_cost_orig = zero_last_cost_orig; |
| ctx_den->ref_frame_cost = ref_frame_cost; |
| ctx_den->frame_mv = frame_mv; |
| ctx_den->reuse_inter_pred = reuse_inter_pred; |
| ctx_den->best_tx_size = bp->best_tx_size; |
| ctx_den->best_mode = bp->best_mode; |
| ctx_den->best_ref_frame = bp->best_ref_frame; |
| ctx_den->best_pred_filter = bp->best_pred_filter; |
| ctx_den->best_mode_skip_txfm = bp->best_mode_skip_txfm; |
| } |
| |
| static void recheck_zeromv_after_denoising( |
| AV1_COMP *cpi, MB_MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd, |
| AV1_DENOISER_DECISION decision, AV1_PICKMODE_CTX_DEN *ctx_den, |
| struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_STATS *best_rdc, |
| BEST_PICKMODE *best_pickmode, BLOCK_SIZE bsize, int mi_row, int mi_col) { |
| // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on |
| // denoised result. Only do this under noise conditions, and if rdcost of |
| // ZEROMV on original source is not significantly higher than rdcost of best |
| // mode. |
| if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow && |
| ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) && |
| ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) || |
| (ctx_den->best_ref_frame == GOLDEN_FRAME && |
| cpi->svc.number_spatial_layers == 1 && |
| decision == FILTER_ZEROMV_BLOCK))) { |
| // Check if we should pick ZEROMV on denoised signal. |
| AV1_COMMON *const cm = &cpi->common; |
| RD_STATS this_rdc; |
| const ModeCosts *mode_costs = &x->mode_costs; |
| TxfmSearchInfo *txfm_info = &x->txfm_search_info; |
| MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext; |
| |
| mi->mode = GLOBALMV; |
| mi->ref_frame[0] = LAST_FRAME; |
| mi->ref_frame[1] = NONE_FRAME; |
| set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE_FRAME); |
| mi->mv[0].as_int = 0; |
| mi->interp_filters = av1_broadcast_interp_filter(EIGHTTAP_REGULAR); |
| xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0]; |
| av1_enc_build_inter_predictor_y(xd, mi_row, mi_col); |
| unsigned int var; |
| model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc, &var, 1, NULL); |
| |
| const int16_t mode_ctx = |
| av1_mode_context_analyzer(mbmi_ext->mode_context, mi->ref_frame); |
| this_rdc.rate += cost_mv_ref(mode_costs, GLOBALMV, mode_ctx); |
| |
| this_rdc.rate += ctx_den->ref_frame_cost[LAST_FRAME]; |
| this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist); |
| txfm_info->skip_txfm = this_rdc.skip_txfm; |
| // Don't switch to ZEROMV if the rdcost for ZEROMV on denoised source |
| // is higher than best_ref mode (on original source). |
| if (this_rdc.rdcost > best_rdc->rdcost) { |
| this_rdc = *best_rdc; |
| mi->mode = best_pickmode->best_mode; |
| mi->ref_frame[0] = best_pickmode->best_ref_frame; |
| set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE_FRAME); |
| mi->interp_filters = best_pickmode->best_pred_filter; |
| if (best_pickmode->best_ref_frame == INTRA_FRAME) { |
| mi->mv[0].as_int = INVALID_MV; |
| } else { |
| mi->mv[0].as_int = ctx_den |
| ->frame_mv[best_pickmode->best_mode] |
| [best_pickmode->best_ref_frame] |
| .as_int; |
| if (ctx_den->reuse_inter_pred) { |
| xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0]; |
| av1_enc_build_inter_predictor_y(xd, mi_row, mi_col); |
| } |
| } |
| mi->tx_size = best_pickmode->best_tx_size; |
| txfm_info->skip_txfm = best_pickmode->best_mode_skip_txfm; |
| } else { |
| ctx_den->best_ref_frame = LAST_FRAME; |
| *best_rdc = this_rdc; |
| } |
| } |
| } |
| #endif // CONFIG_AV1_TEMPORAL_DENOISING |
| |
| #define FILTER_SEARCH_SIZE 2 |
| |
| /*!\brief Searches for the best interpolation filter |
| * |
| * \ingroup nonrd_mode_search |
| * \callgraph |
| * \callergraph |
| * Iterates through a subset of the possible interpolation filters |
| * (EIGHTTAP_REGULAR, EIGHTTAP_SMOOTH, MULTITAP_SHARP, depending on |
| * FILTER_SEARCH_SIZE) and selects the one that gives the lowest RD cost. |
| * The RD cost is calculated using the curvfit model. Dual filters (different |
| * filters in the x and y directions) are allowed if |
| * sf.interp_sf.disable_dual_filter == 0. |
| * |
| * \param[in] cpi Top-level encoder structure |
| * \param[in] x Pointer to structure holding all the |
| * data for the current macroblock |
| * \param[in] this_rdc Pointer to calculated RD Cost |
| * \param[in] inter_pred_params_sr Pointer to structure holding parameters of |
| inter prediction for single reference |
| * \param[in] mi_row Row index in 4x4 units |
| * \param[in] mi_col Column index in 4x4 units |
| * \param[in] tmp_buffer Pointer to a temporary buffer for |
| * prediction re-use |
| * \param[in] bsize Current block size |
| * \param[in] reuse_inter_pred Flag, indicating prediction re-use |
| * \param[out] this_mode_pred Pointer to store prediction buffer |
| * for prediction re-use |
| * \param[out] this_early_term Flag, indicating that transform can be |
| * skipped |
| * \param[out] var The residue variance of the current |
| * predictor. |
| * \param[in] use_model_yrd_large Flag, indicating special logic to handle |
| * large blocks |
| * \param[in] best_sse Best sse so far. |
| * \param[in] comp_pred Flag, indicating compound mode. |
| * |
| * \remark Nothing is returned. Instead, the calculated RD cost is placed in |
| * \c this_rdc and the best filter is written to \c mi->interp_filters. When |
| * the \c reuse_inter_pred flag is set, this function also outputs |
| * \c this_mode_pred. \c this_early_term is set if the transform can be |
| * skipped |
| */ |
| static void search_filter_ref(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *this_rdc, |
| InterPredParams *inter_pred_params_sr, int mi_row, |
| int mi_col, PRED_BUFFER *tmp_buffer, |
| BLOCK_SIZE bsize, int reuse_inter_pred, |
| PRED_BUFFER **this_mode_pred, |
| int *this_early_term, unsigned int *var, |
| int use_model_yrd_large, int64_t best_sse, |
| int comp_pred) { |
| AV1_COMMON *const cm = &cpi->common; |
| MACROBLOCKD *const xd = &x->e_mbd; |
| struct macroblockd_plane *const pd = &xd->plane[0]; |
| MB_MODE_INFO *const mi = xd->mi[0]; |
| const int bw = block_size_wide[bsize]; |
| int dim_factor = |
| (cpi->sf.interp_sf.disable_dual_filter == 0) ? FILTER_SEARCH_SIZE : 1; |
| RD_STATS pf_rd_stats[FILTER_SEARCH_SIZE * FILTER_SEARCH_SIZE] = { 0 }; |
| TX_SIZE pf_tx_size[FILTER_SEARCH_SIZE * FILTER_SEARCH_SIZE] = { 0 }; |
| PRED_BUFFER *current_pred = *this_mode_pred; |
| int best_skip = 0; |
| int best_early_term = 0; |
| int64_t best_cost = INT64_MAX; |
| int best_filter_index = -1; |
| |
| SubpelParams subpel_params; |
| // Initialize inter prediction params at mode level for single reference |
| // mode. |
| if (!comp_pred) |
| init_inter_mode_params(&mi->mv[0].as_mv, inter_pred_params_sr, |
| &subpel_params, xd->block_ref_scale_factors[0], |
| pd->pre->width, pd->pre->height); |
| for (int i = 0; i < FILTER_SEARCH_SIZE * FILTER_SEARCH_SIZE; ++i) { |
| int64_t cost; |
| if (cpi->sf.interp_sf.disable_dual_filter && |
| filters_ref_set[i].filter_x != filters_ref_set[i].filter_y) |
| continue; |
| mi->interp_filters.as_filters.x_filter = filters_ref_set[i].filter_x; |
| mi->interp_filters.as_filters.y_filter = filters_ref_set[i].filter_y; |
| if (!comp_pred) |
| av1_enc_build_inter_predictor_y_nonrd(xd, inter_pred_params_sr, |
| &subpel_params); |
| else |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0, 0); |
| unsigned int curr_var = UINT_MAX; |
| if (use_model_yrd_large) |
| model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd, |
| &pf_rd_stats[i], this_early_term, 1, best_sse, |
| &curr_var, UINT_MAX); |
| else |
| model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rd_stats[i], &curr_var, 1, NULL); |
| pf_rd_stats[i].rate += av1_get_switchable_rate( |
| x, xd, cm->features.interp_filter, cm->seq_params->enable_dual_filter); |
| cost = RDCOST(x->rdmult, pf_rd_stats[i].rate, pf_rd_stats[i].dist); |
| pf_tx_size[i] = mi->tx_size; |
| if (cost < best_cost) { |
| *var = curr_var; |
| best_filter_index = i; |
| best_cost = cost; |
| best_skip = pf_rd_stats[i].skip_txfm; |
| best_early_term = *this_early_term; |
| if (reuse_inter_pred) { |
| if (*this_mode_pred != current_pred) { |
| free_pred_buffer(*this_mode_pred); |
| *this_mode_pred = current_pred; |
| } |
| current_pred = &tmp_buffer[get_pred_buffer(tmp_buffer, 3)]; |
| pd->dst.buf = current_pred->data; |
| pd->dst.stride = bw; |
| } |
| } |
| } |
| assert(best_filter_index >= 0 && |
| best_filter_index < dim_factor * FILTER_SEARCH_SIZE); |
| if (reuse_inter_pred && *this_mode_pred != current_pred) |
| free_pred_buffer(current_pred); |
| |
| mi->interp_filters.as_filters.x_filter = |
| filters_ref_set[best_filter_index].filter_x; |
| mi->interp_filters.as_filters.y_filter = |
| filters_ref_set[best_filter_index].filter_y; |
| mi->tx_size = pf_tx_size[best_filter_index]; |
| this_rdc->rate = pf_rd_stats[best_filter_index].rate; |
| this_rdc->dist = pf_rd_stats[best_filter_index].dist; |
| this_rdc->sse = pf_rd_stats[best_filter_index].sse; |
| this_rdc->skip_txfm = (best_skip || best_early_term); |
| *this_early_term = best_early_term; |
| if (reuse_inter_pred) { |
| pd->dst.buf = (*this_mode_pred)->data; |
| pd->dst.stride = (*this_mode_pred)->stride; |
| } else if (best_filter_index < dim_factor * FILTER_SEARCH_SIZE - 1) { |
| if (!comp_pred) |
| av1_enc_build_inter_predictor_y_nonrd(xd, inter_pred_params_sr, |
| &subpel_params); |
| else |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0, 0); |
| } |
| } |
| #if !CONFIG_REALTIME_ONLY |
| #define MOTION_MODE_SEARCH_SIZE 2 |
| |
| static AOM_INLINE int is_warped_mode_allowed(const AV1_COMP *cpi, |
| MACROBLOCK *const x, |
| const MB_MODE_INFO *mbmi) { |
| const FeatureFlags *const features = &cpi->common.features; |
| const MACROBLOCKD *xd = &x->e_mbd; |
| |
| if (cpi->sf.inter_sf.extra_prune_warped) return 0; |
| if (has_second_ref(mbmi)) return 0; |
| MOTION_MODE last_motion_mode_allowed = SIMPLE_TRANSLATION; |
| |
| if (features->switchable_motion_mode) { |
| // Determine which motion modes to search if more than SIMPLE_TRANSLATION |
| // is allowed. |
| last_motion_mode_allowed = motion_mode_allowed( |
| xd->global_motion, xd, mbmi, features->allow_warped_motion); |
| } |
| |
| return last_motion_mode_allowed == WARPED_CAUSAL; |
| } |
| |
| static void calc_num_proj_ref(AV1_COMP *cpi, MACROBLOCK *x, MB_MODE_INFO *mi) { |
| AV1_COMMON *const cm = &cpi->common; |
| MACROBLOCKD *const xd = &x->e_mbd; |
| const FeatureFlags *const features = &cm->features; |
| |
| mi->num_proj_ref = 1; |
| WARP_SAMPLE_INFO *const warp_sample_info = |
| &x->warp_sample_info[mi->ref_frame[0]]; |
| int *pts0 = warp_sample_info->pts; |
| int *pts_inref0 = warp_sample_info->pts_inref; |
| MOTION_MODE last_motion_mode_allowed = SIMPLE_TRANSLATION; |
| |
| if (features->switchable_motion_mode) { |
| // Determine which motion modes to search if more than SIMPLE_TRANSLATION |
| // is allowed. |
| last_motion_mode_allowed = motion_mode_allowed( |
| xd->global_motion, xd, mi, features->allow_warped_motion); |
| } |
| |
| if (last_motion_mode_allowed == WARPED_CAUSAL) { |
| if (warp_sample_info->num < 0) { |
| warp_sample_info->num = av1_findSamples(cm, xd, pts0, pts_inref0); |
| } |
| mi->num_proj_ref = warp_sample_info->num; |
| } |
| } |
| |
| static void search_motion_mode(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *this_rdc, |
| int mi_row, int mi_col, BLOCK_SIZE bsize, |
| int *this_early_term, int use_model_yrd_large, |
| int *rate_mv, int64_t best_sse) { |
| AV1_COMMON *const cm = &cpi->common; |
| MACROBLOCKD *const xd = &x->e_mbd; |
| const FeatureFlags *const features = &cm->features; |
| MB_MODE_INFO *const mi = xd->mi[0]; |
| RD_STATS pf_rd_stats[MOTION_MODE_SEARCH_SIZE] = { 0 }; |
| int best_skip = 0; |
| int best_early_term = 0; |
| int64_t best_cost = INT64_MAX; |
| int best_mode_index = -1; |
| const int interp_filter = features->interp_filter; |
| |
| const MOTION_MODE motion_modes[MOTION_MODE_SEARCH_SIZE] = { |
| SIMPLE_TRANSLATION, WARPED_CAUSAL |
| }; |
| int mode_search_size = |
| is_warped_mode_allowed(cpi, x, mi) ? MOTION_MODE_SEARCH_SIZE : 1; |
| |
| WARP_SAMPLE_INFO *const warp_sample_info = |
| &x->warp_sample_info[mi->ref_frame[0]]; |
| int *pts0 = warp_sample_info->pts; |
| int *pts_inref0 = warp_sample_info->pts_inref; |
| |
| const int total_samples = mi->num_proj_ref; |
| if (total_samples == 0) { |
| // Do not search WARPED_CAUSAL if there are no samples to use to determine |
| // warped parameters. |
| mode_search_size = 1; |
| } |
| |
| const MB_MODE_INFO base_mbmi = *mi; |
| MB_MODE_INFO best_mbmi; |
| |
| for (int i = 0; i < mode_search_size; ++i) { |
| int64_t cost = INT64_MAX; |
| MOTION_MODE motion_mode = motion_modes[i]; |
| *mi = base_mbmi; |
| mi->motion_mode = motion_mode; |
| if (motion_mode == SIMPLE_TRANSLATION) { |
| mi->interp_filters = av1_broadcast_interp_filter(EIGHTTAP_REGULAR); |
| |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0, 0); |
| if (use_model_yrd_large) |
| model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd, |
| &pf_rd_stats[i], this_early_term, 1, best_sse, |
| NULL, UINT_MAX); |
| else |
| model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rd_stats[i], NULL, 1, NULL); |
| pf_rd_stats[i].rate += |
| av1_get_switchable_rate(x, xd, cm->features.interp_filter, |
| cm->seq_params->enable_dual_filter); |
| cost = RDCOST(x->rdmult, pf_rd_stats[i].rate, pf_rd_stats[i].dist); |
| } else if (motion_mode == WARPED_CAUSAL) { |
| int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE]; |
| const ModeCosts *mode_costs = &x->mode_costs; |
| mi->wm_params.wmtype = DEFAULT_WMTYPE; |
| mi->interp_filters = |
| av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter)); |
| |
| memcpy(pts, pts0, total_samples * 2 * sizeof(*pts0)); |
| memcpy(pts_inref, pts_inref0, total_samples * 2 * sizeof(*pts_inref0)); |
| // Select the samples according to motion vector difference |
| if (mi->num_proj_ref > 1) { |
| mi->num_proj_ref = av1_selectSamples(&mi->mv[0].as_mv, pts, pts_inref, |
| mi->num_proj_ref, bsize); |
| } |
| |
| // Compute the warped motion parameters with a least squares fit |
| // using the collected samples |
| if (!av1_find_projection(mi->num_proj_ref, pts, pts_inref, bsize, |
| mi->mv[0].as_mv.row, mi->mv[0].as_mv.col, |
| &mi->wm_params, mi_row, mi_col)) { |
| if (mi->mode == NEWMV) { |
| const int_mv mv0 = mi->mv[0]; |
| const WarpedMotionParams wm_params0 = mi->wm_params; |
| const int num_proj_ref0 = mi->num_proj_ref; |
| |
| const int_mv ref_mv = av1_get_ref_mv(x, 0); |
| SUBPEL_MOTION_SEARCH_PARAMS ms_params; |
| av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, |
| &ref_mv.as_mv, NULL); |
| |
| // Refine MV in a small range. |
| av1_refine_warped_mv(xd, cm, &ms_params, bsize, pts0, pts_inref0, |
| total_samples); |
| if (mi->mv[0].as_int == ref_mv.as_int) { |
| continue; |
| } |
| |
| if (mv0.as_int != mi->mv[0].as_int) { |
| // Keep the refined MV and WM parameters. |
| int tmp_rate_mv = av1_mv_bit_cost( |
| &mi->mv[0].as_mv, &ref_mv.as_mv, x->mv_costs->nmv_joint_cost, |
| x->mv_costs->mv_cost_stack, MV_COST_WEIGHT); |
| *rate_mv = tmp_rate_mv; |
| } else { |
| // Restore the old MV and WM parameters. |
| mi->mv[0] = mv0; |
| mi->wm_params = wm_params0; |
| mi->num_proj_ref = num_proj_ref0; |
| } |
| } |
| // Build the warped predictor |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0, |
| av1_num_planes(cm) - 1); |
| if (use_model_yrd_large) |
| model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd, |
| &pf_rd_stats[i], this_early_term, 1, |
| best_sse, NULL, UINT_MAX); |
| else |
| model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rd_stats[i], NULL, 1, NULL); |
| |
| pf_rd_stats[i].rate += |
| mode_costs->motion_mode_cost[bsize][mi->motion_mode]; |
| cost = RDCOST(x->rdmult, pf_rd_stats[i].rate, pf_rd_stats[i].dist); |
| } else { |
| cost = INT64_MAX; |
| } |
| } |
| if (cost < best_cost) { |
| best_mode_index = i; |
| best_cost = cost; |
| best_skip = pf_rd_stats[i].skip_txfm; |
| best_early_term = *this_early_term; |
| best_mbmi = *mi; |
| } |
| } |
| assert(best_mode_index >= 0 && best_mode_index < MOTION_MODE_SEARCH_SIZE); |
| |
| *mi = best_mbmi; |
| this_rdc->rate = pf_rd_stats[best_mode_index].rate; |
| this_rdc->dist = pf_rd_stats[best_mode_index].dist; |
| this_rdc->sse = pf_rd_stats[best_mode_index].sse; |
| this_rdc->skip_txfm = (best_skip || best_early_term); |
| *this_early_term = best_early_term; |
| if (best_mode_index < MOTION_MODE_SEARCH_SIZE - 1) { |
| av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0, 0); |
| } |
| } |
| #endif // !CONFIG_REALTIME_ONLY |
| |
| #define COLLECT_PICK_MODE_STAT 0 |
| #define COLLECT_NON_SQR_STAT 0 |
| |
| #if COLLECT_PICK_MODE_STAT |
| #include "aom_ports/aom_timer.h" |
| typedef struct _mode_search_stat { |
| int32_t num_blocks[BLOCK_SIZES]; |
| int64_t total_block_times[BLOCK_SIZES]; |
| int32_t num_searches[BLOCK_SIZES][MB_MODE_COUNT]; |
| int32_t num_nonskipped_searches[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t search_times[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t nonskipped_search_times[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t ms_time[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t ifs_time[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t model_rd_time[BLOCK_SIZES][MB_MODE_COUNT]; |
| int64_t txfm_time[BLOCK_SIZES][MB_MODE_COUNT]; |
| struct aom_usec_timer timer1; |
| struct aom_usec_timer timer2; |
| struct aom_usec_timer bsize_timer; |
| } mode_search_stat; |
| |
| static mode_search_stat ms_stat; |
| |
| static AOM_INLINE void print_stage_time(const char *stage_name, |
| int64_t stage_time, |
| int64_t total_time) { |
| printf(" %s: %ld (%f%%)\n", stage_name, stage_time, |
| 100 * stage_time / (float)total_time); |
| } |
| |
| static void print_time(const mode_search_stat *const ms_stat, |
| const BLOCK_SIZE bsize, const int mi_rows, |
| const int mi_cols, const int mi_row, const int mi_col) { |
| if ((mi_row + mi_size_high[bsize] >= mi_rows) && |
| (mi_col + mi_size_wide[bsize] >= mi_cols)) { |
| int64_t total_time = 0l; |
| int32_t total_blocks = 0; |
| for (BLOCK_SIZE bs = 0; bs < BLOCK_SIZES; bs++) { |
| total_time += ms_stat->total_block_times[bs]; |
| total_blocks += ms_stat->num_blocks[bs]; |
| } |
| |
| printf("\n"); |
| for (BLOCK_SIZE bs = 0; bs < BLOCK_SIZES; bs++) { |
| if (ms_stat->num_blocks[bs] == 0) { |
| continue; |
| } |
| if (!COLLECT_NON_SQR_STAT && block_size_wide[bs] != block_size_high[bs]) { |
| continue; |
| } |
| |
| printf("BLOCK_%dX%d Num %d, Time: %ld (%f%%), Avg_time %f:\n", |
| block_size_wide[bs], block_size_high[bs], ms_stat->num_blocks[bs], |
| ms_stat->total_block_times[bs], |
| 100 * ms_stat->total_block_times[bs] / (float)total_time, |
| (float)ms_stat->total_block_times[bs] / ms_stat->num_blocks[bs]); |
| for (int j = 0; j < MB_MODE_COUNT; j++) { |
| if (ms_stat->nonskipped_search_times[bs][j] == 0) { |
| continue; |
| } |
| |
| int64_t total_mode_time = ms_stat->nonskipped_search_times[bs][j]; |
| printf(" Mode %d, %d/%d tps %f\n", j, |
| ms_stat->num_nonskipped_searches[bs][j], |
| ms_stat->num_searches[bs][j], |
| ms_stat->num_nonskipped_searches[bs][j] > 0 |
| ? (float)ms_stat->nonskipped_search_times[bs][j] / |
| ms_stat->num_nonskipped_searches[bs][j] |
| : 0l); |
| if (j >= |