/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AV1_COMMON_ONYXC_INT_H_
#define AV1_COMMON_ONYXC_INT_H_

#include "./aom_config.h"
#include "./av1_rtcd.h"
#include "aom/internal/aom_codec_internal.h"
#include "aom_util/aom_thread.h"
#if CONFIG_ANS
#include "aom_dsp/ans.h"
#endif
#include "av1/common/alloccommon.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
#include "av1/common/frame_buffers.h"
#include "av1/common/loopfilter.h"
#include "av1/common/mv.h"
#include "av1/common/quant_common.h"
#if CONFIG_LOOP_RESTORATION
#include "av1/common/restoration.h"
#endif  // CONFIG_LOOP_RESTORATION
#include "av1/common/tile_common.h"
#include "av1/common/odintrin.h"
#if CONFIG_PVQ
#include "av1/common/pvq.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)

// 4 scratch frames for the new frames to support a maximum of 4 cores decoding
// in parallel, 3 for scaled references on the encoder.
// TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number
// of framebuffers.
// TODO(jkoleszar): These 3 extra references could probably come from the
// normal reference pool.
#define FRAME_BUFFERS (REF_FRAMES + 7)
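// i.e. with REF_FRAMES == 8 this yields 15 buffers in total: 8 reference
// slots plus the 4 decode scratch frames and 3 encoder scaled references
// noted above.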

#if CONFIG_REFERENCE_BUFFER
/* Constant values while waiting for the sequence header */
#define FRAME_ID_NUMBERS_PRESENT_FLAG 1
#define FRAME_ID_LENGTH_MINUS7 8         // Allows frame id up to 2^15-1
#define DELTA_FRAME_ID_LENGTH_MINUS2 12  // Allows frame id deltas up to 2^14-1
#endif
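// The MINUS encodings above work out to: frame id length = 8 + 7 = 15 bits,
// so ids lie in [0, 2^15 - 1]; delta length = 12 + 2 = 14 bits, so deltas lie
// in [0, 2^14 - 1], matching the comments on the two defines.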

#if CONFIG_EXT_REFS
#define FRAME_CONTEXTS_LOG2 3
#else
#define FRAME_CONTEXTS_LOG2 2
#endif

#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)

#define NUM_PING_PONG_BUFFERS 2

typedef enum {
  SINGLE_REFERENCE = 0,
  COMPOUND_REFERENCE = 1,
  REFERENCE_MODE_SELECT = 2,
  REFERENCE_MODES = 3,
} REFERENCE_MODE;

typedef enum {
  RESET_FRAME_CONTEXT_NONE = 0,
  RESET_FRAME_CONTEXT_CURRENT = 1,
  RESET_FRAME_CONTEXT_ALL = 2,
} RESET_FRAME_CONTEXT_MODE;

typedef enum {
  /**
   * Update frame context to values resulting from forward probability
   * updates signaled in the frame header
   */
  REFRESH_FRAME_CONTEXT_FORWARD,
  /**
   * Update frame context to values resulting from backward probability
   * updates based on entropy/counts in the decoded frame
   */
  REFRESH_FRAME_CONTEXT_BACKWARD,
} REFRESH_FRAME_CONTEXT_MODE;

typedef struct {
  int_mv mv[2];
#if CONFIG_REF_MV
  int_mv pred_mv[2];
#endif
  MV_REFERENCE_FRAME ref_frame[2];
} MV_REF;

typedef struct {
  int ref_count;
  MV_REF *mvs;
  int mi_rows;
  int mi_cols;
  aom_codec_frame_buffer_t raw_frame_buffer;
  YV12_BUFFER_CONFIG buf;
#if CONFIG_TEMPMV_SIGNALING
  uint8_t intra_only;
#endif
  // The following variables will only be used in frame parallel decode.

  // frame_worker_owner indicates which FrameWorker owns this buffer. NULL
  // means that no FrameWorker owns, or is decoding, this buffer.
  AVxWorker *frame_worker_owner;

  // row and col indicate the position, in real pixel units, to which the
  // frame has been decoded. They are reset to -1 when decoding begins and
  // set to INT_MAX when the frame is fully decoded.
  int row;
  int col;
} RefCntBuffer;

typedef struct BufferPool {
// Protect BufferPool from being accessed by several FrameWorkers at
// the same time during frame parallel decode.
// TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
#if CONFIG_MULTITHREAD
  pthread_mutex_t pool_mutex;
#endif

  // Private data associated with the frame buffer callbacks.
  void *cb_priv;

  aom_get_frame_buffer_cb_fn_t get_fb_cb;
  aom_release_frame_buffer_cb_fn_t release_fb_cb;

  RefCntBuffer frame_bufs[FRAME_BUFFERS];

  // Frame buffers allocated internally by the codec.
  InternalFrameBufferList int_frame_buffers;
} BufferPool;

typedef struct AV1Common {
  struct aom_internal_error_info error;
  aom_color_space_t color_space;
  int color_range;
  int width;
  int height;
  int render_width;
  int render_height;
  int last_width;
  int last_height;

  // TODO(jkoleszar): this implies chroma ss right now, but could vary per
  // plane. Revisit as part of the future change to YV12_BUFFER_CONFIG to
  // support additional planes.
  int subsampling_x;
  int subsampling_y;

#if CONFIG_AOM_HIGHBITDEPTH
  // Marks if we need to use 16bit frame buffers (1: yes, 0: no).
  int use_highbitdepth;
#endif
#if CONFIG_CLPF
  // Two bits are used to signal the strength for all blocks and the
  // valid values are:
  // 0: no filtering
  // 1: strength = 1
  // 2: strength = 2
  // 3: strength = 4
  int clpf_strength_y;
  int clpf_strength_u;
  int clpf_strength_v;

  // If clpf_strength_y is not 0, another two bits are used to signal
  // the filter block size. The valid values for clpf_size are:
  // 0: no block signaling
  // 1: 32x32
  // 2: 64x64
  // 3: 128x128
  CLPF_BLOCK_SIZE clpf_size;

  // Buffer for storing whether to filter individual blocks.
  int8_t *clpf_blocks;
  int clpf_stride;
#endif

  YV12_BUFFER_CONFIG *frame_to_show;
  RefCntBuffer *prev_frame;

  // TODO(hkuang): Combine this with cur_buf in macroblockd.
  RefCntBuffer *cur_frame;

  int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */

  // Prepare ref_frame_map for the next frame.
  // Only used in frame parallel decode.
  int next_ref_frame_map[REF_FRAMES];

  // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
  // roll new_fb_idx into it.

  // Each Inter frame can reference INTER_REFS_PER_FRAME buffers
  RefBuffer frame_refs[INTER_REFS_PER_FRAME];

  int new_fb_idx;

  FRAME_TYPE last_frame_type; /* last frame's frame type for motion search. */
  FRAME_TYPE frame_type;

  int show_frame;
  int last_show_frame;
  int show_existing_frame;
#if CONFIG_EXT_REFS
  // Flag for a frame used as a reference - not written to the bitstream
  int is_reference_frame;
#endif  // CONFIG_EXT_REFS

  // Flag signaling that the frame is encoded using only INTRA modes.
  uint8_t intra_only;
  uint8_t last_intra_only;

  int allow_high_precision_mv;

#if CONFIG_PALETTE
  int allow_screen_content_tools;
#endif  // CONFIG_PALETTE

  // Flag signaling which frame contexts should be reset to default values.
  RESET_FRAME_CONTEXT_MODE reset_frame_context;

  // MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in
  // MODE_INFO (8-pixel) units.
  int MBs;
  int mb_rows, mi_rows;
  int mb_cols, mi_cols;
  int mi_stride;

  /* profile settings */
  TX_MODE tx_mode;

  int base_qindex;
  int y_dc_delta_q;
  int uv_dc_delta_q;
  int uv_ac_delta_q;
  int16_t y_dequant[MAX_SEGMENTS][2];
  int16_t uv_dequant[MAX_SEGMENTS][2];

#if CONFIG_AOM_QM
  // Global quant matrix tables
  qm_val_t *giqmatrix[NUM_QM_LEVELS][2][2][TX_SIZES];
  qm_val_t *gqmatrix[NUM_QM_LEVELS][2][2][TX_SIZES];

  // Local quant matrix tables for each frame
  qm_val_t *y_iqmatrix[MAX_SEGMENTS][2][TX_SIZES];
  qm_val_t *uv_iqmatrix[MAX_SEGMENTS][2][TX_SIZES];
  // Encoder
  qm_val_t *y_qmatrix[MAX_SEGMENTS][2][TX_SIZES];
  qm_val_t *uv_qmatrix[MAX_SEGMENTS][2][TX_SIZES];

  int using_qmatrix;
  int min_qmlevel;
  int max_qmlevel;
#endif
#if CONFIG_NEW_QUANT
  dequant_val_type_nuq y_dequant_nuq[MAX_SEGMENTS][QUANT_PROFILES][COEF_BANDS];
  dequant_val_type_nuq uv_dequant_nuq[MAX_SEGMENTS][QUANT_PROFILES][COEF_BANDS];
#endif

  /* We allocate a MODE_INFO struct for each macroblock, together with
     an extra row on top and column on the left to simplify prediction. */
  int mi_alloc_size;
  MODE_INFO *mip; /* Base of allocated array */
  MODE_INFO *mi;  /* Corresponds to upper left visible macroblock */

  // TODO(agrange): Move prev_mi into encoder structure.
  // prev_mip and prev_mi will only be allocated in encoder.
  MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
  MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */

  // Separate mi functions between encoder and decoder.
  int (*alloc_mi)(struct AV1Common *cm, int mi_size);
  void (*free_mi)(struct AV1Common *cm);
  void (*setup_mi)(struct AV1Common *cm);

  // Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
  // area will be NULL.
  MODE_INFO **mi_grid_base;
  MODE_INFO **mi_grid_visible;
  MODE_INFO **prev_mi_grid_base;
  MODE_INFO **prev_mi_grid_visible;

  // Whether to use previous frame's motion vectors for prediction.
  int use_prev_frame_mvs;

  // Persistent mb segment id map used in prediction.
  int seg_map_idx;
  int prev_seg_map_idx;

  uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
  uint8_t *last_frame_seg_map;
  uint8_t *current_frame_seg_map;
  int seg_map_alloc_size;

  InterpFilter interp_filter;

  loop_filter_info_n lf_info;
#if CONFIG_LOOP_RESTORATION
  RestorationInfo rst_info[MAX_MB_PLANE];
  RestorationInternal rst_internal;
#endif  // CONFIG_LOOP_RESTORATION

  // Flag signaling how frame contexts should be updated at the end of
  // a frame decode
  REFRESH_FRAME_CONTEXT_MODE refresh_frame_context;

  int ref_frame_sign_bias[TOTAL_REFS_PER_FRAME]; /* Two state 0, 1 */

  struct loopfilter lf;
  struct segmentation seg;

  int frame_parallel_decode;  // frame-based threading.

// Context probabilities for reference frame prediction
#if CONFIG_EXT_REFS
  MV_REFERENCE_FRAME comp_fwd_ref[FWD_REFS];
  MV_REFERENCE_FRAME comp_bwd_ref[BWD_REFS];
#else
  MV_REFERENCE_FRAME comp_fixed_ref;
  MV_REFERENCE_FRAME comp_var_ref[COMP_REFS];
#endif  // CONFIG_EXT_REFS
  REFERENCE_MODE reference_mode;

  FRAME_CONTEXT *fc;              /* this frame entropy */
  FRAME_CONTEXT *frame_contexts;  // FRAME_CONTEXTS
  unsigned int frame_context_idx; /* Context to use/update */
  FRAME_COUNTS counts;

#if CONFIG_ENTROPY
  // The initial probabilities for a frame, before any subframe backward
  // update, and after forward update.
  av1_coeff_probs_model starting_coef_probs[TX_SIZES][PLANE_TYPES];
  // Number of subframe backward updates already done
  uint8_t coef_probs_update_idx;
  // Signal if the backward update is subframe or end-of-frame
  uint8_t partial_prob_update;
  // Frame level flag to turn on/off subframe backward update
  uint8_t do_subframe_update;
#endif  // CONFIG_ENTROPY

  unsigned int current_video_frame;
  BITSTREAM_PROFILE profile;

  // AOM_BITS_8 in profile 0 or 1, AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
  aom_bit_depth_t bit_depth;
  aom_bit_depth_t dequant_bit_depth;  // bit_depth of current dequantizer

  int error_resilient_mode;

#if !CONFIG_EXT_TILE
  int log2_tile_cols, log2_tile_rows;
#endif  // !CONFIG_EXT_TILE
  int tile_cols, tile_rows;
  int tile_width, tile_height;  // In MI units

#if CONFIG_LOOPFILTERING_ACROSS_TILES
  int loop_filter_across_tiles_enabled;
#endif  // CONFIG_LOOPFILTERING_ACROSS_TILES

  int byte_alignment;
  int skip_loop_filter;

  // Private data associated with the frame buffer callbacks.
  void *cb_priv;
  aom_get_frame_buffer_cb_fn_t get_fb_cb;
  aom_release_frame_buffer_cb_fn_t release_fb_cb;

  // Handles memory for the codec.
  InternalFrameBufferList int_frame_buffers;

  // External BufferPool passed from outside.
  BufferPool *buffer_pool;

  PARTITION_CONTEXT *above_seg_context;
  ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
#if CONFIG_VAR_TX
  TXFM_CONTEXT *above_txfm_context;
  TXFM_CONTEXT left_txfm_context[MAX_MIB_SIZE];
#endif
  int above_context_alloc_cols;

  // scratch memory for intraonly/keyframe forward updates from default tables
  // - this is intentionally not placed in FRAME_CONTEXT since it's reset upon
  // each keyframe and not used afterwards
  aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
#if CONFIG_GLOBAL_MOTION
  WarpedMotionParams global_motion[TOTAL_REFS_PER_FRAME];
#endif

  BLOCK_SIZE sb_size;  // Size of the superblock used for this frame
  int mib_size;        // Size of the superblock in units of MI blocks
  int mib_size_log2;   // Log 2 of above.
#if CONFIG_DERING
  int dering_level;
#endif

#if CONFIG_DELTA_Q
  int delta_q_present_flag;
  // Resolution of delta quant
  int delta_q_res;
#endif
#if CONFIG_TILE_GROUPS
  int num_tg;
#endif
#if CONFIG_REFERENCE_BUFFER
  int current_frame_id;
  int ref_frame_id[REF_FRAMES];
  int valid_for_referencing[REF_FRAMES];
  int refresh_mask;
  int invalid_delta_frame_id_minus1;
#endif
#if CONFIG_ANS && ANS_MAX_SYMBOLS
  int ans_window_size_log2;
#endif
} AV1_COMMON;

#if CONFIG_REFERENCE_BUFFER
/* Initial version of sequence header structure */
typedef struct SequenceHeader {
  int frame_id_numbers_present_flag;
  int frame_id_length_minus7;
  int delta_frame_id_length_minus2;
} SequenceHeader;
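
/* Note: until a real sequence header is coded, these fields presumably mirror
   the FRAME_ID_NUMBERS_PRESENT_FLAG / FRAME_ID_LENGTH_MINUS7 /
   DELTA_FRAME_ID_LENGTH_MINUS2 constants defined near the top of this
   file. */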
#endif

// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
static void lock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(&pool->pool_mutex);
#else
  (void)pool;
#endif
}

static void unlock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
  pthread_mutex_unlock(&pool->pool_mutex);
#else
  (void)pool;
#endif
}
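
// Typical usage pattern (a sketch only; see get_free_fb() below for a real
// caller): hold the lock around any read-modify-write of a buffer's
// ref_count so that concurrent FrameWorkers do not race on it.
//
//   lock_buffer_pool(pool);
//   --pool->frame_bufs[idx].ref_count;
//   unlock_buffer_pool(pool);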

static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
  if (index < 0 || index >= REF_FRAMES) return NULL;
  if (cm->ref_frame_map[index] < 0) return NULL;
  assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
  return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
}

static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
    const AV1_COMMON *const cm) {
  return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}

static INLINE int get_free_fb(AV1_COMMON *cm) {
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  lock_buffer_pool(cm->buffer_pool);
  for (i = 0; i < FRAME_BUFFERS; ++i)
    if (frame_bufs[i].ref_count == 0) break;

  if (i != FRAME_BUFFERS) {
    frame_bufs[i].ref_count = 1;
  } else {
    // Reset i to be INVALID_IDX to indicate no free buffer found.
    i = INVALID_IDX;
  }

  unlock_buffer_pool(cm->buffer_pool);
  return i;
}
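
// Illustrative caller (a hypothetical decoder snippet, not code from this
// library): acquire a free buffer for the next frame and fail cleanly when
// the pool is exhausted, rather than indexing frame_bufs with INVALID_IDX.
//
//   const int fb_idx = get_free_fb(cm);
//   if (fb_idx == INVALID_IDX) return AOM_CODEC_MEM_ERROR;
//   cm->new_fb_idx = fb_idx;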

static INLINE void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) {
  const int ref_index = *idx;

  if (ref_index >= 0 && bufs[ref_index].ref_count > 0)
    bufs[ref_index].ref_count--;

  *idx = new_idx;

  bufs[new_idx].ref_count++;
}
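
// Example (a sketch, with `slot` a hypothetical reference slot index): point
// the slot at the newly decoded frame. ref_cnt_fb() drops one reference on
// the buffer the slot used to hold and takes one on cm->new_fb_idx.
//
//   ref_cnt_fb(cm->buffer_pool->frame_bufs, &cm->ref_frame_map[slot],
//              cm->new_fb_idx);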

static INLINE int mi_cols_aligned_to_sb(const AV1_COMMON *cm) {
  return ALIGN_POWER_OF_TWO(cm->mi_cols, cm->mib_size_log2);
}

static INLINE int mi_rows_aligned_to_sb(const AV1_COMMON *cm) {
  return ALIGN_POWER_OF_TWO(cm->mi_rows, cm->mib_size_log2);
}
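
// Worked example (assuming 64x64 superblocks and 8-pixel MI units, so
// mib_size_log2 == 3): a 1080p frame has mi_rows == 1080 / 8 == 135, which
// mi_rows_aligned_to_sb() rounds up to 136, i.e. 17 full superblock rows.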

static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
  return cm->frame_type == KEY_FRAME || cm->intra_only;
}

static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_PVQ
                                        tran_low_t *pvq_ref_coeff,
#endif
                                        tran_low_t *dqcoeff) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    xd->plane[i].dqcoeff = dqcoeff;
#if CONFIG_PVQ
    xd->plane[i].pvq_ref_coeff = pvq_ref_coeff;
#endif
    xd->above_context[i] = cm->above_context[i];
    if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
      memcpy(xd->plane[i].seg_dequant, cm->y_dequant, sizeof(cm->y_dequant));
#if CONFIG_AOM_QM
      memcpy(xd->plane[i].seg_iqmatrix, cm->y_iqmatrix, sizeof(cm->y_iqmatrix));
#endif

#if CONFIG_NEW_QUANT
      memcpy(xd->plane[i].seg_dequant_nuq, cm->y_dequant_nuq,
             sizeof(cm->y_dequant_nuq));
#endif
    } else {
      memcpy(xd->plane[i].seg_dequant, cm->uv_dequant, sizeof(cm->uv_dequant));
#if CONFIG_AOM_QM
      memcpy(xd->plane[i].seg_iqmatrix, cm->uv_iqmatrix,
             sizeof(cm->uv_iqmatrix));
#endif
#if CONFIG_NEW_QUANT
      memcpy(xd->plane[i].seg_dequant_nuq, cm->uv_dequant_nuq,
             sizeof(cm->uv_dequant_nuq));
#endif
    }
    xd->fc = cm->fc;
  }
  xd->above_seg_context = cm->above_seg_context;
#if CONFIG_VAR_TX
  xd->above_txfm_context = cm->above_txfm_context;
#endif
  xd->mi_stride = cm->mi_stride;
  xd->error_info = &cm->error;
}

static INLINE void set_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col) {
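  // Note: entropy contexts are tracked per 4x4 unit, while mi_row/mi_col
  // count 8-pixel MODE_INFO units, hence the factor of 2 below.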
  const int above_idx = mi_col * 2;
  const int left_idx = (mi_row * 2) & MAX_MIB_MASK_2;
  int i;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblockd_plane *const pd = &xd->plane[i];
    pd->above_context = &xd->above_context[i][above_idx >> pd->subsampling_x];
    pd->left_context = &xd->left_context[i][left_idx >> pd->subsampling_y];
  }
}

static INLINE int calc_mi_size(int len) {
  // len is in mi units.
  return len + MAX_MIB_SIZE;
}

static INLINE void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; i++) {
    xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x;
    xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y;

    xd->plane[i].width = (bw * MI_SIZE) >> xd->plane[i].subsampling_x;
    xd->plane[i].height = (bh * MI_SIZE) >> xd->plane[i].subsampling_y;
  }
}
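
// Worked example: for a 64x64 block, bw == bh == 8 (in 8-pixel MI units).
// The luma plane then gets n4_w == 16 4x4 units and width == 64 pixels;
// with 4:2:0 subsampling the chroma planes get n4_w == 8 and width == 32.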

static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
                                  int mi_row, int bh, int mi_col, int bw,
                                  int mi_rows, int mi_cols) {
  xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
  xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8;
  xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
  xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8;

  // Are edges available for intra prediction?
  xd->up_available = (mi_row > tile->mi_row_start);
  xd->left_available = (mi_col > tile->mi_col_start);
  if (xd->up_available) {
    xd->above_mi = xd->mi[-xd->mi_stride];
    // above_mi may be NULL in encoder's first pass.
    xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
  } else {
    xd->above_mi = NULL;
    xd->above_mbmi = NULL;
  }

  if (xd->left_available) {
    xd->left_mi = xd->mi[-1];
    // left_mi may be NULL in encoder's first pass.
    xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
  } else {
    xd->left_mi = NULL;
    xd->left_mbmi = NULL;
  }

  xd->n8_h = bh;
  xd->n8_w = bw;
#if CONFIG_REF_MV
  xd->is_sec_rect = 0;
  if (xd->n8_w < xd->n8_h)
    if (mi_col & (xd->n8_h - 1)) xd->is_sec_rect = 1;

  if (xd->n8_w > xd->n8_h)
    if (mi_row & (xd->n8_w - 1)) xd->is_sec_rect = 1;
#endif
}
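
// Worked example: the mb_to_*_edge fields are signed distances in 1/8-pel
// units. For mi_row == 2 (16 pixels down, with MI_SIZE == 8),
// mb_to_top_edge == -(2 * 8) * 8 == -128, i.e. the frame's top edge lies
// 16 pixels above the block.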

static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
                                               const MODE_INFO *mi,
                                               const MODE_INFO *above_mi,
                                               const MODE_INFO *left_mi,
                                               int block) {
  const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
  const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
  return cm->kf_y_prob[above][left];
}

#if CONFIG_EC_MULTISYMBOL
static INLINE aom_cdf_prob *get_y_mode_cdf(FRAME_CONTEXT *tile_ctx,
                                           const MODE_INFO *mi,
                                           const MODE_INFO *above_mi,
                                           const MODE_INFO *left_mi,
                                           int block) {
  const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
  const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
  return tile_ctx->kf_y_cdf[above][left];
}
#endif

static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
                                            int mi_col, BLOCK_SIZE subsize,
                                            BLOCK_SIZE bsize) {
  PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
  PARTITION_CONTEXT *const left_ctx =
      xd->left_seg_context + (mi_row & MAX_MIB_MASK);

#if CONFIG_EXT_PARTITION_TYPES
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  memset(above_ctx, partition_context_lookup[subsize].above, bw);
  memset(left_ctx, partition_context_lookup[subsize].left, bh);
#else
  // num_4x4_blocks_wide_lookup[bsize] / 2
  const int bs = mi_size_wide[bsize];

  // Update the partition context at the end nodes: set partition bits
  // of block sizes larger than the current one to one, and partition
  // bits of smaller block sizes to zero.
  memset(above_ctx, partition_context_lookup[subsize].above, bs);
  memset(left_ctx, partition_context_lookup[subsize].left, bs);
#endif  // CONFIG_EXT_PARTITION_TYPES
}

#if CONFIG_EXT_PARTITION_TYPES
static INLINE void update_ext_partition_context(MACROBLOCKD *xd, int mi_row,
                                                int mi_col, BLOCK_SIZE subsize,
                                                BLOCK_SIZE bsize,
                                                PARTITION_TYPE partition) {
  if (bsize >= BLOCK_8X8) {
    const int hbs = mi_size_wide[bsize] / 2;
    BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
    switch (partition) {
      case PARTITION_SPLIT:
        if (bsize != BLOCK_8X8) break;
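        // A split of BLOCK_8X8 cannot recurse any further, so there are no
        // child calls that would update the context; fall through and update
        // it here like the other partition types.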
      case PARTITION_NONE:
      case PARTITION_HORZ:
      case PARTITION_VERT:
        update_partition_context(xd, mi_row, mi_col, subsize, bsize);
        break;
      case PARTITION_HORZ_A:
        update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
        update_partition_context(xd, mi_row + hbs, mi_col, subsize, subsize);
        break;
      case PARTITION_HORZ_B:
        update_partition_context(xd, mi_row, mi_col, subsize, subsize);
        update_partition_context(xd, mi_row + hbs, mi_col, bsize2, subsize);
        break;
      case PARTITION_VERT_A:
        update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
        update_partition_context(xd, mi_row, mi_col + hbs, subsize, subsize);
        break;
      case PARTITION_VERT_B:
        update_partition_context(xd, mi_row, mi_col, subsize, subsize);
        update_partition_context(xd, mi_row, mi_col + hbs, bsize2, subsize);
        break;
      default: assert(0 && "Invalid partition type");
    }
  }
}
#endif  // CONFIG_EXT_PARTITION_TYPES

static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
                                          int mi_col,
#if CONFIG_UNPOISON_PARTITION_CTX
                                          int has_rows, int has_cols,
#endif
                                          BLOCK_SIZE bsize) {
#if CONFIG_UNPOISON_PARTITION_CTX
  const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
  const PARTITION_CONTEXT *left_ctx =
      xd->left_seg_context + (mi_row & MAX_MIB_MASK);
  // Minimum partition point is 8x8. Offset the bsl accordingly.
  const int bsl = mi_width_log2_lookup[bsize] - mi_width_log2_lookup[BLOCK_8X8];
  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;

  assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
  assert(bsl >= 0);

  if (has_rows && has_cols)
    return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
  else if (has_rows && !has_cols)
    return PARTITION_CONTEXTS_PRIMARY + bsl;
  else if (!has_rows && has_cols)
    return PARTITION_CONTEXTS_PRIMARY + PARTITION_BLOCK_SIZES + bsl;
  else
    return -1;  // Bogus context, forced SPLIT
#else
  const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
  const PARTITION_CONTEXT *left_ctx =
      xd->left_seg_context + (mi_row & MAX_MIB_MASK);
  // Minimum partition point is 8x8. Offset the bsl accordingly.
  const int bsl = mi_width_log2_lookup[bsize] - mi_width_log2_lookup[BLOCK_8X8];
  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;

  assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
  assert(bsl >= 0);

  return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
#endif
}
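
// Worked example (the non-UNPOISON path, assuming PARTITION_PLOFFSET == 4):
// for bsize == BLOCK_16X16, bsl == 1; with the above context bit set
// (above == 1) and the left bit clear (left == 0), the context is
// (0 * 2 + 1) + 1 * 4 == 5.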

static INLINE int max_block_wide(const MACROBLOCKD *xd, const BLOCK_SIZE bsize,
                                 const int plane) {
  int max_blocks_wide = block_size_wide[bsize];
  const struct macroblockd_plane *const pd = &xd->plane[plane];

  if (xd->mb_to_right_edge < 0)
    max_blocks_wide += xd->mb_to_right_edge >> (3 + pd->subsampling_x);

  // Scale the width in the transform block unit.
  return max_blocks_wide >> tx_size_wide_log2[0];
}

static INLINE int max_block_high(const MACROBLOCKD *xd, const BLOCK_SIZE bsize,
                                 const int plane) {
  int max_blocks_high = block_size_high[bsize];
  const struct macroblockd_plane *const pd = &xd->plane[plane];

  if (xd->mb_to_bottom_edge < 0)
    max_blocks_high += xd->mb_to_bottom_edge >> (3 + pd->subsampling_y);

  // Scale the height in the transform block unit.
  return max_blocks_high >> tx_size_wide_log2[0];
}
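
// Worked example for max_block_wide(): a 64-wide luma block whose right half
// extends past the frame boundary has mb_to_right_edge == -32 * 8 == -256
// (1/8-pel units), so max_blocks_wide becomes 64 + (-256 >> 3) == 32 pixels,
// which the final shift converts to 8 4x4 transform-block columns.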

static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
                                          int mi_col_start, int mi_col_end) {
  const int width = mi_col_end - mi_col_start;

  const int offset_y = 2 * mi_col_start;
  const int width_y = 2 * width;
  const int offset_uv = offset_y >> cm->subsampling_x;
  const int width_uv = width_y >> cm->subsampling_x;

  av1_zero_array(cm->above_context[0] + offset_y, width_y);
  av1_zero_array(cm->above_context[1] + offset_uv, width_uv);
  av1_zero_array(cm->above_context[2] + offset_uv, width_uv);

  av1_zero_array(cm->above_seg_context + mi_col_start, width);

#if CONFIG_VAR_TX
  av1_zero_array(cm->above_txfm_context + mi_col_start, width);
#endif  // CONFIG_VAR_TX
}

static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
  av1_zero(xd->left_context);
  av1_zero(xd->left_seg_context);
#if CONFIG_VAR_TX
  av1_zero(xd->left_txfm_context_buffer);
#endif
}

#if CONFIG_VAR_TX
static INLINE TX_SIZE get_min_tx_size(const TX_SIZE tx_size) {
  if (tx_size >= TX_SIZES_ALL) assert(0);
  return txsize_sqr_map[tx_size];
}

static INLINE void set_txfm_ctx(TXFM_CONTEXT *txfm_ctx, uint8_t txs, int len) {
  int i;
  for (i = 0; i < len; ++i) txfm_ctx[i] = txs;
}

static INLINE void set_txfm_ctxs(TX_SIZE tx_size, int n8_w, int n8_h,
                                 const int skip, const MACROBLOCKD *xd) {
  uint8_t bw = tx_size_wide[tx_size];
  uint8_t bh = tx_size_high[tx_size];

  if (skip) {
    bw = n8_w * MI_SIZE;
    bh = n8_h * MI_SIZE;
  }

  set_txfm_ctx(xd->above_txfm_context, bw, n8_w);
  set_txfm_ctx(xd->left_txfm_context, bh, n8_h);
}

static INLINE void txfm_partition_update(TXFM_CONTEXT *above_ctx,
                                         TXFM_CONTEXT *left_ctx,
                                         TX_SIZE tx_size, TX_SIZE txb_size) {
  BLOCK_SIZE bsize = txsize_to_bsize[txb_size];
  int bh = mi_size_high[bsize];
  int bw = mi_size_wide[bsize];
  uint8_t txw = tx_size_wide[tx_size];
  uint8_t txh = tx_size_high[tx_size];
  int i;
  for (i = 0; i < bh; ++i) left_ctx[i] = txh;
  for (i = 0; i < bw; ++i) above_ctx[i] = txw;
}

static INLINE int txfm_partition_context(TXFM_CONTEXT *above_ctx,
                                         TXFM_CONTEXT *left_ctx,
                                         const BLOCK_SIZE bsize,
                                         const TX_SIZE tx_size) {
  const uint8_t txw = tx_size_wide[tx_size];
  const uint8_t txh = tx_size_high[tx_size];
  const int above = *above_ctx < txw;
  const int left = *left_ctx < txh;
  TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  int category = TXFM_PARTITION_CONTEXTS - 1;

  // Dummy return, not used by others.
  if (tx_size <= TX_4X4) return 0;

  switch (AOMMAX(block_size_wide[bsize], block_size_high[bsize])) {
    case 64:
    case 32: max_tx_size = TX_32X32; break;
    case 16: max_tx_size = TX_16X16; break;
    case 8: max_tx_size = TX_8X8; break;
    default: assert(0);
  }

  if (max_tx_size >= TX_8X8) {
    category = (tx_size != max_tx_size && max_tx_size > TX_8X8) +
               (TX_SIZES - 1 - max_tx_size) * 2;
  }
  if (category == TXFM_PARTITION_CONTEXTS - 1) return category;
  return category * 3 + above + left;
}
#endif

static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
                                           const int mi_row, const int mi_col,
                                           const BLOCK_SIZE bsize) {
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
    return PARTITION_INVALID;
  } else {
    const int offset = mi_row * cm->mi_stride + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + offset;
    const MB_MODE_INFO *const mbmi = &mi[0]->mbmi;
    const int bsl = b_width_log2_lookup[bsize];
    const PARTITION_TYPE partition = partition_lookup[bsl][mbmi->sb_type];
#if !CONFIG_EXT_PARTITION_TYPES
    return partition;
#else
    const int hbs = mi_size_wide[bsize] / 2;

    assert(cm->mi_grid_visible[offset] == &cm->mi[offset]);

    if (partition != PARTITION_NONE && bsize > BLOCK_8X8 &&
        mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
      const BLOCK_SIZE h = get_subsize(bsize, PARTITION_HORZ_A);
      const BLOCK_SIZE v = get_subsize(bsize, PARTITION_VERT_A);
      const MB_MODE_INFO *const mbmi_right = &mi[hbs]->mbmi;
      const MB_MODE_INFO *const mbmi_below = &mi[hbs * cm->mi_stride]->mbmi;
      if (mbmi->sb_type == h) {
        return mbmi_below->sb_type == h ? PARTITION_HORZ : PARTITION_HORZ_B;
      } else if (mbmi->sb_type == v) {
        return mbmi_right->sb_type == v ? PARTITION_VERT : PARTITION_VERT_B;
      } else if (mbmi_below->sb_type == h) {
        return PARTITION_HORZ_A;
      } else if (mbmi_right->sb_type == v) {
        return PARTITION_VERT_A;
      } else {
        return PARTITION_SPLIT;
      }
    }

    return partition;
#endif  // !CONFIG_EXT_PARTITION_TYPES
  }
}

static INLINE void set_sb_size(AV1_COMMON *const cm, const BLOCK_SIZE sb_size) {
  cm->sb_size = sb_size;
  cm->mib_size = mi_size_wide[cm->sb_size];
#if CONFIG_CB4X4
  cm->mib_size_log2 = b_width_log2_lookup[cm->sb_size];
#else
  cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
#endif
}

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AV1_COMMON_ONYXC_INT_H_