/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "av1/encoder/av1_multi_thread.h"
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/ethread.h"
#include "av1/encoder/rdopt.h"
#include "aom_dsp/aom_dsp_common.h"

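// Merge the rate-distortion counters gathered by a worker thread (td_t)
// into the main thread's counters (td).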
static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  for (int i = 0; i < REFERENCE_MODES; i++)
    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];

  for (int i = 0; i < REF_FRAMES; i++)
    td->rd_counts.global_motion_used[i] +=
        td_t->rd_counts.global_motion_used[i];

  td->rd_counts.compound_ref_used_flag |=
      td_t->rd_counts.compound_ref_used_flag;
  td->rd_counts.skip_mode_used_flag |= td_t->rd_counts.skip_mode_used_flag;
}

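// After row-based multi-threaded encoding, walk the mode-info grid of every
// tile and patch up the loop-filter delta values: skipped full-size
// superblocks inherit the running deltas, while coded blocks refresh them,
// mirroring the order a sequential scan of each tile would have produced.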
static void update_delta_lf_for_row_mt(AV1_COMP *cpi) {
  AV1_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
  const int mib_size = cm->seq_params.mib_size;
  const int frame_lf_count =
      av1_num_planes(cm) > 1 ? FRAME_LF_COUNT : FRAME_LF_COUNT - 2;
  for (int row = 0; row < cm->tile_rows; row++) {
    for (int col = 0; col < cm->tile_cols; col++) {
      TileDataEnc *tile_data = &cpi->tile_data[row * cm->tile_cols + col];
      const TileInfo *const tile_info = &tile_data->tile_info;
      for (int mi_row = tile_info->mi_row_start;
           mi_row < tile_info->mi_row_end; mi_row += mib_size) {
        if (mi_row == tile_info->mi_row_start)
          av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
        for (int mi_col = tile_info->mi_col_start;
             mi_col < tile_info->mi_col_end; mi_col += mib_size) {
          const int idx_str = cm->mi_stride * mi_row + mi_col;
          MB_MODE_INFO **mi = cm->mi_grid_visible + idx_str;
          MB_MODE_INFO *mbmi = mi[0];
          if (mbmi->skip == 1 && (mbmi->sb_type == cm->seq_params.sb_size)) {
            for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id)
              mbmi->delta_lf[lf_id] = xd->delta_lf[lf_id];
            mbmi->delta_lf_from_base = xd->delta_lf_from_base;
          } else {
            if (cm->delta_q_info.delta_lf_multi) {
              for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id)
                xd->delta_lf[lf_id] = mbmi->delta_lf[lf_id];
            } else {
              xd->delta_lf_from_base = mbmi->delta_lf_from_base;
            }
          }
        }
      }
    }
  }
}

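// No-op synchronization hooks, used when row-based multi-threading is
// disabled so that callers can invoke the sync functions unconditionally.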
void av1_row_mt_sync_read_dummy(struct AV1RowMTSyncData *const row_mt_sync,
                                int r, int c) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  return;
}

void av1_row_mt_sync_write_dummy(struct AV1RowMTSyncData *const row_mt_sync,
                                 int r, int c, const int cols) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
  return;
}

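// Block until the thread encoding row r - 1 has advanced at least sync_range
// superblocks past column c, so that the above-row context this superblock
// depends on is available.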
void av1_row_mt_sync_read(AV1RowMTSync *const row_mt_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;

  if (r) {
    pthread_mutex_t *const mutex = &row_mt_sync->mutex_[r - 1];
    pthread_mutex_lock(mutex);

    while (c > row_mt_sync->cur_col[r - 1] - nsync) {
      pthread_cond_wait(&row_mt_sync->cond_[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}

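// Publish encoding progress for row r: record the last finished column and
// wake any thread waiting on this row in av1_row_mt_sync_read().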
void av1_row_mt_sync_write(AV1RowMTSync *const row_mt_sync, int r, int c,
                           const int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when there are enough encoded blocks for the next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync) sig = 0;
  } else {
    cur = cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex_[r]);

    row_mt_sync->cur_col[r] = cur;

    pthread_cond_signal(&row_mt_sync->cond_[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex_[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}

// Allocate memory for row synchronization.
void av1_row_mt_sync_mem_alloc(AV1RowMTSync *row_mt_sync, AV1_COMMON *cm,
                               int rows) {
  row_mt_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(cm, row_mt_sync->mutex_,
                    aom_malloc(sizeof(*row_mt_sync->mutex_) * rows));
    if (row_mt_sync->mutex_) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&row_mt_sync->mutex_[i], NULL);
      }
    }

    CHECK_MEM_ERROR(cm, row_mt_sync->cond_,
                    aom_malloc(sizeof(*row_mt_sync->cond_) * rows));
    if (row_mt_sync->cond_) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&row_mt_sync->cond_[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(cm, row_mt_sync->cur_col,
                  aom_malloc(sizeof(*row_mt_sync->cur_col) * rows));

  // Set up nsync.
  if (cm->seq_params.mib_size_log2 == 4)
    row_mt_sync->sync_range = 2;
  else
    row_mt_sync->sync_range = 1;
}

// Deallocate the mutexes, condition variables, and column-progress data used
// for row-based multi-threading synchronization.
void av1_row_mt_sync_mem_dealloc(AV1RowMTSync *row_mt_sync) {
  if (row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (row_mt_sync->mutex_ != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_mutex_destroy(&row_mt_sync->mutex_[i]);
      }
      aom_free(row_mt_sync->mutex_);
    }
    if (row_mt_sync->cond_ != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_cond_destroy(&row_mt_sync->cond_[i]);
      }
      aom_free(row_mt_sync->cond_);
    }
#endif  // CONFIG_MULTITHREAD
    aom_free(row_mt_sync->cur_col);
    // Clear the structure: the source of this call may be a dynamic change in
    // the tile configuration, in which case this call will be followed by an
    // _alloc() which may fail.
    av1_zero(*row_mt_sync);
  }
}

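// Assign tiles to worker threads in round-robin fashion so that each tile
// starts out with roughly the same number of threads.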
static void assign_tile_to_thread(MultiThreadHandle *multi_thread_ctxt,
                                  int num_tiles, int num_workers) {
  int tile_id = 0;
  int i;

  for (i = 0; i < num_workers; i++) {
    multi_thread_ctxt->thread_id_to_tile_id[i] = tile_id++;
    if (tile_id == num_tiles) tile_id = 0;
  }
}

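// Hand out the next superblock row of the given tile, if one is left. Returns
// 1 on success and 0 when the tile has no unassigned rows. Callers are
// expected to hold cpi->row_mt_mutex_ (see enc_row_mt_worker_hook()).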
static int get_next_job(AV1_COMP *const cpi, int *current_mi_row,
                        int cur_tile_id) {
  AV1_COMMON *const cm = &cpi->common;
  TileDataEnc *const this_tile = &cpi->tile_data[cur_tile_id];
  AV1RowMTInfo *row_mt_info = &this_tile->row_mt_info;

  if (row_mt_info->current_mi_row < this_tile->tile_info.mi_row_end) {
    *current_mi_row = row_mt_info->current_mi_row;
    row_mt_info->num_threads_working++;
    row_mt_info->current_mi_row += cm->seq_params.mib_size;
    return 1;
  }
  return 0;
}

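// Scan all tiles for pending superblock rows and, if any remain, pick the
// tile this thread should move to next.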
static void switch_tile_and_get_next_job(AV1_COMP *const cpi, int *cur_tile_id,
                                         int *current_mi_row,
                                         int *end_of_frame) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;

  int tile_id = -1;  // Stores the tile ID with minimum processing done.
  int max_mis_to_encode = 0;
  int min_num_threads_working = INT_MAX;

  for (int tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (int tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_index = tile_row * tile_cols + tile_col;
      TileDataEnc *this_tile = &cpi->tile_data[tile_index];
      AV1RowMTInfo *row_mt_info = &this_tile->row_mt_info;
      int num_mis_to_encode =
          this_tile->tile_info.mi_row_end - row_mt_info->current_mi_row;

      // The tile to be processed by this thread is selected based on the
      // availability of jobs:
      // 1) If jobs are available, the tile with the minimum number of threads
      // working on it is chosen. If two or more tiles have the same number of
      // threads working on them, the tile with the maximum number of jobs
      // available is chosen.
      // 2) If no jobs are available, end_of_frame is reached.
      if (num_mis_to_encode > 0) {
        int num_threads_working = row_mt_info->num_threads_working;
        if (num_threads_working < min_num_threads_working) {
          min_num_threads_working = num_threads_working;
          max_mis_to_encode = 0;
        }
        if (num_threads_working == min_num_threads_working &&
            num_mis_to_encode > max_mis_to_encode) {
          tile_id = tile_index;
          max_mis_to_encode = num_mis_to_encode;
        }
      }
    }
  }
  if (tile_id == -1) {
    *end_of_frame = 1;
  } else {
    // Update the current tile ID to the next tile that will be processed,
    // i.e. the least-processed tile.
    *cur_tile_id = tile_id;
    get_next_job(cpi, current_mi_row, *cur_tile_id);
  }
}

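// Worker hook for row-based multi-threading: repeatedly grab a superblock
// row (switching tiles when the current one runs dry), encode it with
// av1_encode_sb_row(), and exit once no rows remain anywhere in the frame.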
static int enc_row_mt_worker_hook(void *arg1, void *unused) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  AV1_COMP *const cpi = thread_data->cpi;
  AV1_COMMON *const cm = &cpi->common;

  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  (void)unused;

  assert(cur_tile_id != -1);

  int end_of_frame = 0;
  while (1) {
    int current_mi_row = -1;
#if CONFIG_MULTITHREAD
    pthread_mutex_lock(cpi->row_mt_mutex_);
#endif
    if (!get_next_job(cpi, &current_mi_row, cur_tile_id)) {
      // No jobs are available for the current tile. Query the status of the
      // other tiles and get the next job if available.
      switch_tile_and_get_next_job(cpi, &cur_tile_id, &current_mi_row,
                                   &end_of_frame);
    }
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(cpi->row_mt_mutex_);
#endif
    if (end_of_frame == 1) break;

    TileDataEnc *const this_tile = &cpi->tile_data[cur_tile_id];
    int tile_row = this_tile->tile_info.tile_row;
    int tile_col = this_tile->tile_info.tile_col;

    assert(current_mi_row != -1 &&
           current_mi_row <= this_tile->tile_info.mi_row_end);

    ThreadData *td = thread_data->td;

    td->mb.e_mbd.tile_ctx = td->tctx;
    td->mb.tile_pb_ctx = &this_tile->tctx;
    td->mb.backup_tile_ctx = &this_tile->backup_tctx;
    if (current_mi_row == this_tile->tile_info.mi_row_start)
      memcpy(td->mb.e_mbd.tile_ctx, &this_tile->tctx, sizeof(FRAME_CONTEXT));
    av1_init_above_context(cm, &td->mb.e_mbd, tile_row);

    // Disable exhaustive search speed features for row-based multi-threading
    // of the encoder.
    td->mb.m_search_count_ptr = NULL;
    td->mb.ex_search_count_ptr = NULL;

    cfl_init(&td->mb.e_mbd.cfl, &cm->seq_params);
    av1_crc32c_calculator_init(&td->mb.mb_rd_record.crc_calculator);

    av1_encode_sb_row(cpi, td, tile_row, tile_col, current_mi_row);
#if CONFIG_MULTITHREAD
    pthread_mutex_lock(cpi->row_mt_mutex_);
#endif
    this_tile->row_mt_info.num_threads_working--;
#if CONFIG_MULTITHREAD
    pthread_mutex_unlock(cpi->row_mt_mutex_);
#endif
  }

  return 1;
}

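// Worker hook for tile-based multi-threading: each worker encodes whole
// tiles, striding through the tile list by the total worker count.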
static int enc_worker_hook(void *arg1, void *unused) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  AV1_COMP *const cpi = thread_data->cpi;
  const AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  int t;

  (void)unused;

  for (t = thread_data->start; t < tile_rows * tile_cols;
       t += cpi->num_workers) {
    int tile_row = t / tile_cols;
    int tile_col = t % tile_cols;

    TileDataEnc *const this_tile =
        &cpi->tile_data[tile_row * cm->tile_cols + tile_col];
    thread_data->td->tctx = &this_tile->tctx;
    thread_data->td->mb.e_mbd.tile_ctx = thread_data->td->tctx;
    thread_data->td->mb.tile_pb_ctx = thread_data->td->tctx;
    thread_data->td->mb.backup_tile_ctx = &this_tile->backup_tctx;
    av1_encode_tile(cpi, thread_data->td, tile_row, tile_col);
  }

  return 1;
}

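// Create the worker threads and allocate per-thread encoder state. Worker 0
// is the main thread and reuses the thread data embedded in cpi; every other
// worker gets freshly allocated buffers.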
static void create_enc_workers(AV1_COMP *cpi, int num_workers) {
  AV1_COMMON *const cm = &cpi->common;
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();

  CHECK_MEM_ERROR(cm, cpi->workers,
                  aom_malloc(num_workers * sizeof(*cpi->workers)));

  CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
                  aom_calloc(num_workers, sizeof(*cpi->tile_thr_data)));

#if CONFIG_MULTITHREAD
  if (cpi->row_mt == 1) {
    if (cpi->row_mt_mutex_ == NULL) {
      CHECK_MEM_ERROR(cm, cpi->row_mt_mutex_,
                      aom_malloc(sizeof(*(cpi->row_mt_mutex_))));
      if (cpi->row_mt_mutex_) pthread_mutex_init(cpi->row_mt_mutex_, NULL);
    }
  }
#endif

  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = &cpi->tile_thr_data[i];

    ++cpi->num_workers;
    winterface->init(worker);
    worker->thread_name = "aom enc worker";

    thread_data->cpi = cpi;
    thread_data->thread_id = i;

    if (i > 0) {
      // Allocate thread data.
      CHECK_MEM_ERROR(cm, thread_data->td,
                      aom_memalign(32, sizeof(*thread_data->td)));
      av1_zero(*thread_data->td);

      // Set up pc_tree.
      thread_data->td->pc_tree = NULL;
      av1_setup_pc_tree(cm, thread_data->td);

      CHECK_MEM_ERROR(cm, thread_data->td->above_pred_buf,
                      (uint8_t *)aom_memalign(
                          16, MAX_MB_PLANE * MAX_SB_SQUARE *
                                  sizeof(*thread_data->td->above_pred_buf)));
      CHECK_MEM_ERROR(cm, thread_data->td->left_pred_buf,
                      (uint8_t *)aom_memalign(
                          16, MAX_MB_PLANE * MAX_SB_SQUARE *
                                  sizeof(*thread_data->td->left_pred_buf)));

      CHECK_MEM_ERROR(
          cm, thread_data->td->wsrc_buf,
          (int32_t *)aom_memalign(
              16, MAX_SB_SQUARE * sizeof(*thread_data->td->wsrc_buf)));

#if CONFIG_COLLECT_INTER_MODE_RD_STATS
      CHECK_MEM_ERROR(cm, thread_data->td->inter_modes_info,
                      (InterModesInfo *)aom_malloc(
                          sizeof(*thread_data->td->inter_modes_info)));
#endif

      for (int x = 0; x < 2; x++)
        for (int y = 0; y < 2; y++)
          CHECK_MEM_ERROR(
              cm, thread_data->td->hash_value_buffer[x][y],
              (uint32_t *)aom_malloc(
                  AOM_BUFFER_SIZE_FOR_BLOCK_HASH *
                  sizeof(*thread_data->td->hash_value_buffer[0][0])));

      CHECK_MEM_ERROR(
          cm, thread_data->td->mask_buf,
          (int32_t *)aom_memalign(
              16, MAX_SB_SQUARE * sizeof(*thread_data->td->mask_buf)));
      // Allocate frame counters in thread data.
      CHECK_MEM_ERROR(cm, thread_data->td->counts,
                      aom_calloc(1, sizeof(*thread_data->td->counts)));

      // Allocate buffers used by palette coding mode.
      CHECK_MEM_ERROR(
          cm, thread_data->td->palette_buffer,
          aom_memalign(16, sizeof(*thread_data->td->palette_buffer)));

      CHECK_MEM_ERROR(
          cm, thread_data->td->tmp_conv_dst,
          aom_memalign(32, MAX_SB_SIZE * MAX_SB_SIZE *
                               sizeof(*thread_data->td->tmp_conv_dst)));
      for (int j = 0; j < 2; ++j) {
        CHECK_MEM_ERROR(
            cm, thread_data->td->tmp_obmc_bufs[j],
            aom_memalign(32, 2 * MAX_MB_PLANE * MAX_SB_SQUARE *
                                 sizeof(*thread_data->td->tmp_obmc_bufs[j])));
      }

      // Create threads.
      if (!winterface->reset(worker))
        aom_internal_error(&cm->error, AOM_CODEC_ERROR,
                           "Tile encoder thread creation failed");
    } else {
      // Main thread acts as a worker and uses the thread data in cpi.
      thread_data->td = &cpi->td;
    }
    if (cpi->row_mt == 1)
      CHECK_MEM_ERROR(
          cm, thread_data->td->tctx,
          (FRAME_CONTEXT *)aom_memalign(16, sizeof(*thread_data->td->tctx)));
    winterface->sync(worker);
  }
}

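// Kick off all workers on the current frame. Worker 0 runs synchronously on
// the calling thread via execute(); the rest run on their own threads via
// launch().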
static void launch_enc_workers(AV1_COMP *cpi, int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  // Encode a frame.
  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Set the starting tile for each thread.
    thread_data->start = i;

    if (i == 0)
      winterface->execute(worker);
    else
      winterface->launch(worker);
  }
}

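// Wait for all workers to finish and surface any worker failure as an
// encoder error.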
static void sync_enc_workers(AV1_COMP *cpi, int num_workers) {
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
  int had_error = 0;

  // Encoding ends.
  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &cpi->workers[i];
    had_error |= !winterface->sync(worker);
  }

  if (had_error)
    aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR,
                       "Failed to encode tile data");
}

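// Gather per-thread flags and statistics back into the main thread's state
// once all workers have finished.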
static void accumulate_counters_enc_workers(AV1_COMP *cpi, int num_workers) {
  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
    cpi->intrabc_used |= thread_data->td->intrabc_used;
    // Accumulate counters.
    if (i > 0) {
      av1_accumulate_frame_counts(&cpi->counts, thread_data->td->counts);
      accumulate_rd_opt(&cpi->td, thread_data->td);
      cpi->td.mb.txb_split_count += thread_data->td->mb.txb_split_count;
    }
  }
}

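// Point each worker at its hook and seed its thread data from the main
// thread's encoder state before a frame is encoded.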
static void prepare_enc_workers(AV1_COMP *cpi, AVxWorkerHook hook,
                                int num_workers) {
  for (int i = num_workers - 1; i >= 0; i--) {
    AVxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = &cpi->tile_thr_data[i];

    worker->hook = hook;
    worker->data1 = thread_data;
    worker->data2 = NULL;

    thread_data->td->intrabc_used = 0;

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
      thread_data->td->mb.above_pred_buf = thread_data->td->above_pred_buf;
      thread_data->td->mb.left_pred_buf = thread_data->td->left_pred_buf;
      thread_data->td->mb.wsrc_buf = thread_data->td->wsrc_buf;

#if CONFIG_COLLECT_INTER_MODE_RD_STATS
      thread_data->td->mb.inter_modes_info = thread_data->td->inter_modes_info;
#endif
      for (int x = 0; x < 2; x++) {
        for (int y = 0; y < 2; y++) {
          memcpy(thread_data->td->hash_value_buffer[x][y],
                 cpi->td.mb.hash_value_buffer[x][y],
                 AOM_BUFFER_SIZE_FOR_BLOCK_HASH *
                     sizeof(*thread_data->td->hash_value_buffer[0][0]));
          thread_data->td->mb.hash_value_buffer[x][y] =
              thread_data->td->hash_value_buffer[x][y];
        }
      }
      thread_data->td->mb.mask_buf = thread_data->td->mask_buf;
    }
    if (thread_data->td->counts != &cpi->counts) {
      memcpy(thread_data->td->counts, &cpi->counts, sizeof(cpi->counts));
    }

    if (i > 0) {
      thread_data->td->mb.palette_buffer = thread_data->td->palette_buffer;
      thread_data->td->mb.tmp_conv_dst = thread_data->td->tmp_conv_dst;
      for (int j = 0; j < 2; ++j) {
        thread_data->td->mb.tmp_obmc_bufs[j] =
            thread_data->td->tmp_obmc_bufs[j];
      }

      thread_data->td->mb.e_mbd.tmp_conv_dst = thread_data->td->mb.tmp_conv_dst;
      for (int j = 0; j < 2; ++j) {
        thread_data->td->mb.e_mbd.tmp_obmc_bufs[j] =
            thread_data->td->mb.tmp_obmc_bufs[j];
      }
    }
  }
}

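// Encode the frame's tiles in parallel, assigning one or more whole tiles to
// each worker.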
void av1_encode_tiles_mt(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  int num_workers = AOMMIN(cpi->oxcf.max_threads, tile_cols * tile_rows);

  if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows)
    av1_alloc_tile_data(cpi);

  av1_init_tile_data(cpi);
  // Only run once to create threads and allocate thread data.
  if (cpi->num_workers == 0) {
    create_enc_workers(cpi, num_workers);
  } else {
    num_workers = AOMMIN(num_workers, cpi->num_workers);
  }
  prepare_enc_workers(cpi, enc_worker_hook, num_workers);
  launch_enc_workers(cpi, num_workers);
  sync_enc_workers(cpi, num_workers);
  accumulate_counters_enc_workers(cpi, num_workers);
}

// Accumulate frame counts. FRAME_COUNTS consists solely of 'unsigned int'
// members, so we treat it as an array, and sum over the whole length.
void av1_accumulate_frame_counts(FRAME_COUNTS *acc_counts,
                                 const FRAME_COUNTS *counts) {
  unsigned int *const acc = (unsigned int *)acc_counts;
  const unsigned int *const cnt = (const unsigned int *)counts;

  const unsigned int n_counts = sizeof(FRAME_COUNTS) / sizeof(unsigned int);

  for (unsigned int i = 0; i < n_counts; i++) acc[i] += cnt[i];
}

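// Encode the frame with row-based multi-threading: workers are assigned
// superblock rows rather than whole tiles, so multiple threads can cooperate
// within a tile. The worker count is capped by the total number of
// superblock rows in the frame.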
void av1_encode_tiles_row_mt(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = 0;
  int total_num_sb_rows = 0;
  int max_sb_rows = 0;

  if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
    av1_row_mt_mem_dealloc(cpi);
    av1_alloc_tile_data(cpi);
  }

  av1_init_tile_data(cpi);

  for (int row = 0; row < tile_rows; row++) {
    for (int col = 0; col < tile_cols; col++) {
      TileDataEnc *tile_data = &cpi->tile_data[row * cm->tile_cols + col];
      int num_sb_rows_in_tile =
          av1_get_sb_rows_in_tile(cm, tile_data->tile_info);
      total_num_sb_rows += num_sb_rows_in_tile;
      max_sb_rows = AOMMAX(max_sb_rows, num_sb_rows_in_tile);
    }
  }
  num_workers = AOMMIN(cpi->oxcf.max_threads, total_num_sb_rows);

  if (multi_thread_ctxt->allocated_tile_cols != tile_cols ||
      multi_thread_ctxt->allocated_tile_rows != tile_rows ||
      multi_thread_ctxt->allocated_sb_rows != max_sb_rows) {
    av1_row_mt_mem_dealloc(cpi);
    av1_row_mt_mem_alloc(cpi, max_sb_rows);
  }

  memset(multi_thread_ctxt->thread_id_to_tile_id, -1,
         sizeof(*multi_thread_ctxt->thread_id_to_tile_id) * MAX_NUM_THREADS);

  for (int tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (int tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_id = tile_row * tile_cols + tile_col;
      TileDataEnc *this_tile = &cpi->tile_data[tile_id];

      // Initialize cur_col to -1 for all rows.
      memset(this_tile->row_mt_sync.cur_col, -1,
             sizeof(*this_tile->row_mt_sync.cur_col) * max_sb_rows);
      this_tile->row_mt_info.current_mi_row = this_tile->tile_info.mi_row_start;
      this_tile->row_mt_info.num_threads_working = 0;

#if CONFIG_COLLECT_INTER_MODE_RD_STATS
      av1_inter_mode_data_init(this_tile);
#endif
      av1_zero_above_context(cm, &cpi->td.mb.e_mbd,
                             this_tile->tile_info.mi_col_start,
                             this_tile->tile_info.mi_col_end, tile_row);
      this_tile->m_search_count = 0;   // Count of motion search hits.
      this_tile->ex_search_count = 0;  // Exhaustive mesh search hits.
    }
  }

  // Only run once to create threads and allocate thread data.
  if (cpi->num_workers == 0) {
    create_enc_workers(cpi, num_workers);
  } else {
    num_workers = AOMMIN(num_workers, cpi->num_workers);
  }
  assign_tile_to_thread(multi_thread_ctxt, tile_cols * tile_rows, num_workers);
  prepare_enc_workers(cpi, enc_row_mt_worker_hook, num_workers);
  launch_enc_workers(cpi, num_workers);
  sync_enc_workers(cpi, num_workers);
  if (cm->delta_q_info.delta_lf_present_flag) update_delta_lf_for_row_mt(cpi);
  accumulate_counters_enc_workers(cpi, num_workers);
}