Initialize mt_exit flags unconditionally
This change initializes the 'mt_exit' flags unconditionally
as suggested in https://aomedia-review.googlesource.com/c/aom/+/183861.
This resolves a potential issue where 'mt_exit' could retain a stale
value from a previous aom_codec_encode() call.
Bug: aomedia:3276
Change-Id: I817b3605b08267bd75ecb694aa10fc07d10137ca
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index 8a6f290..7dafb9a 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -57,7 +57,6 @@
void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
int width, int num_workers) {
lf_sync->rows = rows;
- lf_sync->lf_mt_exit = false;
#if CONFIG_MULTITHREAD
{
int i, j;
@@ -617,7 +616,6 @@
}
lr_sync->num_workers = num_workers;
- lr_sync->lr_mt_exit = false;
for (int j = 0; j < num_planes; j++) {
CHECK_MEM_ERROR(
@@ -932,6 +930,7 @@
av1_loop_restoration_alloc(lr_sync, cm, num_workers, num_rows_lr,
num_planes, cm->width);
}
+ lr_sync->lr_mt_exit = false;
// Initialize cur_sb_col to -1 for all SB rows.
for (i = 0; i < num_planes; i++) {
@@ -985,6 +984,7 @@
cdef_sync->end_of_frame = 0;
cdef_sync->fbr = 0;
cdef_sync->fbc = 0;
+ cdef_sync->cdef_mt_exit = false;
}
static AOM_INLINE void launch_cdef_workers(AVxWorker *const workers,
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index cfc37f9..675687d 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -269,6 +269,7 @@
av1_loop_filter_dealloc(lf_sync);
av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
}
+ lf_sync->lf_mt_exit = false;
// Initialize cur_sb_col to -1 for all SB rows.
for (int i = 0; i < MAX_MB_PLANE; i++) {
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index fe62b82..61bb8a2 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -273,8 +273,6 @@
enc_row_mt->allocated_rows = max_rows;
enc_row_mt->allocated_cols = max_cols - 1;
enc_row_mt->allocated_sb_rows = sb_rows;
- enc_row_mt->row_mt_exit = false;
- enc_row_mt->firstpass_mt_exit = false;
}
void av1_row_mt_mem_dealloc(AV1_COMP *cpi) {
@@ -904,7 +902,6 @@
aom_malloc(sizeof(*(tpl_row_mt->mutex_))));
if (tpl_row_mt->mutex_) pthread_mutex_init(tpl_row_mt->mutex_, NULL);
}
- tpl_row_mt->tpl_mt_exit = false;
#if !CONFIG_REALTIME_ONLY
if (is_restoration_used(cm)) {
@@ -1924,6 +1921,7 @@
sizeof(*thread_id_to_tile_id) * MAX_NUM_THREADS);
memset(enc_row_mt->num_tile_cols_done, 0,
sizeof(*enc_row_mt->num_tile_cols_done) * sb_rows_in_frame);
+ enc_row_mt->row_mt_exit = false;
for (int tile_row = 0; tile_row < tile_rows; tile_row++) {
for (int tile_col = 0; tile_col < tile_cols; tile_col++) {
@@ -2002,6 +2000,7 @@
memset(thread_id_to_tile_id, -1,
sizeof(*thread_id_to_tile_id) * MAX_NUM_THREADS);
+ enc_row_mt->firstpass_mt_exit = false;
for (int tile_row = 0; tile_row < tile_rows; tile_row++) {
for (int tile_col = 0; tile_col < tile_cols; tile_col++) {
@@ -2300,6 +2299,7 @@
av1_tpl_alloc(tpl_sync, cm, mb_rows);
}
tpl_sync->num_threads_working = num_workers;
+ mt_info->tpl_row_mt.tpl_mt_exit = false;
// Initialize cur_mb_col to -1 for all MB rows.
memset(tpl_sync->num_finished_cols, -1,
@@ -3097,6 +3097,7 @@
AV1EncPackBSSync *const pack_bs_sync = &mt_info->pack_bs_sync;
const uint16_t num_tiles = cm->tiles.rows * cm->tiles.cols;
pack_bs_sync->next_job_idx = 0;
+ pack_bs_sync->pack_bs_mt_exit = false;
PackBSTileOrder *const pack_bs_tile_order = pack_bs_sync->pack_bs_tile_order;
// Reset tile order data of pack bitstream
@@ -3232,6 +3233,7 @@
cdef_sync->end_of_frame = 0;
cdef_sync->fbr = 0;
cdef_sync->fbc = 0;
+ cdef_sync->cdef_mt_exit = false;
}
// Checks if a job is available. If job is available,