Remove redundant AV1_COMMON.frame_to_show
common.frame_to_show is redundant, always pointing to
common.cur_frame->buf. This patch removes frame_to_show, replacing uses
of it with common.cur_frame->buf. NULL checks previously performed on
frame_to_show (e.g. in av1_get_preview_raw_frame) are now performed on
cur_frame instead, since &cur_frame->buf is never NULL.
This forms part of wider restructuring and refactoring in order to
achieve a clean API separation at the entry to the low-level encoder.
BUG=aomedia:2244
Change-Id: I38105342c18dd8aa3d3a7d979f5f6623d2a1699a
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 564ef70..cc7d8db 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -341,7 +341,6 @@
// Scale of the current frame with respect to itself.
struct scale_factors sf_identity;
- YV12_BUFFER_CONFIG *frame_to_show;
RefCntBuffer *prev_frame;
// TODO(hkuang): Combine this with cur_buf in macroblockd.
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 6817626..16a9b1d 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -3178,7 +3178,7 @@
struct aom_codec_cx_pkt pkt;
int i;
PSNR_STATS psnr;
- aom_calc_highbd_psnr(cpi->source, cpi->common.frame_to_show, &psnr,
+ aom_calc_highbd_psnr(cpi->source, &cpi->common.cur_frame->buf, &psnr,
cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
for (i = 0; i < 4; ++i) {
@@ -4391,14 +4391,14 @@
if (lf->filter_level[0] || lf->filter_level[1]) {
if (cpi->num_workers > 1)
- av1_loop_filter_frame_mt(cm->frame_to_show, cm, xd, 0, num_planes, 0,
+ av1_loop_filter_frame_mt(&cm->cur_frame->buf, cm, xd, 0, num_planes, 0,
#if LOOP_FILTER_BITMASK
0,
#endif
cpi->workers, cpi->num_workers,
&cpi->lf_row_sync);
else
- av1_loop_filter_frame(cm->frame_to_show, cm, xd,
+ av1_loop_filter_frame(&cm->cur_frame->buf, cm, xd,
#if LOOP_FILTER_BITMASK
0,
#endif
@@ -4406,7 +4406,7 @@
}
if (!no_restoration)
- av1_loop_restoration_save_boundary_lines(cm->frame_to_show, cm, 0);
+ av1_loop_restoration_save_boundary_lines(&cm->cur_frame->buf, cm, 0);
if (no_cdef) {
cm->cdef_info.cdef_bits = 0;
@@ -4415,11 +4415,11 @@
cm->cdef_info.cdef_uv_strengths[0] = 0;
} else {
// Find CDEF parameters
- av1_cdef_search(cm->frame_to_show, cpi->source, cm, xd,
+ av1_cdef_search(&cm->cur_frame->buf, cpi->source, cm, xd,
cpi->sf.fast_cdef_search);
// Apply the filter
- av1_cdef_frame(cm->frame_to_show, cm, xd);
+ av1_cdef_frame(&cm->cur_frame->buf, cm, xd);
}
superres_post_encode(cpi);
@@ -4429,17 +4429,17 @@
cm->rst_info[1].frame_restoration_type = RESTORE_NONE;
cm->rst_info[2].frame_restoration_type = RESTORE_NONE;
} else {
- av1_loop_restoration_save_boundary_lines(cm->frame_to_show, cm, 1);
+ av1_loop_restoration_save_boundary_lines(&cm->cur_frame->buf, cm, 1);
av1_pick_filter_restoration(cpi->source, cpi);
if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
if (cpi->num_workers > 1)
- av1_loop_restoration_filter_frame_mt(cm->frame_to_show, cm, 0,
+ av1_loop_restoration_filter_frame_mt(&cm->cur_frame->buf, cm, 0,
cpi->workers, cpi->num_workers,
&cpi->lr_row_sync, &cpi->lr_ctxt);
else
- av1_loop_restoration_filter_frame(cm->frame_to_show, cm, 0,
+ av1_loop_restoration_filter_frame(&cm->cur_frame->buf, cm, 0,
&cpi->lr_ctxt);
}
}
@@ -4905,7 +4905,7 @@
static void dump_filtered_recon_frames(AV1_COMP *cpi) {
AV1_COMMON *const cm = &cpi->common;
const CurrentFrame *const current_frame = &cm->current_frame;
- const YV12_BUFFER_CONFIG *recon_buf = cm->frame_to_show;
+ const YV12_BUFFER_CONFIG *recon_buf = &cm->cur_frame->buf;
if (recon_buf == NULL) {
printf("Frame %d is not ready.\n", current_frame->frame_number);
@@ -5072,9 +5072,6 @@
cpi->seq_params_locked = 1;
- // Set up frame to show to get ready for stats collection.
- cm->frame_to_show = &cm->cur_frame->buf;
-
// Update current frame offset.
current_frame->order_hint = cm->cur_frame->order_hint;
@@ -5236,17 +5233,16 @@
cpi->refresh_last_frame = 1;
}
- cm->frame_to_show = &cm->cur_frame->buf;
- cm->frame_to_show->color_primaries = seq_params->color_primaries;
- cm->frame_to_show->transfer_characteristics =
+ cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
+ cm->cur_frame->buf.transfer_characteristics =
seq_params->transfer_characteristics;
- cm->frame_to_show->matrix_coefficients = seq_params->matrix_coefficients;
- cm->frame_to_show->monochrome = seq_params->monochrome;
- cm->frame_to_show->chroma_sample_position =
+ cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
+ cm->cur_frame->buf.monochrome = seq_params->monochrome;
+ cm->cur_frame->buf.chroma_sample_position =
seq_params->chroma_sample_position;
- cm->frame_to_show->color_range = seq_params->color_range;
- cm->frame_to_show->render_width = cm->render_width;
- cm->frame_to_show->render_height = cm->render_height;
+ cm->cur_frame->buf.color_range = seq_params->color_range;
+ cm->cur_frame->buf.render_width = cm->render_width;
+ cm->cur_frame->buf.render_height = cm->render_height;
// TODO(zoeliu): For non-ref frames, loop filtering may need to be turned
// off.
@@ -5267,11 +5263,11 @@
}
// TODO(debargha): Fix mv search range on encoder side
- // aom_extend_frame_inner_borders(cm->frame_to_show, av1_num_planes(cm));
- aom_extend_frame_borders(cm->frame_to_show, av1_num_planes(cm));
+ // aom_extend_frame_inner_borders(&cm->cur_frame->buf, av1_num_planes(cm));
+ aom_extend_frame_borders(&cm->cur_frame->buf, av1_num_planes(cm));
#ifdef OUTPUT_YUV_REC
- aom_write_one_yuv_frame(cm, cm->frame_to_show);
+ aom_write_one_yuv_frame(cm, &cm->cur_frame->buf);
#endif
// Build the bitstream
@@ -5713,7 +5709,7 @@
}
if (cm->show_frame) {
const YV12_BUFFER_CONFIG *orig = cpi->source;
- const YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ const YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf;
double y, u, v, frame_all;
cpi->count++;
@@ -6917,8 +6913,8 @@
return -1;
} else {
int ret;
- if (cm->frame_to_show) {
- *dest = *cm->frame_to_show;
+ if (cm->cur_frame != NULL) {
+ *dest = cm->cur_frame->buf;
dest->y_width = cm->width;
dest->y_height = cm->height;
dest->uv_width = cm->width >> cm->seq_params.subsampling_x;
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
index b6b84c8..c2657d7 100644
--- a/av1/encoder/picklpf.c
+++ b/av1/encoder/picklpf.c
@@ -70,24 +70,24 @@
// TODO(any): please enable multi-thread and remove the flag when loop
// filter mask is compatible with multi-thread.
if (cpi->num_workers > 1)
- av1_loop_filter_frame_mt(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, plane,
+ av1_loop_filter_frame_mt(&cm->cur_frame->buf, cm, &cpi->td.mb.e_mbd, plane,
plane + 1, partial_frame,
#if LOOP_FILTER_BITMASK
0,
#endif
cpi->workers, cpi->num_workers, &cpi->lf_row_sync);
else
- av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd,
+ av1_loop_filter_frame(&cm->cur_frame->buf, cm, &cpi->td.mb.e_mbd,
#if LOOP_FILTER_BITMASK
0,
#endif
plane, plane + 1, partial_frame);
- filt_err = aom_get_sse_plane(sd, cm->frame_to_show, plane,
+ filt_err = aom_get_sse_plane(sd, &cm->cur_frame->buf, plane,
cm->seq_params.use_highbitdepth);
// Re-instate the unfiltered frame
- yv12_copy_plane(&cpi->last_frame_uf, cm->frame_to_show, plane);
+ yv12_copy_plane(&cpi->last_frame_uf, &cm->cur_frame->buf, plane);
return filt_err;
}
@@ -120,7 +120,7 @@
// Set each entry to -1
memset(ss_err, 0xFF, sizeof(ss_err));
- yv12_copy_plane(cm->frame_to_show, &cpi->last_frame_uf, plane);
+ yv12_copy_plane(&cm->cur_frame->buf, &cpi->last_frame_uf, plane);
best_err = try_filter_frame(sd, cpi, filt_mid, partial_frame, plane, dir);
filt_best = filt_mid;
ss_err[filt_mid] = best_err;
diff --git a/av1/encoder/pickrst.c b/av1/encoder/pickrst.c
index a7fab16..b999178 100644
--- a/av1/encoder/pickrst.c
+++ b/av1/encoder/pickrst.c
@@ -140,7 +140,7 @@
rsc->rusi = rusi;
rsc->sf = sf;
- const YV12_BUFFER_CONFIG *dgd = cm->frame_to_show;
+ const YV12_BUFFER_CONFIG *dgd = &cm->cur_frame->buf;
const int is_uv = plane != AOM_PLANE_Y;
rsc->plane_width = src->crop_widths[is_uv];
rsc->plane_height = src->crop_heights[is_uv];
@@ -165,7 +165,7 @@
const int bit_depth = cm->seq_params.bit_depth;
const int highbd = cm->seq_params.use_highbitdepth;
- const YV12_BUFFER_CONFIG *fts = cm->frame_to_show;
+ const YV12_BUFFER_CONFIG *fts = &cm->cur_frame->buf;
// TODO(yunqing): For now, only use optimized LR filter in decoder. Can be
// also used in encoder.
const int optimized_lr = 0;
@@ -1281,7 +1281,7 @@
const int highbd = rsc->cm->seq_params.use_highbitdepth;
rusi->sse[RESTORE_NONE] = sse_restoration_unit(
- limits, rsc->src, rsc->cm->frame_to_show, rsc->plane, highbd);
+ limits, rsc->src, &rsc->cm->cur_frame->buf, rsc->plane, highbd);
rsc->sse += rusi->sse[RESTORE_NONE];
}