Apply temporal filter after define_gf_group()
Tested this change on several datasets; the compression
performance remains neutral.
Regular q mode with --lag-in-frames=35
avg_psnr ovr_psnr ssim
ugc360p 0.019 0.013 -0.019
lowres 0.009 0.012 0.069
midres 0.049 0.076 -0.030
hdres 0.003 -0.014 -0.039
hdres2 -0.205 -0.189 -0.118
Common testing condition (CTC) mode with --lag-in-frames=48
avg_psnr ovr_psnr ssim
ugc360p 0.103 0.151 0.177
lowres 0.089 0.098 0.127
midres 0.088 0.097 0.076
hdres 0.106 0.114 0.066
hdres2 0.050 0.059 0.030
Common testing condition (CTC) mode with --lag-in-frames=35
avg_psnr ovr_psnr ssim
ugc360p 0.148 0.194 0.223
lowres 0.130 0.133 0.189
midres 0.136 0.140 0.109
hdres 0.122 0.132 0.053
hdres2 0.260 0.398 0.294
Here we see a significant performance drop at hdres2 in regular q mode with --lag-in-frames=35.
This is because we move ARF filtering to an earlier stage, where
we will have one less future frame for the filtering process.
The performance drop disappears in --lag-in-frames=48 hdres2 because in
that setting, we will always have enough future frames for filtering.
STATS_CHANGED
BUG=aomedia:3144
Change-Id: Ic2d1cb624a7870826acf432b7126659525b33558
diff --git a/av1/encoder/encode_strategy.c b/av1/encoder/encode_strategy.c
index fe2b9ae..6dda945 100644
--- a/av1/encoder/encode_strategy.c
+++ b/av1/encoder/encode_strategy.c
@@ -978,20 +978,28 @@
int show_existing_alt_ref = 0;
// TODO(bohanli): figure out why we need frame_type in cm here.
cm->current_frame.frame_type = frame_params->frame_type;
- int arf_src_index = gf_group->arf_src_offset[cpi->gf_frame_index];
- int is_forward_keyframe = 0;
- if (gf_group->frame_type[cpi->gf_frame_index] == KEY_FRAME &&
- gf_group->refbuf_state[cpi->gf_frame_index] == REFBUF_UPDATE)
- is_forward_keyframe = 1;
-
- const int code_arf = av1_temporal_filter(
- cpi, arf_src_index, update_type, is_forward_keyframe,
- &show_existing_alt_ref, &cpi->ppi->alt_ref_buffer);
- if (code_arf) {
- aom_extend_frame_borders(&cpi->ppi->alt_ref_buffer, av1_num_planes(cm));
- frame_input->source = &cpi->ppi->alt_ref_buffer;
- aom_copy_metadata_to_frame_buffer(frame_input->source,
- source_buffer->metadata);
+ if (update_type == KF_UPDATE || update_type == ARF_UPDATE) {
+ int show_tf_buf = 0;
+ YV12_BUFFER_CONFIG *tf_buf = av1_tf_info_get_filtered_buf(
+ &cpi->ppi->tf_info, cpi->gf_frame_index, &show_tf_buf);
+ if (tf_buf != NULL) {
+ frame_input->source = tf_buf;
+ show_existing_alt_ref = show_tf_buf;
+ }
+ } else {
+ const int arf_src_index = gf_group->arf_src_offset[cpi->gf_frame_index];
+ // Right now, we are still using alt_ref_buffer due to
+ // implementation complexity.
+ // TODO(angiebird): Reuse the buffer in tf_info here.
+ const int code_arf = av1_temporal_filter(
+ cpi, arf_src_index, cpi->gf_frame_index, &show_existing_alt_ref,
+ &cpi->ppi->alt_ref_buffer);
+ if (code_arf) {
+ aom_extend_frame_borders(&cpi->ppi->alt_ref_buffer, av1_num_planes(cm));
+ frame_input->source = &cpi->ppi->alt_ref_buffer;
+ aom_copy_metadata_to_frame_buffer(frame_input->source,
+ source_buffer->metadata);
+ }
}
// Currently INTNL_ARF_UPDATE only do show_existing.
if (update_type == ARF_UPDATE &&
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index b85f67e..5e7b9a5 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -78,6 +78,7 @@
#include "av1/encoder/speed_features.h"
#include "av1/encoder/superres_scale.h"
#include "av1/encoder/thirdpass.h"
+#include "av1/encoder/temporal_filter.h"
#include "av1/encoder/tpl_model.h"
#include "av1/encoder/reconinter_enc.h"
#include "av1/encoder/var_based_part.h"
@@ -1513,6 +1514,11 @@
void av1_remove_primary_compressor(AV1_PRIMARY *ppi) {
if (!ppi) return;
aom_free_frame_buffer(&ppi->alt_ref_buffer);
+
+#if !CONFIG_REALTIME_ONLY
+ av1_tf_info_free(&ppi->tf_info);
+#endif // !CONFIG_REALTIME_ONLY
+
for (int i = 0; i < MAX_NUM_OPERATING_POINTS; ++i) {
aom_free(ppi->level_params.level_info[i]);
}
@@ -1966,6 +1972,9 @@
if (!is_stat_generation_stage(cpi)) {
alloc_altref_frame_buffer(cpi);
alloc_util_frame_buffers(cpi);
+#if !CONFIG_REALTIME_ONLY
+ av1_tf_info_alloc(&cpi->ppi->tf_info, cpi);
+#endif // !CONFIG_REALTIME_ONLY
}
init_ref_frame_bufs(cpi);
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 0204d1c..b1032b5 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -2435,6 +2435,10 @@
YV12_BUFFER_CONFIG alt_ref_buffer;
/*!
+ * Info and resources used by temporal filtering.
+ */
+ TEMPORAL_FILTER_INFO tf_info;
+ /*!
* Elements part of the sequence header, that are applicable for all the
* frames in the video.
*/
diff --git a/av1/encoder/encoder_alloc.h b/av1/encoder/encoder_alloc.h
index bd7be4c..157e009 100644
--- a/av1/encoder/encoder_alloc.h
+++ b/av1/encoder/encoder_alloc.h
@@ -350,9 +350,10 @@
oxcf->frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->oxcf.tool_cfg.enable_global_motion))
+ NULL, cpi->oxcf.tool_cfg.enable_global_motion)) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate altref buffer");
+ }
}
static AOM_INLINE void alloc_util_frame_buffers(AV1_COMP *cpi) {
diff --git a/av1/encoder/gop_structure.c b/av1/encoder/gop_structure.c
index 46d4845..14ae80d 100644
--- a/av1/encoder/gop_structure.c
+++ b/av1/encoder/gop_structure.c
@@ -882,3 +882,9 @@
if (gf_group->max_layer_depth_allowed == 0)
set_ld_layer_depth(gf_group, p_rc->baseline_gf_interval);
}
+
+int av1_gop_check_forward_keyframe(const GF_GROUP *gf_group,
+ int gf_frame_index) {
+ return gf_group->frame_type[gf_frame_index] == KEY_FRAME &&
+ gf_group->refbuf_state[gf_frame_index] == REFBUF_UPDATE;
+}
diff --git a/av1/encoder/gop_structure.h b/av1/encoder/gop_structure.h
index 3d37e2b..045be16 100644
--- a/av1/encoder/gop_structure.h
+++ b/av1/encoder/gop_structure.h
@@ -74,6 +74,18 @@
int project_gfu_boost);
/*!\endcond */
+/*!\brief Check whether a frame in the GOP is a forward key frame
+ *
+ *\ingroup rate_control
+ *
+ * \param[in] gf_group GF/ARF group data structure
+ * \param[in] gf_frame_index GOP index
+ *
+ * \return Return 1 if it is a forward key frame, otherwise return 0
+ */
+int av1_gop_check_forward_keyframe(const GF_GROUP *gf_group,
+ int gf_frame_index);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/av1/encoder/pass2_strategy.c b/av1/encoder/pass2_strategy.c
index 134c6ce..182f9e49 100644
--- a/av1/encoder/pass2_strategy.c
+++ b/av1/encoder/pass2_strategy.c
@@ -998,11 +998,8 @@
if (is_temporal_filter_enabled) {
int arf_src_index = gf_group->arf_src_offset[gf_group->arf_index];
- FRAME_UPDATE_TYPE arf_update_type =
- gf_group->update_type[gf_group->arf_index];
- int is_forward_keyframe = 0;
- av1_temporal_filter(cpi, arf_src_index, arf_update_type,
- is_forward_keyframe, NULL, &cpi->ppi->alt_ref_buffer);
+ av1_temporal_filter(cpi, arf_src_index, gf_group->arf_index, NULL,
+ &cpi->ppi->alt_ref_buffer);
aom_extend_frame_borders(&cpi->ppi->alt_ref_buffer,
av1_num_planes(&cpi->common));
}
@@ -3665,6 +3662,8 @@
define_gf_group(cpi, frame_params, 1);
+ av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
+
rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
assert(cpi->gf_frame_index == 0);
#if ARF_STATS_OUTPUT
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index b229d43..f056505 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -13,6 +13,7 @@
#include <limits.h>
#include "config/aom_config.h"
+#include "config/aom_scale_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/odintrin.h"
@@ -30,6 +31,7 @@
#include "av1/encoder/ethread.h"
#include "av1/encoder/extend.h"
#include "av1/encoder/firstpass.h"
+#include "av1/encoder/gop_structure.h"
#include "av1/encoder/mcomp.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/reconinter_enc.h"
@@ -912,6 +914,13 @@
tf_restore_state(mbd, input_mb_mode_info, input_buffer, num_planes);
}
+int av1_calc_arf_boost(const TWO_PASS *twopass,
+ const TWO_PASS_FRAME *twopass_frame,
+ const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
+ int offset, int f_frames, int b_frames,
+ int *num_fpstats_used, int *num_fpstats_required,
+ int project_gfu_boost);
+
/*!\brief Setups the frame buffer for temporal filtering. This fuction
* determines how many frames will be used for temporal filtering and then
* groups them into a buffer. This function will also estimate the noise level
@@ -921,21 +930,23 @@
* \param[in] cpi Top level encoder instance structure
* \param[in] filter_frame_lookahead_idx The index of the to-filter frame
* in the lookahead buffer cpi->lookahead
+ * \param[in] gf_frame_index GOP index
* \param[in] is_second_arf Whether the to-filter frame is the second ARF.
* This field will affect the number of frames
* used for filtering.
- * \param[in] update_type This frame's update type.
- *
- * \param[in] is_forward_keyframe Indicate whether this is a forward keyframe.
*
* \return Nothing will be returned. But the fields `frames`, `num_frames`,
* `filter_frame_idx` and `noise_levels` will be updated in cpi->tf_ctx.
*/
static void tf_setup_filtering_buffer(AV1_COMP *cpi,
- const int filter_frame_lookahead_idx,
- const int is_second_arf,
- FRAME_UPDATE_TYPE update_type,
- int is_forward_keyframe) {
+ int filter_frame_lookahead_idx,
+ int gf_frame_index, int is_second_arf) {
+ const GF_GROUP *gf_group = &cpi->ppi->gf_group;
+ const FRAME_UPDATE_TYPE update_type = gf_group->update_type[gf_frame_index];
+ const FRAME_TYPE frame_type = gf_group->frame_type[gf_frame_index];
+ const int is_forward_keyframe =
+ av1_gop_check_forward_keyframe(gf_group, gf_frame_index);
+
TemporalFilterCtx *tf_ctx = &cpi->tf_ctx;
YV12_BUFFER_CONFIG **frames = tf_ctx->frames;
// Number of frames used for filtering. Set `arnr_max_frames` as 1 to disable
@@ -946,15 +957,11 @@
const int lookahead_depth =
av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage);
- int arf_src_offset = cpi->ppi->gf_group.arf_src_offset[cpi->gf_frame_index];
- const FRAME_TYPE frame_type =
- cpi->ppi->gf_group.frame_type[cpi->gf_frame_index];
-
// Temporal filtering should not go beyond key frames
const int key_to_curframe =
- AOMMAX(cpi->rc.frames_since_key + arf_src_offset, 0);
+ AOMMAX(cpi->rc.frames_since_key + filter_frame_lookahead_idx, 0);
const int curframe_to_key =
- AOMMAX(cpi->rc.frames_to_key - arf_src_offset - 1, 0);
+ AOMMAX(cpi->rc.frames_to_key - filter_frame_lookahead_idx - 1, 0);
// Number of buffered frames before the to-filter frame.
int max_before = AOMMIN(filter_frame_lookahead_idx, key_to_curframe);
@@ -1021,7 +1028,12 @@
num_before = is_forward_keyframe ? num_frames / 2 : 0;
num_after = AOMMIN(num_frames - 1, max_after);
} else {
- num_frames = AOMMIN(num_frames, cpi->ppi->p_rc.gfu_boost / 150);
+ int gfu_boost = av1_calc_arf_boost(&cpi->ppi->twopass, &cpi->twopass_frame,
+ &cpi->ppi->p_rc, &cpi->frame_info,
+ filter_frame_lookahead_idx, max_before,
+ max_after, NULL, NULL, 0);
+
+ num_frames = AOMMIN(num_frames, gfu_boost / 150);
num_frames += !(num_frames & 1); // Make the number odd.
// Only use 2 neighbours for the second ARF.
if (is_second_arf) num_frames = AOMMIN(num_frames, 3);
@@ -1129,17 +1141,21 @@
// Returns:
// Nothing will be returned. But the contents of cpi->tf_ctx will be modified.
static void init_tf_ctx(AV1_COMP *cpi, int filter_frame_lookahead_idx,
- int is_second_arf, FRAME_UPDATE_TYPE update_type,
- int is_forward_keyframe,
+ int gf_frame_index, int is_second_arf,
YV12_BUFFER_CONFIG *output_frame) {
+ const GF_GROUP *gf_group = &cpi->ppi->gf_group;
+ const FRAME_UPDATE_TYPE update_type = gf_group->update_type[gf_frame_index];
+ const int is_forward_keyframe =
+ av1_gop_check_forward_keyframe(gf_group, gf_frame_index);
+
TemporalFilterCtx *tf_ctx = &cpi->tf_ctx;
// Setup frame buffer for filtering.
YV12_BUFFER_CONFIG **frames = tf_ctx->frames;
tf_ctx->num_frames = 0;
tf_ctx->filter_frame_idx = -1;
tf_ctx->output_frame = output_frame;
- tf_setup_filtering_buffer(cpi, filter_frame_lookahead_idx, is_second_arf,
- update_type, is_forward_keyframe);
+ tf_setup_filtering_buffer(cpi, filter_frame_lookahead_idx, gf_frame_index,
+ is_second_arf);
assert(tf_ctx->num_frames > 0);
assert(tf_ctx->filter_frame_idx < tf_ctx->num_frames);
@@ -1185,15 +1201,16 @@
}
int av1_temporal_filter(AV1_COMP *cpi, const int filter_frame_lookahead_idx,
- FRAME_UPDATE_TYPE update_type, int is_forward_keyframe,
- int *show_existing_arf,
+ int gf_frame_index, int *show_existing_arf,
YV12_BUFFER_CONFIG *output_frame) {
MultiThreadInfo *const mt_info = &cpi->mt_info;
// Basic informaton of the current frame.
const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
- const uint8_t group_idx = cpi->gf_frame_index;
TemporalFilterCtx *tf_ctx = &cpi->tf_ctx;
TemporalFilterData *tf_data = &cpi->td.tf_data;
+ const FRAME_UPDATE_TYPE update_type = gf_group->update_type[gf_frame_index];
+ const int is_forward_keyframe =
+ av1_gop_check_forward_keyframe(gf_group, gf_frame_index);
// Filter one more ARF if the lookahead index is leq 7 (w.r.t. 9-th frame).
// This frame is ALWAYS a show existing frame.
const int is_second_arf =
@@ -1209,12 +1226,12 @@
#if CONFIG_FRAME_PARALLEL_ENCODE
// Only parallel level 0 frames go through temporal filtering.
- assert(gf_group->frame_parallel_level[group_idx] == 0);
+ assert(gf_group->frame_parallel_level[gf_frame_index] == 0);
#endif // CONFIG_FRAME_PARALLEL_ENCODE
// Initialize temporal filter context structure.
- init_tf_ctx(cpi, filter_frame_lookahead_idx, is_second_arf, update_type,
- is_forward_keyframe, output_frame);
+ init_tf_ctx(cpi, filter_frame_lookahead_idx, gf_frame_index, is_second_arf,
+ output_frame);
// Set showable frame.
if (is_forward_keyframe == 0 && update_type != KF_UPDATE) {
@@ -1252,13 +1269,13 @@
const float std = (float)sqrt((float)diff->sse / num_mbs - mean * mean);
// TODO(yunqing): This can be combined with TPL q calculation later.
- cpi->rc.base_frame_target = gf_group->bit_allocation[group_idx];
+ cpi->rc.base_frame_target = gf_group->bit_allocation[gf_frame_index];
av1_set_target_rate(cpi, cpi->common.width, cpi->common.height);
int top_index = 0;
int bottom_index = 0;
const int q = av1_rc_pick_q_and_bounds(
cpi, cpi->oxcf.frm_dim_cfg.width, cpi->oxcf.frm_dim_cfg.height,
- group_idx, &bottom_index, &top_index);
+ gf_frame_index, &bottom_index, &top_index);
const int ac_q = av1_ac_quant_QTX(q, 0, cpi->common.seq_params->bit_depth);
const float threshold = 0.7f * ac_q * ac_q;
@@ -1278,4 +1295,63 @@
return 1;
}
+
+void av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, AV1_COMP *cpi) {
+ AV1_COMMON *cm = &cpi->common;
+ const SequenceHeader *const seq_params = cm->seq_params;
+ const AV1EncoderConfig *oxcf = &cpi->oxcf;
+ for (int i = 0; i < TF_INFO_BUF_COUNT; ++i) {
+ int ret = aom_realloc_frame_buffer(
+ &tf_info->tf_buf[i], oxcf->frm_dim_cfg.width, oxcf->frm_dim_cfg.height,
+ seq_params->subsampling_x, seq_params->subsampling_y,
+ seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
+ cm->features.byte_alignment, NULL, NULL, NULL,
+ cpi->oxcf.tool_cfg.enable_global_motion);
+ if (ret) {
+ aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
+ "Failed to allocate tf_info");
+ }
+ }
+}
+
+void av1_tf_info_free(TEMPORAL_FILTER_INFO *tf_info) {
+ for (int i = 0; i < TF_INFO_BUF_COUNT; ++i) {
+ aom_free_frame_buffer(&tf_info->tf_buf[i]);
+ }
+}
+
+void av1_tf_info_filtering(TEMPORAL_FILTER_INFO *tf_info, AV1_COMP *cpi,
+ const GF_GROUP *gf_group) {
+ const AV1_COMMON *const cm = &cpi->common;
+ av1_zero(tf_info->tf_buf_valid);
+ av1_zero(tf_info->tf_buf_gf_index);
+ for (int gf_index = 0; gf_index < gf_group->size; ++gf_index) {
+ int update_type = gf_group->update_type[gf_index];
+ if (update_type == KF_UPDATE || update_type == ARF_UPDATE) {
+ int buf_idx = update_type == ARF_UPDATE;
+ int lookahead_idx = gf_group->arf_src_offset[gf_index] +
+ gf_group->cur_frame_idx[gf_index];
+ YV12_BUFFER_CONFIG *out_buf = &tf_info->tf_buf[buf_idx];
+ av1_temporal_filter(cpi, lookahead_idx, gf_index,
+ &tf_info->show_tf_buf[buf_idx], out_buf);
+ aom_extend_frame_borders(out_buf, av1_num_planes(cm));
+ tf_info->tf_buf_gf_index[buf_idx] = gf_index;
+ tf_info->tf_buf_valid[buf_idx] = 1;
+ }
+ }
+}
+
+YV12_BUFFER_CONFIG *av1_tf_info_get_filtered_buf(TEMPORAL_FILTER_INFO *tf_info,
+ int gf_index,
+ int *show_tf_buf) {
+ YV12_BUFFER_CONFIG *out_buf = NULL;
+ *show_tf_buf = 0;
+ for (int i = 0; i < TF_INFO_BUF_COUNT; ++i) {
+ if (tf_info->tf_buf_valid[i] && tf_info->tf_buf_gf_index[i] == gf_index) {
+ out_buf = &tf_info->tf_buf[i];
+ *show_tf_buf = tf_info->show_tf_buf[i];
+ }
+ }
+ return out_buf;
+}
/*!\endcond */
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
index bc9ff5c..4949656 100644
--- a/av1/encoder/temporal_filter.h
+++ b/av1/encoder/temporal_filter.h
@@ -133,6 +133,63 @@
int q_factor;
} TemporalFilterCtx;
+/*!
+ * buffer count in TEMPORAL_FILTER_INFO
+ * Currently we only apply filtering on KEY and ARF after
+ * define_gf_group(). Hence, the count is two.
+ */
+#define TF_INFO_BUF_COUNT 2
+/*!
+ * \brief Temporal filter info for a gop
+ */
+typedef struct TEMPORAL_FILTER_INFO {
+ /*!
+ * buffers used for temporal filtering in a GOP
+ */
+ YV12_BUFFER_CONFIG tf_buf[TF_INFO_BUF_COUNT];
+ /*!
+ * whether to show the buffer directly or not.
+ */
+ int show_tf_buf[TF_INFO_BUF_COUNT];
+ /*!
+ * the corresponding gf_index for the buffer.
+ */
+ int tf_buf_gf_index[TF_INFO_BUF_COUNT];
+ /*!
+ * whether the buf is valid or not.
+ */
+ int tf_buf_valid[TF_INFO_BUF_COUNT];
+} TEMPORAL_FILTER_INFO;
+
+/*!\brief Allocate buffers for TEMPORAL_FILTER_INFO
+ * \param[in,out] tf_info Temporal filter info for a gop
+ * \param[in,out] cpi Top level encoder instance structure
+ */
+void av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, struct AV1_COMP *cpi);
+
+/*!\brief Free buffers for TEMPORAL_FILTER_INFO
+ * \param[in,out] tf_info Temporal filter info for a gop
+ */
+void av1_tf_info_free(TEMPORAL_FILTER_INFO *tf_info);
+
+/*!\brief Apply temporal filter for key frame and ARF in a gop
+ * \param[in,out] tf_info Temporal filter info for a gop
+ * \param[in,out] cpi Top level encoder instance structure
+ * \param[in] gf_group GF/ARF group data structure
+ */
+void av1_tf_info_filtering(TEMPORAL_FILTER_INFO *tf_info, struct AV1_COMP *cpi,
+ const GF_GROUP *gf_group);
+
+/*!\brief Get a filtered buffer from TEMPORAL_FILTER_INFO
+ * \param[in,out] tf_info Temporal filter info for a gop
+ * \param[in] gf_index gf_index for the target buffer
+ * \param[out] show_tf_buf whether the target buffer can be shown
+ * directly
+ */
+YV12_BUFFER_CONFIG *av1_tf_info_get_filtered_buf(TEMPORAL_FILTER_INFO *tf_info,
+ int gf_index,
+ int *show_tf_buf);
+
/*!\cond */
// Sum and SSE source vs filtered frame difference returned by
@@ -214,10 +271,11 @@
*
* \ingroup src_frame_proc
* \param[in] cpi Top level encoder instance
- * structure \param[in] filter_frame_lookahead_idx The index of the
- * to-filter frame in the lookahead buffer cpi->lookahead. \param[in]
- * update_type This frame's update type. \param[in]
- * is_forward_keyframe Indicate whether this is a forward keyframe.
+ * structure
+ * \param[in] filter_frame_lookahead_idx The index of the
+ * to-filter frame in the lookahead
+ * buffer cpi->lookahead.
+ * \param[in] gf_frame_index Index of GOP
* \param[in,out] show_existing_arf Whether to show existing ARF. This
* field is updated in this function.
* \param[out] output_frame Ouput filtered frame.
@@ -226,8 +284,7 @@
*/
int av1_temporal_filter(struct AV1_COMP *cpi,
const int filter_frame_lookahead_idx,
- FRAME_UPDATE_TYPE update_type, int is_forward_keyframe,
- int *show_existing_arf,
+ int gf_frame_index, int *show_existing_arf,
YV12_BUFFER_CONFIG *output_frame);
/*!\cond */
diff --git a/av1/encoder/tpl_model.c b/av1/encoder/tpl_model.c
index 468dfe6..5af4afe 100644
--- a/av1/encoder/tpl_model.c
+++ b/av1/encoder/tpl_model.c
@@ -1315,8 +1315,9 @@
tpl_frame->gf_picture = &buf->img;
}
if (gop_eval && cpi->rc.frames_since_key > 0 &&
- gf_group->arf_index == gf_index)
+ gf_group->arf_index == gf_index) {
tpl_frame->gf_picture = &cpi->ppi->alt_ref_buffer;
+ }
// 'cm->current_frame.frame_number' is the display number
// of the current frame.